code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + nbsphinx="hidden" import open3d as o3d import numpy as np import matplotlib.pyplot as plt import copy import os import sys # only needed for tutorial, monkey patches visualization sys.path.append('..') import open3d_tutorial as o3dtut # change to True if you want to interact with the visualization windows o3dtut.interactive = not "CI" in os.environ # - # # Octree # An **octree** is a tree data structure where each internal node has eight children. Octrees are commonly used for spatial partitioning of 3D point clouds. Non-empty leaf nodes of an octree contain one or more points that fall within the same spatial subdivision. Octrees are a useful description of 3D space and can be used to quickly find nearby points. Open3D has the geometry type `Octree` that can be used to create, search, and traverse octrees with a user-specified maximum tree depth, `max_depth`. # ## From point cloud # An octree can be constructed from a point cloud using the method `convert_from_point_cloud`. Each point is inserted into the tree by following the path from the root node to the appropriate leaf node at depth `max_depth`. As the tree depth increases, internal (and eventually leaf) nodes represent a smaller partition of 3D space. # # If the point cloud has color, the corresponding leaf node takes the color of the last inserted point. The `size_expand` parameter increases the size of the root octree node so it is slightly bigger than the original point cloud bounds to accommodate all points. 
# +
print('input')
N = 2000
armadillo_data = o3d.data.ArmadilloMesh()
mesh = o3d.io.read_triangle_mesh(armadillo_data.path)
pcd = mesh.sample_points_poisson_disk(N)

# Rescale the sampled cloud so it fits inside a unit cube, keeping its
# center fixed, then give every point a random color.
bound_extent = pcd.get_max_bound() - pcd.get_min_bound()
pcd.scale(1 / np.max(bound_extent), center=pcd.get_center())
pcd.colors = o3d.utility.Vector3dVector(np.random.uniform(0, 1, size=(N, 3)))
o3d.visualization.draw_geometries([pcd])

print('octree division')
octree = o3d.geometry.Octree(max_depth=4)
octree.convert_from_point_cloud(pcd, size_expand=0.01)
o3d.visualization.draw_geometries([octree])
# -

# ## From voxel grid
# An octree can also be constructed from an Open3D `VoxelGrid` geometry using the method `create_from_voxel_grid`. Each voxel of the input `VoxelGrid` is treated as a point in 3D space with coordinates corresponding to the origin of the voxel. Each leaf node takes the color of its corresponding voxel.

# +
print('voxelization')
voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud(pcd, voxel_size=0.05)
o3d.visualization.draw_geometries([voxel_grid])

print('octree division')
octree = o3d.geometry.Octree(max_depth=4)
octree.create_from_voxel_grid(voxel_grid)
o3d.visualization.draw_geometries([octree])
# -

# Additionally, an `Octree` can be converted to a `VoxelGrid` with `to_voxel_grid`.

# ## Traversal
# An octree can be traversed which can be useful for searching or processing subsections of 3D geometry. By providing the `traverse` method with a callback, each time a node (internal or leaf) is visited, additional processing can be performed.
#
# In the following example, an early stopping criterion is used to only process internal/leaf nodes with more than a certain number of points. This early stopping ability can be used to efficiently process spatial regions meeting certain conditions.
def f_traverse(node, node_info):
    """Octree traversal callback.

    Prints a summary line for each visited internal/leaf node and returns
    True when the children of the current node should not be visited
    (early stopping).
    """
    early_stop = False

    if isinstance(node, o3d.geometry.OctreeInternalNode):
        if isinstance(node, o3d.geometry.OctreeInternalPointNode):
            # Count the non-empty child octants of this internal node.
            num_children = sum(
                1 for child in node.children if child is not None)
            print(
                "{}{}: Internal node at depth {} has {} children and {} points ({})"
                .format(' ' * node_info.depth, node_info.child_index,
                        node_info.depth, num_children, len(node.indices),
                        node_info.origin))

            # we only want to process nodes / spatial regions with enough points
            early_stop = len(node.indices) < 250
    elif isinstance(node, o3d.geometry.OctreeLeafNode):
        if isinstance(node, o3d.geometry.OctreePointColorLeafNode):
            print("{}{}: Leaf node at depth {} has {} points with origin {}".
                  format(' ' * node_info.depth, node_info.child_index,
                         node_info.depth, len(node.indices),
                         node_info.origin))
    else:
        raise NotImplementedError('Node type not recognized!')

    # early stopping: if True, traversal of children of the current node will be skipped
    return early_stop


octree = o3d.geometry.Octree(max_depth=4)
octree.convert_from_point_cloud(pcd, size_expand=0.01)
octree.traverse(f_traverse)

# ## Find leaf node containing point
# Using the above traversal mechanism, an octree can be quickly searched for the leaf node that contains a given point. This functionality is provided via the `locate_leaf_node` method.

octree.locate_leaf_node(pcd.points[0])
docs/jupyter/geometry/octree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: visualization-curriculum-gF8wUgMm # language: python # name: visualization-curriculum-gf8wugmm # --- # + [markdown] colab_type="text" id="bMoe_6i9CVfd" papermill={"duration": 0.018225, "end_time": "2020-03-17T18:14:05.525568", "exception": false, "start_time": "2020-03-17T18:14:05.507343", "status": "completed"} tags=[] # # Estimating The Mortality Rate For COVID-19 # > Using Country-Level Covariates To Correct For Testing & Reporting Biases And Estimate a True Mortality Rate. # - author: <NAME> # - image: images/corvid-mortality.png # - comments: true # - categories: [MCMC, mortality] # - permalink: /covid-19-mortality-estimation/ # - toc: true # + colab={"base_uri": "https://localhost:8080/", "height": 358} colab_type="code" id="XtQpnxQeDuc4" outputId="fdce59d4-f94b-4f92-e9b1-c749c7512b6f" papermill={"duration": 0.021561, "end_time": "2020-03-17T18:14:05.561854", "exception": false, "start_time": "2020-03-17T18:14:05.540293", "status": "completed"} tags=[] #hide # # ! 
pip install pymc3 arviz xlrd # + colab={} colab_type="code" id="gYLQETHHEsHR" papermill={"duration": 1.722618, "end_time": "2020-03-17T18:14:07.299151", "exception": false, "start_time": "2020-03-17T18:14:05.576533", "status": "completed"} tags=[] #hide # Setup and imports # %matplotlib inline import warnings warnings.simplefilter('ignore') import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc3 as pm from IPython.display import display, Markdown # + colab={} colab_type="code" id="-SYMy6oJMw9t" papermill={"duration": 0.023719, "end_time": "2020-03-17T18:14:07.335765", "exception": false, "start_time": "2020-03-17T18:14:07.312046", "status": "completed"} tags=[] #hide # constants ignore_countries = [ 'Others', 'Cruise Ship' ] cpi_country_mapping = { 'United States of America': 'US', 'China': 'Mainland China' } wb_country_mapping = { 'United States': 'US', 'Egypt, Arab Rep.': 'Egypt', 'Hong Kong SAR, China': 'Hong Kong', 'Iran, Islamic Rep.': 'Iran', 'China': 'Mainland China', 'Russian Federation': 'Russia', 'Slovak Republic': 'Slovakia', 'Korea, Rep.': 'Korea, South' } wb_covariates = [ ('SH.XPD.OOPC.CH.ZS', 'healthcare_oop_expenditure'), ('SH.MED.BEDS.ZS', 'hospital_beds'), ('HD.HCI.OVRL', 'hci'), ('SP.POP.65UP.TO.ZS', 'population_perc_over65'), ('SP.RUR.TOTL.ZS', 'population_perc_rural') ] # + colab={} colab_type="code" id="phZcRKGJMzJ3" papermill={"duration": 0.043477, "end_time": "2020-03-17T18:14:07.392270", "exception": false, "start_time": "2020-03-17T18:14:07.348793", "status": "completed"} tags=[] #hide # data loading and manipulation from datetime import datetime import os import numpy as np import pandas as pd def get_all_data(): ''' Main routine that grabs all COVID and covariate data and returns them as a single dataframe that contains: * count of cumulative cases and deaths by country (by today's date) * days since first case for each country * CPI gov't transparency index * World Bank data on population, healthcare, etc. 
by country ''' all_covid_data = _get_latest_covid_timeseries() covid_cases_rollup = _rollup_by_country(all_covid_data['Confirmed']) covid_deaths_rollup = _rollup_by_country(all_covid_data['Deaths']) todays_date = covid_cases_rollup.columns.max() # Create DataFrame with today's cumulative case and death count, by country df_out = pd.DataFrame({'cases': covid_cases_rollup[todays_date], 'deaths': covid_deaths_rollup[todays_date]}) _clean_country_list(df_out) _clean_country_list(covid_cases_rollup) # Add observed death rate: df_out['death_rate_observed'] = df_out.apply( lambda row: row['deaths'] / float(row['cases']), axis=1) # Add covariate for days since first case df_out['days_since_first_case'] = _compute_days_since_first_case( covid_cases_rollup) # Add CPI covariate: _add_cpi_data(df_out) # Add World Bank covariates: _add_wb_data(df_out) # Drop any country w/o covariate data: num_null = df_out.isnull().sum(axis=1) to_drop_idx = df_out.index[num_null > 1] print('Dropping %i/%i countries due to lack of data' % (len(to_drop_idx), len(df_out))) df_out.drop(to_drop_idx, axis=0, inplace=True) return df_out, todays_date def _get_latest_covid_timeseries(): ''' Pull latest time-series data from JHU CSSE database ''' repo = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/' data_path = 'csse_covid_19_data/csse_covid_19_time_series/' all_data = {} for status in ['Confirmed', 'Deaths', 'Recovered']: file_name = 'time_series_19-covid-%s.csv' % status all_data[status] = pd.read_csv( '%s%s%s' % (repo, data_path, file_name)) return all_data def _rollup_by_country(df): ''' Roll up each raw time-series by country, adding up the cases across the individual states/provinces within the country :param df: Pandas DataFrame of raw data from CSSE :return: DataFrame of country counts ''' gb = df.groupby('Country/Region') df_rollup = gb.sum() df_rollup.drop(['Lat', 'Long'], axis=1, inplace=True, errors='ignore') # Drop dates with all 0 count data 
df_rollup.drop(df_rollup.columns[df_rollup.sum(axis=0) == 0], axis=1, inplace=True) # Convert column strings to dates: idx_as_dt = [datetime.strptime(x, '%m/%d/%y') for x in df_rollup.columns] df_rollup.columns = idx_as_dt return df_rollup def _clean_country_list(df): ''' Clean up input country list in df ''' # handle recent changes in country names: country_rename = { 'Hong Kong SAR': 'Hong Kong', 'Taiwan*': 'Taiwan', 'Czechia': 'Czech Republic', 'Brunei': 'Brunei Darussalam', 'Iran (Islamic Republic of)': 'Iran', 'Viet Nam': 'Vietnam', 'Russian Federation': 'Russia', 'Republic of Korea': 'South Korea', 'Republic of Moldova': 'Moldova', 'China': 'Mainland China' } df.rename(country_rename, axis=0, inplace=True) df.drop(ignore_countries, axis=0, inplace=True, errors='ignore') def _compute_days_since_first_case(df_cases): ''' Compute the country-wise days since first confirmed case :param df_cases: country-wise time-series of confirmed case counts :return: Series of country-wise days since first case ''' date_first_case = df_cases[df_cases > 0].idxmin(axis=1) days_since_first_case = date_first_case.apply( lambda x: (df_cases.columns.max() - x).days) # Add 1 month for China, since outbreak started late 2019: days_since_first_case.loc['Mainland China'] += 30 return days_since_first_case def _add_cpi_data(df_input): ''' Add the Government transparency (CPI - corruption perceptions index) data (by country) as a column in the COVID cases dataframe. 
:param df_input: COVID-19 data rolled up country-wise :return: None, add CPI data to df_input in place ''' cpi_data = pd.read_excel( 'https://github.com/jwrichar/COVID19-mortality/blob/master/data/CPI2019.xlsx?raw=true', skiprows=2) cpi_data.set_index('Country', inplace=True, drop=True) cpi_data.rename(cpi_country_mapping, axis=0, inplace=True) # Add CPI score to input df: df_input['cpi_score_2019'] = cpi_data['CPI score 2019'] def _add_wb_data(df_input): ''' Add the World Bank data covariates as columns in the COVID cases dataframe. :param df_input: COVID-19 data rolled up country-wise :return: None, add World Bank data to df_input in place ''' wb_data = pd.read_csv( 'https://raw.githubusercontent.com/jwrichar/COVID19-mortality/master/data/world_bank_data.csv', na_values='..') for (wb_name, var_name) in wb_covariates: wb_series = wb_data.loc[wb_data['Series Code'] == wb_name] wb_series.set_index('Country Name', inplace=True, drop=True) wb_series.rename(wb_country_mapping, axis=0, inplace=True) # Add WB data: df_input[var_name] = _get_most_recent_value(wb_series) def _get_most_recent_value(wb_series): ''' Get most recent non-null value for each country in the World Bank time-series data ''' ts_data = wb_series[wb_series.columns[3::]] def _helper(row): row_nn = row[row.notnull()] if len(row_nn): return row_nn[-1] else: return np.nan return ts_data.apply(_helper, axis=1) # + colab={"base_uri": "https://localhost:8080/", "height": 33} colab_type="code" id="AlGjkxH9N4Bt" outputId="746bd546-5ded-4565-e10e-64aa5af0df0c" papermill={"duration": 2.221075, "end_time": "2020-03-17T18:14:09.625986", "exception": false, "start_time": "2020-03-17T18:14:07.404911", "status": "completed"} tags=[] #hide # Load the data (see source/data.py): df, todays_date = get_all_data() # Impute NA's column-wise: df = df.apply(lambda x: x.fillna(x.mean()),axis=0) # + [markdown] colab_type="text" id="dwPgV5dBO1Ll" papermill={"duration": 0.015633, "end_time": "2020-03-17T18:14:09.656019", 
"exception": false, "start_time": "2020-03-17T18:14:09.640386", "status": "completed"} tags=[] # # Observed mortality rates # + colab={"base_uri": "https://localhost:8080/", "height": 618} colab_type="code" id="afCZhOL-OSOd" outputId="227444ff-df43-4936-e652-9974a7843a56" papermill={"duration": 0.422043, "end_time": "2020-03-17T18:14:10.091831", "exception": false, "start_time": "2020-03-17T18:14:09.669788", "status": "completed"} tags=[] #collapse-hide display(Markdown('Data as of %s' % todays_date)) reported_mortality_rate = df['deaths'].sum() / df['cases'].sum() display(Markdown('Overall reported mortality rate: %.2f%%' % (100.0 * reported_mortality_rate))) df_highest = df.sort_values('cases', ascending=False).head(15) mortality_rate = pd.Series( data=(df_highest['deaths']/df_highest['cases']).values, index=map(lambda x: '%s (%i cases)' % (x, df_highest.loc[x]['cases']), df_highest.index)) ax = mortality_rate.plot.bar( figsize=(14,7), title='Reported Mortality Rate by Country (countries w/ highest case counts)') ax.axhline(reported_mortality_rate, color='k', ls='--') plt.show() # + [markdown] colab_type="text" id="Z5qTKZtBTTCb" papermill={"duration": 0.013506, "end_time": "2020-03-17T18:14:10.121620", "exception": false, "start_time": "2020-03-17T18:14:10.108114", "status": "completed"} tags=[] # # Model # + [markdown] papermill={"duration": 0.015751, "end_time": "2020-03-17T18:14:10.150941", "exception": false, "start_time": "2020-03-17T18:14:10.135190", "status": "completed"} tags=[] # Estimate COVID-19 mortality rate, controlling for country factors. 
# + colab={} colab_type="code" id="SZeyDBm3O8g6" papermill={"duration": 0.034634, "end_time": "2020-03-17T18:14:10.199426", "exception": false, "start_time": "2020-03-17T18:14:10.164792", "status": "completed"} tags=[] #hide import numpy as np import pymc3 as pm def initialize_model(df): # Normalize input covariates in a way that is sensible: # (1) days since first case: upper # mu_0 to reflect asymptotic mortality rate months after outbreak _normalize_col(df, 'days_since_first_case', how='upper') # (2) CPI score: upper # mu_0 to reflect scenario in absence of corrupt govts _normalize_col(df, 'cpi_score_2019', how='upper') # (3) healthcare OOP spending: mean # not sure which way this will go _normalize_col(df, 'healthcare_oop_expenditure', how='mean') # (4) hospital beds: upper # more beds, more healthcare and tests _normalize_col(df, 'hospital_beds', how='mean') # (5) hci = human capital index: upper # HCI measures education/health; mu_0 should reflect best scenario _normalize_col(df, 'hci', how='mean') # (6) % over 65: mean # mu_0 to reflect average world demographic _normalize_col(df, 'population_perc_over65', how='mean') # (7) % rural: mean # mu_0 to reflect average world demographic _normalize_col(df, 'population_perc_rural', how='mean') n = len(df) covid_mortality_model = pm.Model() with covid_mortality_model: # Priors: mu_0 = pm.Beta('mu_0', alpha=0.3, beta=10) sig_0 = pm.Uniform('sig_0', lower=0.0, upper=mu_0 * (1 - mu_0)) beta = pm.Normal('beta', mu=0, sigma=5, shape=7) sigma = pm.HalfNormal('sigma', sigma=5) # Model mu from country-wise covariates: # Apply logit transformation so logistic regression performed mu_0_logit = np.log(mu_0 / (1 - mu_0)) mu_est = mu_0_logit + \ beta[0] * df['days_since_first_case_normalized'].values + \ beta[1] * df['cpi_score_2019_normalized'].values + \ beta[2] * df['healthcare_oop_expenditure_normalized'].values + \ beta[3] * df['hospital_beds_normalized'].values + \ beta[4] * df['hci_normalized'].values + \ beta[5] * 
df['population_perc_over65_normalized'].values + \ beta[6] * df['population_perc_rural_normalized'].values mu_model_logit = pm.Normal('mu_model_logit', mu=mu_est, sigma=sigma, shape=n) # Transform back to probability space: mu_model = np.exp(mu_model_logit) / (np.exp(mu_model_logit) + 1) # tau_i, mortality rate for each country # Parametrize with (mu, sigma) # instead of (alpha, beta) to ease interpretability. tau = pm.Beta('tau', mu=mu_model, sigma=sig_0, shape=n) # tau = pm.Beta('tau', mu=mu_0, sigma=sig_0, shape=n) # Binomial likelihood: d_obs = pm.Binomial('d_obs', n=df['cases'].values, p=tau, observed=df['deaths'].values) return covid_mortality_model def _normalize_col(df, colname, how='mean'): ''' Normalize an input column in one of 3 ways: * how=mean: unit normal N(0,1) * how=upper: normalize to [-1, 0] with highest value set to 0 * how=lower: normalize to [0, 1] with lowest value set to 0 Returns df modified in place with extra column added. ''' colname_new = '%s_normalized' % colname if how == 'mean': mu = df[colname].mean() sig = df[colname].std() df[colname_new] = (df[colname] - mu) / sig elif how == 'upper': maxval = df[colname].max() minval = df[colname].min() df[colname_new] = (df[colname] - maxval) / (maxval - minval) elif how == 'lower': maxval = df[colname].max() minval = df[colname].min() df[colname_new] = (df[colname] - minval) / (maxval - minval) # + colab={"base_uri": "https://localhost:8080/", "height": 228} colab_type="code" id="W8t6hN76TiCE" outputId="83c6739f-2868-4b53-b36e-78283e58ea14" papermill={"duration": 216.364399, "end_time": "2020-03-17T18:17:46.581514", "exception": false, "start_time": "2020-03-17T18:14:10.217115", "status": "completed"} tags=[] #hide # Initialize the model: mod = initialize_model(df) # Run MCMC sampler1 with mod: trace = pm.sample(300, tune=100, chains=3, cores=2) # + colab={"base_uri": "https://localhost:8080/", "height": 572} colab_type="code" id="avV7l1JWTtAp" outputId="786c8ebb-d672-4ef1-d16b-b6ffbe8d168f" 
papermill={"duration": 3.834175, "end_time": "2020-03-17T18:17:50.773715", "exception": false, "start_time": "2020-03-17T18:17:46.939540", "status": "completed"} tags=[] #collapse-hide n_samp = len(trace['mu_0']) mu0_summary = pm.summary(trace).loc['mu_0'] print("COVID-19 Global Mortality Rate Estimation:") print("Posterior mean: %0.2f%%" % (100*trace['mu_0'].mean())) print("Posterior median: %0.2f%%" % (100*np.median(trace['mu_0']))) lower = np.sort(trace['mu_0'])[int(n_samp*0.025)] upper = np.sort(trace['mu_0'])[int(n_samp*0.975)] print("95%% posterior interval: (%0.2f%%, %0.2f%%)" % (100*lower, 100*upper)) prob_lt_reported = sum(trace['mu_0'] < reported_mortality_rate) / len(trace['mu_0']) print("Probability true rate less than reported rate (%.2f%%) = %.2f%%" % (100*reported_mortality_rate, 100*prob_lt_reported)) print("") # Posterior plot for mu0 print('Posterior probability density for COVID-19 mortality rate, controlling for country factors:') ax = pm.plot_posterior(trace, var_names=['mu_0'], figsize=(18, 8), textsize=18, credible_interval=0.95, bw=3.0, lw=3, kind='kde', ref_val=round(reported_mortality_rate, 3)) # + [markdown] colab_type="text" id="8kGfpMrGU1ML" papermill={"duration": 0.355035, "end_time": "2020-03-17T18:17:51.437023", "exception": false, "start_time": "2020-03-17T18:17:51.081988", "status": "completed"} tags=[] # ## Magnitude and Significance of Factors # # For bias in reported COVID-19 mortality rate # + colab={} colab_type="code" id="HUgv4QaNVOMt" papermill={"duration": 3.663374, "end_time": "2020-03-17T18:17:55.429741", "exception": false, "start_time": "2020-03-17T18:17:51.766367", "status": "completed"} tags=[] #collapse-hide # Posterior summary for the beta parameters: beta_summary = pm.summary(trace).head(7) beta_summary.index = ['days_since_first_case', 'cpi', 'healthcare_oop', 'hospital_beds', 'hci', 'percent_over65', 'percent_rural'] beta_summary.reset_index(drop=False, inplace=True) err_vals = ((beta_summary['hpd_3%'] - 
beta_summary['mean']).values, (beta_summary['hpd_97%'] - beta_summary['mean']).values) ax = beta_summary.plot(x='index', y='mean', kind='bar', figsize=(14, 7), title='Posterior Distribution of Beta Parameters', yerr=err_vals, color='lightgrey', legend=False, grid=True, capsize=5) beta_summary.plot(x='index', y='mean', color='k', marker='o', linestyle='None', ax=ax, grid=True, legend=False, xlim=plt.gca().get_xlim()) plt.savefig('../images/corvid-mortality.png') # + [markdown] papermill={"duration": 0.300118, "end_time": "2020-03-17T18:17:56.047164", "exception": false, "start_time": "2020-03-17T18:17:55.747046", "status": "completed"} tags=[] # # About This Analysis # # This analysis was done by [<NAME>](https://twitter.com/joeyrichar) # # In this project[^3], we attempt to estimate the true mortality rate[^1] for COVID-19 while controlling for country-level covariates[^2][^4] such as: # * age of outbreak in the country # * transparency of the country's government # * access to healthcare # * demographics such as age of population and rural vs. urban # # Estimating a mortality rate lower than the overall reported rate likely implies that there has been **significant under-testing and under-reporting of cases globally**. # # ## Interpretation of Country-Level Parameters # # 1. days_since_first_case - positive (very statistically significant). As time since outbreak increases, expected mortality rate **increases**, as expected. # 2. cpi - negative (statistically significant). As government transparency increases, expected mortality rate **decreases**. This may mean that less transparent governments under-report cases, hence inflating the mortality rate. # 3. healthcare avg. out-of-pocket spending - no significant trend. # 4. hospital beds per capita - no significant trend. # 5. Human Capital Index - no significant trend (slightly negative = mortality rates decrease with increased mobilization of the country) # 6. 
percent over 65 - positive (statistically significant). As population age increases, the mortality rate also **increases**, as expected. # 7. percent rural - no significant trend. # # # [^1]: As of March 10, the **overall reported mortality rate is 3.5%**. However, this figure does not account for **systematic biases in case reporting and testing**. The observed mortality of COVID-19 has varied widely from country to country (as of early March 2020). For instance, as of March 10, mortality rates have ranged from < 0.1% in places like Germany (1100+ cases) to upwards of 5% in Italy (9000+ cases) and 3.9% in China (80k+ cases). # # [^2]: The point of our modelling work here is to **try to understand and correct for the country-to-country differences that may cause the observed discrepancies in COVID-19 country-wide mortality rates**. That way we can "undo" those biases and try to **pin down an overall *real* mortality rate**. # # [^3]: Full details about the model are available at: https://github.com/jwrichar/COVID19-mortality # # [^4]: The effects of these parameters are subject to change as more data are collected. # # + [markdown] papermill={"duration": 0.32221, "end_time": "2020-03-17T18:17:56.675900", "exception": false, "start_time": "2020-03-17T18:17:56.353690", "status": "completed"} tags=[] # # Appendix: Model Diagnostics # # The following trace plots help to assess the convergence of the MCMC sampler. # + papermill={"duration": 15.390342, "end_time": "2020-03-17T18:18:12.379195", "exception": false, "start_time": "2020-03-17T18:17:56.988853", "status": "completed"} tags=[] #hide_input import arviz as az az.plot_trace(trace, compact=True); # + papermill={"duration": 0.336867, "end_time": "2020-03-17T18:18:13.022316", "exception": false, "start_time": "2020-03-17T18:18:12.685449", "status": "completed"} tags=[]
_notebooks/2020-03-11-Mortality_Rate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="V1JS0B802NXL" # Before we dive deep into the world of PyTorch development, it’s important to familiarize yourself with the fundamental data structure in PyTorch: the torch.tensor. By understanding the tensor, you will understand how PyTorch handles and stores data, and since deep learning is fundamentally the collection and manipulation of floating-point numbers, understanding tensors will help you understand how PyTorch implements more advanced functions for deep learning. In addition, you may find yourself using tensor operations frequently when preprocessing input data or manipulating output data during model development # + [markdown] id="dmq42z9n2qPS" # In PyTorch, a tensor is a data structure used to store and manipulate data. Like a NumPy array, a tensor is a multidimensional array containing elements of a single data type. Tensors can be used to represent scalars, vectors, matrices, and n-dimensional arrays and are derived from the torch.Tensor class. However, tensors are more than just arrays of numbers. Creating or instantiating a tensor object from the torch.Tensor class gives us access to a set of built-in class attributes and operations or class methods that provide a robust set of built-in capabilities. This guide describes these attributes and operations in detail. # + [markdown] id="ClxVmz532rjS" # Tensors also include added benefits that make them more suitable than NumPy arrays for deep learning calculations. First, tensor operations can be performed significantly faster using GPU acceleration. Second, tensors can be stored and manipulated at scale using distributed processing on multiple CPUs and GPUs and across multiple servers. 
And third, tensors keep track of their graph computations, which is very important in implementing a deep learning library. # + [markdown] id="mW6c7P-N22oi" # **Simple example** # + [markdown] id="JddfsjK83LfC" # First, we import the PyTorch library, then we create two tensors, x and y, from two-dimensional lists. Next, we add the two tensors and store the result in z. We can just use the + operator here because the torch.Tensor class supports operator overloading. Finally, we print the new tensor, z, which we can see is the matrix sum of x and y, and we print the size of z. Notice that z is a tensor object itself and the size() method is used to return its matrix dimensions, namely 2 × 3: # + id="FsxVCauz3MKH" executionInfo={"status": "ok", "timestamp": 1631129005705, "user_tz": -330, "elapsed": 4290, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} import torch x = torch.tensor([[1,2,3],[4,5,6]]) y = torch.tensor([[7,8,9],[10,11,12]]) z = x + y # + colab={"base_uri": "https://localhost:8080/"} id="mEIMUuTr3Udo" executionInfo={"status": "ok", "timestamp": 1631129024511, "user_tz": -330, "elapsed": 467, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="e8894882-b7a2-4255-a9dd-400e80ec6d03" print(z) # + colab={"base_uri": "https://localhost:8080/"} id="GXEbbu4M3TNI" executionInfo={"status": "ok", "timestamp": 1631129038084, "user_tz": -330, "elapsed": 425, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c4fec653-9e02-468f-83de-f1317127d6f2" print(z.size()) # + colab={"base_uri": "https://localhost:8080/", "height": 885} id="WyeCg7C73eiY" executionInfo={"status": "ok", "timestamp": 1631129159852, "user_tz": -330, "elapsed": 541, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="fa75bf8e-78fc-4201-f7ce-50913cf15b61" ', '.join(dir(z)) # + [markdown] id="_dYo0ElY3gmG" # **Running it on gpu (if 
available)** # + id="t7tC4u3r4Fl_" executionInfo={"status": "ok", "timestamp": 1631129342724, "user_tz": -330, "elapsed": 518, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} device = "cuda" if torch.cuda.is_available() else "cpu" x = torch.tensor([[1,2,3],[4,5,6]], device=device) y = torch.tensor([[7,8,9],[10,11,12]], device=device) z = x + y # + colab={"base_uri": "https://localhost:8080/"} id="__m8emIe4o2F" executionInfo={"status": "ok", "timestamp": 1631129347120, "user_tz": -330, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="4efd6e86-e5ad-48ed-8c85-b6562e61a108" print(z) # + colab={"base_uri": "https://localhost:8080/"} id="zjYvTp7c4p8V" executionInfo={"status": "ok", "timestamp": 1631129356196, "user_tz": -330, "elapsed": 456, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="331b0120-3803-40ff-944d-557bc90b65fd" print(z.size()) # + colab={"base_uri": "https://localhost:8080/"} id="G8z472dV4sLM" executionInfo={"status": "ok", "timestamp": 1631129360441, "user_tz": -330, "elapsed": 505, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3a4bee36-05c4-4acc-9590-1bfa2fa2d8d9" print(z.device) # + [markdown] id="gkJvL2pC4tMn" # The previous section showed a simple way to create tensors; however, there are many other ways to do it. You can create tensors from preexisting numeric data or create random samplings. Tensors can be created from preexisting data stored in array-like structures such as lists, tuples, scalars, or serialized data files, as well as in NumPy arrays. # # The following code illustrates some common ways to create tensors. First, it shows how to create a tensor from a list using torch.tensor(). 
This method can also be used to create tensors from other data structures like tuples, sets, or NumPy arrays: # + id="alVKYnj45bT9" executionInfo={"status": "ok", "timestamp": 1631129714555, "user_tz": -330, "elapsed": 624, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} import numpy # Created from pre-existing arrays w = torch.tensor([1,2,3]) # <1> w = torch.tensor((1,2,3)) # <2> w = torch.tensor(numpy.array([1,2,3])) # <3> # Initialized by size w = torch.empty(100,200) # <4> w = torch.zeros(100,200) # <5> w = torch.ones(100,200) # <6> # Initialized by size with random values w = torch.rand(100,200) # <7> w = torch.randn(100,200) # <8> w = torch.randint(5,10,(100,200)) # <9> # Initialized with specified data type or device w = torch.empty((100,200), dtype=torch.float64, device="cpu") # Initialized to have same size, data type, # and device as another tensor x = torch.empty_like(w) # + [markdown] id="2LVb-lUd5pLa" # 1. from a list # 2. from a tuple # 3. from a numpy array # 4. uninitialized, elements values are not predictable # 5. all elements initialized with 0.0 # 6. all elements initialized with 1.0 # 7. creates a 100 x 200 tensor with elements from a uniform distribution on the interval [0, 1) # 8. elements are random numbers from a normal distribution with mean 0 and variance 1 # 9. elements are random integers between 5 and 10 # + colab={"base_uri": "https://localhost:8080/"} id="Xdx5vlig_kIn" executionInfo={"status": "ok", "timestamp": 1631131339172, "user_tz": -330, "elapsed": 619, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="ebc0a95a-8a12-4b1d-acaa-11d079b00ba2" x = torch.tensor([[1,2,3],[4,5,6]]) print(torch.empty_like(x)) print(torch.empty_like(x)) print(torch.zeros_like(x)) print(torch.ones_like(x)) print(torch.full_like(x, fill_value=5)) # + [markdown] id="SN3rMtBV6HE4" # Following table lists PyTorch functions used to create tensors. 
You should use each one with the torch namespace, e.g., torch.empty(). # + [markdown] id="HuBlN-LC6njl" # | Function | Description | # | -------- | ----------- | # | torch.tensor(data, dtype=None, device=None, <br /> requires_grad=False, pin_memory=False) | Creates a tensor from an existing data structure | # | torch.empty(*size, out=None, dtype=None, <br />layout=torch.strided, device=None, requires_grad=False) | Creates a tensor from uninitialized elements based on the random state of values in memory | # | torch.zeros(*size, out=None, dtype=None, <br />layout=torch.strided, device=None, requires_grad=False) | Creates a tensor with all elements initialized to 0.0 | # | torch.ones(*size, out=None, dtype=None, <br />layout=torch.strided, device=None, requires_grad=False) | Creates a tensor with all elements initialized to 1.0 | # | torch.arange(start=0, end, step=1, out=None, <br />dtype=None, layout=torch.strided, device=None, requires_grad=False) | Creates a 1D tensor of values over a range with a common step value | # | torch.linspace(start, end, steps=100, <br />out=None, dtype=None, layout=torch.strided, <br />device=None, requires_grad=False) | Creates a 1D tensor of linearly spaced points between the start and end | # | torch.logspace(start, end, steps=100, <br />base=10.0, out=None, dtype=None, <br />layout=torch.strided, device=None, requires_grad=False) | Creates a 1D tensor of logarithmically spaced points between the start and end | # | torch.eye(n, m=None, out=None, dtype=None, <br />layout=torch.strided, device=None, requires_grad=False) | Creates a 2D tensor with ones on the diagonal and zeros everywhere else | # | torch.full(size, fill_value, out=None, <br />dtype=None, layout=torch.strided, device=None, requires_grad=False) | Creates a tensor filled with fill_value | # | torch.load(f) | Loads a tensor from a serialized pickle file | # | torch.save(f) | Saves a tensor to a serialized pickle file | # + [markdown] id="d7p4SR2x7TYN" # During deep 
learning development, it’s important to be aware of the data type used by your data and its calculations. So when you create tensors, you should control what data types are being used. As mentioned previously, all tensor elements have the same data type. You can specify the data type when creating the tensor by using the dtype parameter, or you can cast a tensor to a new dtype using the appropriate casting method or the to() method, as shown in the following code: # + colab={"base_uri": "https://localhost:8080/"} id="mLRaLgB_93Og" executionInfo={"status": "ok", "timestamp": 1631130783444, "user_tz": -330, "elapsed": 525, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="24ef8ed3-944d-469c-ff3a-aa9b1ec6de1c" # Specify data type at creation using dtype w = torch.tensor([1,2,3], dtype=torch.float32) # Use casting method to cast to a new data type w.int() # w remains a float32 after cast w = w.int() # w changes to int32 after cast # Use to() method to cast to a new type w = w.to(torch.float64) # <1> w = w.to(dtype=torch.float64) # <2> # Python automatically converts data types during operations x = torch.tensor([1,2,3], dtype=torch.int32) y = torch.tensor([1,2,3], dtype=torch.float32) z = x + y # <3> print(z.dtype) # + [markdown] id="trT-uz9G_HV8" # Table below lists all the available data types in PyTorch. Each data type results in a different tensor class depending on the tensor’s device. The corresponding tensor classes are shown in the two rightmost columns for CPUs and GPUs, respectively. 
# + [markdown] id="D5zCAiIQ-Inm" # | Data type | dtype | CPU tensor | GPU tensor | # | --------- | ----- | ---------- | ---------- | # | 32-bit floating point (default) | torch.float32 or torch.float | torch.​​Float⁠Ten⁠sor | torch.cuda.​Float⁠Tensor | # | 64-bit floating point | torch.float64 or torch.dou⁠ble | torch.​​Dou⁠ble⁠Tensor | torch.cuda.​​Dou⁠bleTensor | # | 16-bit floating point | torch.float16 or torch.half | torch.​Half⁠Tensor | torch.cuda.​Half⁠Tensor | # | 8-bit integer (unsigned) | torch.uint8 | torch.​Byte⁠Tensor | torch.cuda.​Byte⁠Tensor | # | 8-bit integer (signed) | torch.int8 | torch.​Char⁠Tensor | torch.cuda.​Char⁠Tensor | # | 16-bit integer (signed) | torch.int16 or torch.short | torch.​Short⁠Tensor | torch.cuda.​Short⁠Tensor | # | 32-bit integer (signed) | torch.int32 or torch.int | torch.​IntTen⁠sor | torch.cuda.​IntTen⁠sor | # | 64-bit integer (signed) | torch.int64 or torch.long | torch.​Long⁠Tensor | torch.cuda.​Long⁠Tensor | # | Boolean | torch.bool | torch.​Bool⁠Tensor | torch.cuda.​Bool⁠Tensor | # + [markdown] id="8AjkaSqO-7Zu" # **Indexing, Slicing, Combining, and Splitting Tensors** # + [markdown] id="m_VAewgyAjwx" # Once you have created tensors, you may want to access portions of the data and combine or split tensors to form new tensors. The following code demonstrates how to perform these types of operations. You can slice and index tensors in the same way you would slice and index NumPy arrays. 
# + colab={"base_uri": "https://localhost:8080/"} id="uQGhWD-6AlPD" executionInfo={"status": "ok", "timestamp": 1631131457846, "user_tz": -330, "elapsed": 4, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="d4e71867-d856-4022-e32c-812db216e17a" x = torch.tensor([[1,2],[3,4],[5,6],[7,8]]) x # + colab={"base_uri": "https://localhost:8080/"} id="7O6X3OYuAtOa" executionInfo={"status": "ok", "timestamp": 1631131458570, "user_tz": -330, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="89f2c247-2efe-48c5-fdff-1009791d1ca7" # Indexing, returns a tensor print(x[1,1]) # + colab={"base_uri": "https://localhost:8080/"} id="-LbU5OomAthN" executionInfo={"status": "ok", "timestamp": 1631131478134, "user_tz": -330, "elapsed": 807, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="91f2a05b-c509-4295-e596-76a2e511907c" # Indexing, returns a value as a Python number print(x[1,1].item()) # + colab={"base_uri": "https://localhost:8080/"} id="ptvE3KWXAyKE" executionInfo={"status": "ok", "timestamp": 1631131493805, "user_tz": -330, "elapsed": 451, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="45ac3bac-dce8-4cde-b604-459b1e086b29" # Slicing print(x[:2,1]) # + colab={"base_uri": "https://localhost:8080/"} id="42-DNNI4A2Df" executionInfo={"status": "ok", "timestamp": 1631131513968, "user_tz": -330, "elapsed": 612, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="02a75a93-d252-4ea5-90a4-bf0bcf4b38aa" # Boolean indexing # Only keep elements less than 5 print(x[x<5]) # + colab={"base_uri": "https://localhost:8080/"} id="nuG5jm8BA682" executionInfo={"status": "ok", "timestamp": 1631131530965, "user_tz": -330, "elapsed": 510, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} 
outputId="02be09e8-4f1a-45d8-b662-28b7cfb3130b" # Transpose array; x.t() or x.T can be used print(x.t()) # + colab={"base_uri": "https://localhost:8080/"} id="zHp9ZqpHA_HI" executionInfo={"status": "ok", "timestamp": 1631131548192, "user_tz": -330, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="4e0fe869-b7f9-42c2-c83e-51a49c08f2b0" # Change shape; usually view() is preferred over # reshape() print(x.view((2,4))) # + [markdown] id="fCZINmpeBDO1" # You can also combine or split tensors by using functions like torch.stack() and torch.unbind(), respectively, as shown in the following code: # + colab={"base_uri": "https://localhost:8080/"} id="Wu2qkNljBQlP" executionInfo={"status": "ok", "timestamp": 1631131625385, "user_tz": -330, "elapsed": 446, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0d3d90ef-0c3c-4d6a-f42a-f295b76be533" # Combining tensors y = torch.stack((x, x)) print(y) # + colab={"base_uri": "https://localhost:8080/"} id="W9IKh1U-Bsmn" executionInfo={"status": "ok", "timestamp": 1631131718764, "user_tz": -330, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="a07d0050-9ad3-4442-9854-7570cd066ece" x # + colab={"base_uri": "https://localhost:8080/"} id="Zxyw4MgwBWMR" executionInfo={"status": "ok", "timestamp": 1631131697996, "user_tz": -330, "elapsed": 890, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="1736171c-1bfa-416d-8736-a15b5dc155c8" # Splitting tensors a,b = x.unbind(dim=1) print(a,b) # + colab={"base_uri": "https://localhost:8080/"} id="85HKy4e_Bn0V" executionInfo={"status": "ok", "timestamp": 1631131777258, "user_tz": -330, "elapsed": 621, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="9226f00b-ed00-459b-db86-e35a8bfa4ebb" # Splitting tensors a,b,c,d = x.unbind(dim=0) print(a,b,c,d) # + 
[markdown] id="w5GOH2K8BvZ5" # PyTorch provides a robust set of built-in functions that can be used to access, split, and combine tensors in different ways. Table below lists some commonly used functions to manipulate tensor elements. # + [markdown] id="rJUrzJA8CWDm" # | Function | Description | # | -------- | ----------- | # | torch.**cat**() | Concatenates the given sequence of tensors in the given dimension. | # | torch.**chunk**() | Splits a tensor into a specific number of chunks. Each chunk is a view of the input tensor. | # | torch.**gather**() | Gathers values along an axis specified by the dimension. | # | torch.**index\_select**() | Returns a new tensor that indexes the input tensor along a dimension using the entries in the index, which is a LongTensor. | # | torch.**masked\_select**() | Returns a new 1D tensor that indexes the input tensor according to the Boolean mask, which is a BoolTensor. | # | torch.**narrow**() | Returns a tensor that is a narrow version of the input tensor. | # | torch.**nonzero**() | Returns the indices of nonzero elements. | # | torch.**reshape**() | Returns a tensor with the same data and number of elements as the input tensor, but a different shape. </br>Use view() instead to ensure the tensor is not copied. | # | torch.**split**() | Splits the tensor into chunks. Each chunk is a view or subdivision of the original tensor. | # | torch.**squeeze**() | Returns a tensor with all the dimensions of the input tensor of size 1 removed. | # | torch.**stack**() | Concatenates a sequence of tensors along a new dimension. | # | torch.**t**() | Expects the input to be a 2D tensor and transposes dimensions 0 and 1. | # | torch.**take**() | Returns a tensor at specified indices when slicing is not continuous. | # | torch.**transpose**() | Transposes only the specified dimensions. | # | torch.**unbind**() | Removes a tensor dimension by returning a tuple of the removed dimension. 
| # | torch.**unsqueeze**() | Returns a new tensor with a dimension of size 1 inserted at the specified position. | # | torch.**where**() | Returns a tensor of selected elements from either one of two tensors, depending on the specified condition. | # + [markdown] id="lJLaGnC7C3xR" # Deep learning development is strongly based on mathematical computations, so PyTorch supports a very robust set of built-in math functions. Whether you are creating new data transforms, customizing loss functions, or building your own optimization algorithms, you can speed up your research and development with the math functions provided by PyTorch. # + [markdown] id="OsVAAMcODZ2L" # PyTorch supports many different types of math functions, including pointwise operations, reduction functions, comparison calculations, and linear algebra operations, as well as spectral and other math computations. The first category of useful math operations we’ll look at are pointwise operations. Pointwise operations perform an operation on each point in the tensor individually and return a new tensor. # # They are useful for rounding and truncation as well as trigonometrical and logical operations. By default, the functions will create a new tensor or use one passed in by the out parameter. If you want to perform an in-place operation, remember to append an underscore to the function name. # # Table below lists some commonly used pointwise operations. 
# + [markdown] id="hNIlm_R5DlCy" # | Operation type | Sample functions | # | -------------- | ---------------- | # | Basic math | add(), div(), mul(), neg(), reciprocal(), true\_divide() | # | Truncation | ceil(), clamp(), floor(), floor\_divide(), fmod(), frac(), lerp(), remainder(), round(), sigmoid(), trunc() | # | Complex numbers | abs(), angle(), conj(), imag(), real() | # | Trigonometry | acos(), asin(), atan(), cos(), cosh(), deg2rad(), rad2deg(), sin(), sinh(), tan(), tanh() | # | Exponents and logarithms | exp(), expm1(), log(), log10(), log1p(), log2(), logaddexp(), pow(), rsqrt(), sqrt(), square() | # | Logical | logical\_and(), logical\_not(), logical\_or(), logical\_xor() | # | Cumulative math | addcdiv(), addcmul() | # | Bitwise operators | bitwise\_not(), bitwise\_and(), bitwise\_or(), bitwise\_xor() | # | Error functions | erf(), erfc(), erfinv() | # | Gamma functions | digamma(), lgamma(), mvlgamma(), polygamma() | # + [markdown] id="6SfQFUgAD4sS" # The second category of math functions we’ll look at are reduction operations. Reduction operations reduce a bunch of numbers down to a single number or a smaller set of numbers. That is, they reduce the dimensionality or rank of the tensor. Reduction operations include functions for finding maximum or minimum values as well as many statistical calculations, like finding the mean or standard deviation. # # These operations are frequently used in deep learning. For example, deep learning classification often uses the argmax() function to reduce softmax outputs to a dominant class. 
# + [markdown] id="5F3pQs2vFAqX" # | Function | Description | # | -------- | ----------- | # | torch.**argmax**(_input, dim, keepdim=False, out=None_) | Returns the index(es) of the maximum value across all elements, or just a dimension if it’s specified | # | torch.**argmin**(_input, dim, keepdim=False, out=None_) | Returns the index(es) of the minimum value across all elements, or just a dimension if it’s specified | # | torch.**dist**(_input, dim, keepdim=False, out=None_) | Computes the _p_\-norm of two tensors | # | torch.**logsumexp**(_input, dim, keepdim=False, out=None_) | Computes the log of summed exponentials of each row of the input tensor in the given dimension | # | torch.**mean**(_input, dim, keepdim=False, out=None_) | Computes the mean or average across all elements, or just a dimension if it’s specified | # | torch.**median**(_input, dim, keepdim=False, out=None_) | Computes the median or middle value across all elements, or just a dimension if it’s specified | # | torch.**mode**(_input, dim, keepdim=False, out=None_) | Computes the mode or most frequent value across all elements, or just a dimension if it’s specified | # | torch.**norm**(_input, p='fro', dim=None,__keepdim=False,__out=None, dtype=None_) | Computes the matrix or vector norm across all elements, or just a dimension if it’s specified | # | torch.**prod**(_input, dim, keepdim=False, dtype=None_) | Computes the product of all elements, or of each row of the input tensor if it’s specified | # | torch.**std**(_input, dim, keepdim=False, out=None_) | Computes the standard deviation across all elements, or just a dimension if it’s specified | # | torch.**std\_mean**(_input, unbiased=True_) | Computes the standard deviation and mean across all elements, or just a dimension if it’s specified | # | torch.**sum**(_input, dim, keepdim=False, out=None_) | Computes the sum of all elements, or just a dimension if it’s specified | # | torch.**unique**(_input, dim, keepdim=False, out=None_) | 
Removes duplicates across the entire tensor, or just a dimension if it’s specified | # | torch.unique\_​consecutive(_input, dim, keepdim=False, out=None_) | Similar to torch.unique() but only removes consecutive duplicates | # | torch.**var**(_input, dim, keepdim=False, out=None_) | Computes the variance across all elements, or just a dimension if it’s specified | # | torch.**var\_mean**(_input, dim, keepdim=False, out=None_) | Computes the mean and variance across all elements, or just a dimension if it’s specified | # + [markdown] id="L52k6ec3FQde" # Note that many of these functions accept the dim parameter, which specifies the dimension of reduction for multidimensional tensors. This is similar to the axis parameter in NumPy. By default, when dim is not specified, the reduction occurs across all dimensions. Specifying dim = 1 will compute the operation across each row. For example, torch.mean(x,1) will compute the mean for each row in tensor x. # + [markdown] id="1chfJ5HJF1Bp" # > Tip: It’s common to chain methods together. For example, torch.rand(2,2).max().item() creates a 2 × 2 tensor of random floats, finds the maximum value, and returns the value itself from the resulting tensor. # + [markdown] id="qUE7FYCCF3JW" # Next, we’ll look at PyTorch’s comparison functions. Comparison functions usually compare all the values within a tensor, or compare one tensor’s values to another’s. They can return a tensor full of Booleans based on each element’s value such as torch.eq() or torch.is_boolean(). There are also functions to find the maximum or minimum value, sort tensor values, return the top subset of tensor elements, and more. # # Table below lists some commonly used comparison functions for your reference. 
# + [markdown] id="YXtj9JNOF-mn" # | Operation type | Sample functions | # | -------------- | ---------------- | # | Compare a tensor to other tensors | eq(), ge(), gt(), le(), lt(), ne() or \==, \>, \>=, <, <=, !=, respectively | # | Test tensor status or conditions | isclose(), isfinite(), isinf(), isnan() | # | Return a single Boolean for the entire tensor | allclose(), equal() | # | Find value(s) over the entire tensor or along a given dimension | argsort(), kthvalue(), max(), min(), sort(), topk() | # + [markdown] id="2rqA_1urGJRf" # The next type of mathematical functions we’ll look at are linear algebra functions. Linear algebra functions facilitate matrix operations and are important for deep learning computations. # # Many computations, including gradient descent and optimization algorithms, use linear algebra to implement their calculations. PyTorch supports a robust set of built-in linear algebra operations, many of which are based on the Basic Linear Algebra Subprograms (BLAS) and Linear Algebra Package (LAPACK) standardized libraries. 
# + [markdown] id="vYenKx13GSTS" # | Function | Description | # | -------- | ----------- | # | torch.**matmul**() | Computes a matrix product of two tensors; supports broadcasting | # | torch.**chain\_matmul**() | Computes a matrix product of _N_ tensors | # | torch.**mm**() | Computes a matrix product of two tensors (if broadcasting is required, use matmul()) | # | torch.**addmm**() | Computes a matrix product of two tensors and adds it to the input | # | torch.**bmm**() | Computes a batch of matrix products | # | torch.**addbmm**() | Computes a batch of matrix products and adds it to the input | # | torch.**baddbmm**() | Computes a batch of matrix products and adds it to the input batch | # | torch.**mv**() | Computes the product of the matrix and vector | # | torch.**addmv**() | Computes the product of the matrix and vector and adds it to the input | # | torch.**matrix\_power** | Returns a tensor raised to the power of _n_ (for square tensors) | # | torch.**eig**() | Finds the eigenvalues and eigenvectors of a real square tensor | # | torch.**inverse**() | Computes the inverse of a square tensor | # | torch.**det**() | Computes the determinant of a matrix or batch of matrices | # | torch.**logdet**() | Computes the log determinant of a matrix or batch of matrices | # | torch.**dot**() | Computes the inner product of two tensors | # | torch.**addr**() | Computes the outer product of two tensors and adds it to the input | # | torch.**solve**() | Returns the solution to a system of linear equations | # | torch.**svd**() | Performs a single-value decomposition | # | torch.**pca\_lowrank**() | Performs a linear principle component analysis | # | torch.**cholesky**() | Computes a Cholesky decomposition | # | torch.**cholesky\_inverse**() | Computes the inverse of a symmetric positive definite matrix and returns the Cholesky factor | # | torch.**cholesky\_solve**() | Solves a system of linear equations using the Cholesky factor | # + [markdown] id="rmCdezGDGc-1" # The 
final type of mathematical operations we’ll consider are spectral and other math operations. Depending on the domain of interest, these functions may be useful for data transforms or analysis. For example, spectral operations like the fast Fourier transform (FFT) can play an important role in computer vision or digital signal processing applications. # + [markdown] id="e4pOe7roGrGN" # | Operation type | Sample functions | # | -------------- | ---------------- | # | Fast, inverse, and short-time Fourier transforms | fft(), ifft(), stft() | # | Real-to-complex FFT and complex-to-real inverse FFT (IFFT) | rfft(), irfft() | # | Windowing algorithms | bartlett\_window(), blackman\_window(),hamming\_window(), hann\_window() | # | Histogram and bin counts | histc(), bincount() | # | Cumulative operations | cummax(), cummin(), cumprod(), cumsum(),trace() (sum of the diagonal), </br> einsum() (sum of products using Einstein summation) | # | Normalization functions | cdist(), renorm() | # | Cross product, dot product, and Cartesian product | cross(), tensordot(), cartesian\_prod() | # | Functions that create a diagonal tensor with elements of the input tensor | diag(), diag\_embed(), diag\_flat(), diagonal() | # | Einstein summation | einsum() | # | Matrix reduction and restructuring functions | flatten(), flip(), rot90(), repeat\_interleave(), meshgrid(), roll(), combinations() | # | Functions that return the lower or upper triangles and their indices | tril(), tril\_indices, triu(), triu\_indices() | # + [markdown] id="xo2INk3MG-yP" # One function, backward(), is worth calling out in its own subsection because it’s what makes PyTorch so powerful for deep learning development. The backward() function uses PyTorch’s automatic differentiation package, torch.autograd, to differentiate and compute gradients of tensors based on the chain rule. 
# + colab={"base_uri": "https://localhost:8080/"} id="0y6hSl9PHPwh" executionInfo={"status": "ok", "timestamp": 1631133192775, "user_tz": -330, "elapsed": 522, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="71d6cb87-37f6-414b-8427-2dfeb7cb75c5" x = torch.tensor([[1,2,3],[4,5,6]], dtype=torch.float, requires_grad=True) print(x) # + colab={"base_uri": "https://localhost:8080/"} id="ZzqsahapHWKV" executionInfo={"status": "ok", "timestamp": 1631133204109, "user_tz": -330, "elapsed": 425, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3a474ad1-549f-4497-c43f-b6acf9b708a3" f = x.pow(2).sum() print(f) # + colab={"base_uri": "https://localhost:8080/"} id="aajVVfvhHU1k" executionInfo={"status": "ok", "timestamp": 1631133219903, "user_tz": -330, "elapsed": 634, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="33b1db4f-a128-47f6-8475-edc6d9c05283" f.backward() print(x.grad) # df/dx = 2x
_docs/nbs/reco-tut-ml-pytorch-fundamentals-tensors.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="gUEGZLwQq0Vl" # # "Intro til Anvendt Matematik og Python opfriskning" # > "19 April 2021 - HA-AAUBS" # # - toc: true # - branch: master # - badges: true # - comments: true # - author: <NAME> # - categories: [intro, forelæsning] # + [markdown] id="xiOWOHbyqVa1" # # Intro til Anvendt Matematik og Python opfriskning # # # - Matematik bruges i finance, økonomistyring, data science, tech og meget andet - men også helt sikkert senere hvis I skal videre med en kandidat. # - Analytiske skills er meget [eftertragtede på arbejdsmarkedet](https://youtu.be/u2oupkbxddc # ) # > [Ny DI-analyse viser](https://www.danskindustri.dk/tech-der-taller/analysearkiv/analyser/2020/10/kompetencer-til-et-digitalt-arbejdsliv/), at den digitale omstilling i virksomheder ikke kan drives af it-specialisterne alene. Der er i stærkt stigende omfang behov for, at samfundsvidenskabelige profiler også har gode digitale kompetencer. # # + [markdown] id="zkpGMgOTr1eT" # ### Hvad sker her fra idag til 21 Juni? 
# # - overblik over linkeær algebra og calculus (ikke meget mere end B niveau) # - Brug gerne fx https://www.webmatematik.dk/ # - $\LaTeX$ [cheat-sheet](http://tug.ctan.org/info/undergradmath/undergradmath.pdf) # - [Markdown cheatsheet](https://www.markdownguide.org/cheat-sheet/) # - Lære at **bruge** matematik - ikke være matematiker¨ # - lære fra et data/computer science perspektiv, hvor det handler mest at kunne implementere matematik direkte og bruge til fx at bygge en søgemaskine, recommender system, visualisere eller automatisere BI # - "computational tilgang" - Python som tool # - Danglish # + [markdown] id="gsygonndQP7f" # ### Pingvin Motivation og Intuition - Fra Data og Statistik til Liniær Algebra # + [markdown] id="PPHyBrDOHJBv" # Pinguin data: https://github.com/allisonhorst/palmerpenguins # ![](https://github.com/allisonhorst/palmerpenguins/raw/master/man/figures/lter_penguins.png) # # Vi bygger en søgemaskine til pingviner 🤔 # # Antagelse: # - Pingviner kan bedst lide at være sammen med dem, der ligner dem mest # # + id="GZ40xDOXNau-" import pandas as pd import numpy as np np.set_printoptions(suppress=True) import seaborn as sns sns.set(color_codes=True, rc={'figure.figsize':(10,8)}) # + id="XfQG6XzHQMIA" pinguins = pd.read_csv("https://github.com/allisonhorst/palmerpenguins/raw/5b5891f01b52ae26ad8cb9755ec93672f49328a8/data/penguins_size.csv") # + colab={"base_uri": "https://localhost:8080/", "height": 198} id="dns4ru0pPxN7" outputId="34e8320b-63ea-4acc-aa3e-76b98971e586" pinguins.head() # + colab={"base_uri": "https://localhost:8080/"} id="bQX8ibJ2P83O" outputId="14bc289a-5637-42ab-d403-9c08b784093f" pinguins = pinguins.dropna() pinguins.species_short.value_counts() # + id="F-4jI1HSd38w" pinguins.index = range(len(pinguins)) # + colab={"base_uri": "https://localhost:8080/", "height": 744} id="HwmeRp9DQAIN" outputId="20afe723-9187-4694-abe0-b47ffa6d62af" # Hvordan ser vores data ud? 
sns.pairplot(pinguins, hue='species_short', kind="reg", corner=True, markers=["o", "s", "D"], plot_kws={'line_kws':{'color':'white'}}) # + [markdown] id="vfscWKkwQ3fA" # Vi danner alle variable om til Z-scores (så de er på samme skala) # # $Z = \frac{x-\mu}{\sigma} $ # # x = værdi, # $\mu$ = gennemsnit, $\sigma$ = stadnardafvigelse # # + id="-mIH0WT5Qmxr" # scaling - vi tager kun de 4 nummeriske variable from sklearn.preprocessing import StandardScaler scaled_pinguins = StandardScaler().fit_transform(pinguins.loc[:,'culmen_length_mm':'body_mass_g']) # + colab={"base_uri": "https://localhost:8080/", "height": 268} id="N-DzmjDxR27Q" outputId="a8a653d3-d7c1-4c06-bed5-4f364eac5a1f" # plot af alle skalerede variable, som nu har gennemsnit ~ 0 og std ~ 1 for i in range(4): sns.kdeplot(scaled_pinguins[:,i]) # + colab={"base_uri": "https://localhost:8080/"} id="25cQ5cYZUZL8" outputId="7b538e20-dfcc-4bc3-dd37-af6db0b7055a" print(scaled_pinguins.shape) scaled_pinguins # + colab={"base_uri": "https://localhost:8080/"} id="6XsXSJ17UhpO" outputId="bf857c16-2df2-43d1-e0a8-e7b6d4f07829" # pinguin 1 kan representeres som en 4D række-vektor scaled_pinguins[0,:] # + [markdown] id="UCBSjDyLU46v" # Nu bruger vi noget, som vi måske kommer til at se på helt til sidst i Liniær Algebra, næmlig Principal Component Analysis eller PCA. # - læs mere om PCA og hvordan man [bygger det fra bunden](https://towardsdatascience.com/principal-component-analysis-pca-from-scratch-in-python-7f3e2a540c51)) # - Hvis du er meget interesseret - [læs her](https://jakevdp.github.io/PythonDataScienceHandbook/05.09-principal-component-analysis.html) # # Vi bruger 2 components (dvs. 
vores 4D vektorer bliver skrumpet til 2D hvor PCA forsøger at beholde så meget information som muligt # # # + id="SrPFncJyVlIA" # import PCA from sklearn.decomposition import PCA pca = PCA(n_components=2) # + id="GnOOM_bCWsmN" # Transform penguin matrix med PCA pca_pinguins = pca.fit_transform(scaled_pinguins) # + colab={"base_uri": "https://localhost:8080/"} id="48dOObkaWyok" outputId="4789f3eb-c739-4b12-ea94-fcd0160765e7" print(pca_pinguins.shape) pca_pinguins # + [markdown] id="lto_aTeTXmCd" # Nu bruger vi denne 2D matrix og plotter, hvor 1.kollonne = x; 2. kolonne = y; vi bruger farver fra pingvin-arter i vores start-data # + colab={"base_uri": "https://localhost:8080/", "height": 285} id="kDDXidYzX3dO" outputId="261432ef-45e5-46c7-9bb2-fa136278ca91" sns.scatterplot(x = pca_pinguins[:,0], y = pca_pinguins[:,1], hue = pinguins['species_short'] ) # + [markdown] id="SL4PG9bpYtvh" # Hvordan finder vi så en buddy for en given pingvin? # # - det er den, der er tættest på 🤖 # # **Eucledian Distance** # # ![](https://upload.wikimedia.org/wikipedia/commons/5/55/Euclidean_distance_2d.svg) # # **Vi kan også gå fra 2D til n-D** # # $d(\vec{u}, \vec{v}) = \| \vec{u} - \vec{v} \| = \sqrt{(u_1 - v_1)^2 + (u_2 - v_2)^2 ... 
(u_n - v_n)^2}$ # # fx # # Vi kan regne ED mellem # $\vec{u} = (2, 3, 4, 2)$ # # og # # $\vec{v} = (1, -2, 1, 3)$ # # $\begin{align} d(\vec{u}, \vec{v}) = \| \vec{u} - \vec{v} \| = \sqrt{(2-1)^2 + (3+2)^2 + (4-1)^2 + (2-3)^2} \\ d(\vec{u}, \vec{v}) = \| \vec{u} - \vec{v} \| = \sqrt{1 + 25 + 9 + 1} \\ d(\vec{u}, \vec{v}) = \| \vec{u} - \vec{v} \| = \sqrt{36} \\ d(\vec{u}, \vec{v}) = \| \vec{u} - \vec{v} \| = 6 \end{align}$ # + colab={"base_uri": "https://localhost:8080/"} id="x3wjkyNSbZQs" outputId="c7d05d60-bfd5-40db-a22e-7a805a5d9a8e" # hvor tæt er de første 2 print(scaled_pinguins[0,:]) print(scaled_pinguins[1,:]) # + id="_mz2osB3bteN" # kvardarod er ikke standard og skal importeres from math import sqrt # + colab={"base_uri": "https://localhost:8080/"} id="BzBDiwA5b0N5" outputId="e5037eca-a692-42fa-876e-dbd3dfdaf7d4" # manuelt sqrt((-0.89765322--0.82429023)**2 + (0.78348666-0.12189602)**2 + (-1.42952144--1.07240838)**2 + (-0.57122888--0.50901123)**2) # + colab={"base_uri": "https://localhost:8080/"} id="HxQe8Z9ccPN_" outputId="c7e49ec9-c0bb-475a-c6f8-44365e932bbf" # med numpy np.linalg.norm(scaled_pinguins[0,:] - scaled_pinguins[1,:]) # + colab={"base_uri": "https://localhost:8080/"} id="EM_049J_dJkI" outputId="b5f36850-d75d-4472-d96e-80f6c9f08731" np.linalg.norm(scaled_pinguins[0,:] - scaled_pinguins[2,:]) # + colab={"base_uri": "https://localhost:8080/", "height": 198} id="-3Lwqh29dMIx" outputId="8fca90bd-29e5-4ffb-918b-0f7c8cfdc8be" pinguins.iloc[:5,:] # + colab={"base_uri": "https://localhost:8080/", "height": 198} id="xjMpQZnNeJyQ" outputId="a77e1cdd-260f-4988-aab9-291fa8ca64ed" pinguins.iloc[-5:,:] # + colab={"base_uri": "https://localhost:8080/"} id="Ii5Y1wYVdmna" outputId="9a4f2212-9690-4d6f-a838-485324b2d50d" np.linalg.norm(scaled_pinguins[0,:] - scaled_pinguins[333,:]) # + colab={"base_uri": "https://localhost:8080/"} id="Z6CQr2Tndvzv" outputId="ee1f4e2c-b5e6-46d2-e201-0f9fc2e03288" np.linalg.norm(scaled_pinguins[0,:] - scaled_pinguins[331,:]) # + 
id="9g_AO6cYIdgM" import matplotlib.pyplot as plt # + colab={"base_uri": "https://localhost:8080/", "height": 272} id="yhmIrMP3HmM6" outputId="17962171-c1a7-469e-b27f-3814f9d9dcf1" # This code draws the x and y axis as lines. points = [0,1,2,333,331] fig, ax = plt.subplots() ax.scatter(pca_pinguins[[points],0], pca_pinguins[[points],1]) plt.axhline(0, c='black', lw=0.5) plt.axvline(0, c='black', lw=0.5) plt.xlim(-2,3) plt.ylim(-1,1) plt.quiver(0, 0, pca_pinguins[0,0], pca_pinguins[0,1], angles='xy', scale_units='xy', scale=1, color='blue') plt.quiver(0, 0, pca_pinguins[1,0], pca_pinguins[1,1], angles='xy', scale_units='xy', scale=1, color='green') plt.quiver(0, 0, pca_pinguins[2,0], pca_pinguins[2,1], angles='xy', scale_units='xy', scale=1, color='yellow') plt.quiver(0, 0, pca_pinguins[333,0], pca_pinguins[333,1], angles='xy', scale_units='xy', scale=1, color='violet') plt.quiver(0, 0, pca_pinguins[331,0], pca_pinguins[331,1], angles='xy', scale_units='xy', scale=1, color='black') for i in points: ax.annotate(str(i), (pca_pinguins[i,0], pca_pinguins[i,1])) # + [markdown] id="qcA7adNNdILA" # Man kunne nu enten skrive noget, som gentager denne beregning for alle kombinationer...eller # + id="YXX3jMVRdHSo" from sklearn.metrics.pairwise import euclidean_distances # + id="NVlWIKGdeloD" euclidean_matrix = euclidean_distances(scaled_pinguins) # + colab={"base_uri": "https://localhost:8080/"} id="LX4ci5xHeqig" outputId="3cb5fa32-a0e2-4d9b-82bf-655e741d0ccd" print(euclidean_matrix.shape) euclidean_matrix # + colab={"base_uri": "https://localhost:8080/"} id="BU-Ip2hKe6fV" outputId="0a4aaa63-f8a6-4d0a-fcf0-2cc15abb2e98" np.argmin(euclidean_matrix[0,:]) # + colab={"base_uri": "https://localhost:8080/"} id="_O6M3-WYfmm8" outputId="8c9566bc-74e6-45c6-9a3b-ddea96c33e88" np.argsort(euclidean_matrix[0,:])[:3] # + colab={"base_uri": "https://localhost:8080/"} id="rH-5gnNVfrxu" outputId="911ad0e5-10d6-4904-e92b-896ec59eedb7" scaled_pinguins[[0,139,16],:] # + colab={"base_uri": 
"https://localhost:8080/"} id="hSmQ_FEwf294" outputId="b1405902-22c1-4344-9a7b-2a08533adbdc" euclidean_distances(scaled_pinguins[[0,139,16],:]) # + [markdown] id="fCYzvrtAsdKk" # ### Python fresh-up # # - Simple datatyper # - Grundlæggende matematiske operationer # - Lister # - Funktioner # - Control Flow # # + [markdown] id="CURX7B8MErQB" # #### Simple datatyper # - Integers - hele tal **6** # - Floating-Point Numbers - decimaltal **3.2** # - Boolean - digital data type / bit **True / False** # - String - text **Roman* # + colab={"base_uri": "https://localhost:8080/"} id="B6REsp_PDnSu" outputId="3e138df2-e9b0-4e0f-eaab-3c5f02b305c8" i = 6 print(i, type(i)) # + colab={"base_uri": "https://localhost:8080/"} id="zGSsNf0HDnSu" outputId="75c15b62-e30b-44bc-8a78-5be5eb496863" x = 3.2 print(x, type(x)) # + colab={"base_uri": "https://localhost:8080/"} id="hUEiXuqFFyEJ" outputId="8454ee6e-955c-42cc-e166-1433b61eda69" t = i == 6 print(t, type(t)) # + colab={"base_uri": "https://localhost:8080/"} id="4XLozuqmDnSv" outputId="630c4b45-0c6d-45cb-8e6a-8fddb1da5985" s = 'Hello' print(s, type(s)) # + [markdown] id="OUbaOzCNGhZ8" # #### Grundlæggende matematiske operationer # + colab={"base_uri": "https://localhost:8080/"} id="BQTuxXmTDnSw" outputId="19919b20-ea44-42fb-d58e-f6eb59963141" a = 2.0 b = 3.0 print(a+b, a*b, a-b, a/b, a**2, a+b**2, (a+b)**2) # + colab={"base_uri": "https://localhost:8080/"} id="itozIzkcG8HE" outputId="6bc88aec-c370-47c1-89eb-c492d70e0907" c = a + b print(c) # + colab={"base_uri": "https://localhost:8080/"} id="Bge7gG0WPJrD" outputId="97201f1c-f1b2-4171-e1e9-eaf1866a983a" a + b == c # + colab={"base_uri": "https://localhost:8080/"} id="PhCZm_-jPMfC" outputId="11997fda-31df-44ad-c44c-111d4920a176" a + b < c # + [markdown] id="cWONRfDiP9Ka" # #### Lister # # man kan pakke alt i en liste :-) # + colab={"base_uri": "https://localhost:8080/"} id="ruE3G1bkQBjl" outputId="48f0909a-937a-488e-df84-70894b8becfa" l = ['Eskil', 1.0, sqrt] type(l) # + 
colab={"base_uri": "https://localhost:8080/"} id="8EqId_HKQLga" outputId="ede367bf-ebd8-49ef-f2fe-2b4714f4bb44" l[2] # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="GbE9PAtlQOQ1" outputId="c9f0309d-e154-45f5-a3e7-3b71e56df706" l[0] # + id="kcJfx16YQRjc" l.append('Roman') # + colab={"base_uri": "https://localhost:8080/"} id="HGGJ8RYqQTc2" outputId="aa5a677e-343e-4762-ff03-f005f6321e88" l # + id="sC4vvCtwQUpG" l.extend(['Marie',37]) # + colab={"base_uri": "https://localhost:8080/"} id="_86NPdToQb5M" outputId="c9e1499a-1055-4a78-fd19-1f073bb0e233" l # + colab={"base_uri": "https://localhost:8080/"} id="jvB_UegHQdN4" outputId="40f84402-1c1d-4c02-eb11-aa61128fc72c" l.pop(2) # + colab={"base_uri": "https://localhost:8080/"} id="-chlUUU7Qk_Q" outputId="dc4773aa-0671-4373-9c00-3e042acb54a9" l # + [markdown] id="9OX29mYHQ4Hg" # #### Funktioner # # Funktioner har (normalt) in og outputs. $a$ og $b$ er vores input her og funktionen producerer $\sqrt{a^2 + b^2}$ som output. # # Vi prøver lige ... # # $\begin{align} a^2 + b^2 = c^2 \rightarrow c = \sqrt{a^2 + b^2} \end{align}$ # + id="uoljW9OmQ5zk" def pythagoras(a, b): return sqrt(a**2 + b**2) # + colab={"base_uri": "https://localhost:8080/"} id="z9h9q8EDTLm5" outputId="65c0e21d-c3ac-4cf3-cf71-ed8a7785185b" pythagoras(1,2) # + id="_uJ4UQ0XTVit" # Hvis man gør det rigtigt, så er det en god ide at kommentere hvad der sker. 
# Her er det en no-brainer men funktioner kan blive indviklede og # det er good-practice at skrive "docstrings" til en anden eller en selv (i) def pythagoras(a, b): """ Computes the length of the hypotenuse of a right triangle Arguments a, b: the two lengths of the right triangle """ return sqrt(a**2 + b**2) # + [markdown] id="5Pr3ESb0TvT2" # ##### Mini-assignment # * Lav en funktion, som tager to punkter $(x_1, y_1), (x_2, y_2)$ på en linje og beregner hældning $a$ # # $$ y = ax + b$$ # # $$ a = \frac{y_2- y_1}{x_2 - x_1}$$ # + colab={"base_uri": "https://localhost:8080/", "height": 285} id="fh4kJ7IVUyRa" outputId="9e58a2ee-f197-4bd0-b8ea-60cb9378f154" plt.plot((1,2), (2,3), 'ro-') plt.plot((1,2), (2,2), 'bo-') plt.plot((2,2), (2,3), 'bo-') # + id="qy97WE5rVyea" # + id="TvFUIXjQWWTT" # slope(1,2,2,3) # + [markdown] id="XC1zOcA4QsWr" # #### Control flow # + id="ygJEozBgQuPz" def isNegative(n): if n < 0: return True else: return False # + [markdown] id="kMAqK3nWXsit" # ##### Mini-assignment # * Lav en funktion `KtoC` som regner Kelvin om til Celcius # $$ C = K - 273.15 \quad \text{ved} \quad C\geq - 273.15$$ # Funktionen udgiver `None` hvis $C < -273.15$ # + colab={"base_uri": "https://localhost:8080/"} id="l2f5BtM3Y7Tx" outputId="290d6fe3-409e-441f-92ee-ea80cbf4bd1b" list(range(10)) # + colab={"base_uri": "https://localhost:8080/"} id="sIgL1stnYyqH" outputId="d6de92c2-c008-4545-c61e-4fb78aaeae29" # for-loop even = [] # tom liste for i in range(10): even.append(i*2) print(even) # + colab={"base_uri": "https://localhost:8080/"} id="LbP_odmPZJz_" outputId="1af00975-1691-4517-f8a8-ee07f4785678" # list-comprehension even = [2*i for i in range(10)] print(even) # + [markdown] id="CTY9Xdb_ZZRr" # ##### Mini-assignment # 1. Beregn summen af integers 1 ... 100 ved at bruge `sum`, list-comprehension, for-loop # 2. Beregn summen af integers 1 ... 
100 ved at bruge partial-sum formula # $$ \sum_{k=1}^n k = 1 + 2 + \cdots + (n-1) + n = \frac{n(n+1)}{2}$$ # + [markdown] id="gvY6us0dt0DB" # ### Matematik fresh-up # alle øvelser taget fra https://tutorial.math.lamar.edu/Problems/Alg/Preliminaries.aspx # # Erfaringen viser, at det er en god idé at få sig en god routine med at løse matematiske problemer. # # - Integer Exponents # - Rational Exponents # - Radicals # - Polynomials # # Vi arbejder old-school med papir men bruger også `SymPy` for at tjekke vores løsninger # + [markdown] id="tRD-SXDjxo_e" # #### Integer Exponents # # $- {6^2} + 4 \cdot {3^2}$ # # ${\left( {2{w^4}{v^{ - 5}}} \right)^{ - 2}}$ (løsning med kun positive eksponenter!) # + id="-NwimLZgygAm" from sympy import * # + colab={"base_uri": "https://localhost:8080/", "height": 37} id="ltgcIXThyk5H" outputId="dc0aa6f3-9b63-4cba-c796-b344696013f2" simplify(-6**2+4*3**2) # + colab={"base_uri": "https://localhost:8080/", "height": 55} id="opLp_rD7ysbv" outputId="16def955-5d63-4793-cadd-92ccc83fdbec" w, v = symbols('w v') simplify((2*w**4*v**-5)**-2) # + [markdown] id="8hJszpNc1n6M" # #### Rational Exponents # ${\left( { - 125} \right)^{\frac{1}{3}}}$ # # ${\left( {{a^3}\,{b^{ - \,\,\frac{1}{4}}}} \right)^{\frac{2}{3}}}$ # + colab={"base_uri": "https://localhost:8080/", "height": 37} id="d8rY974e8O9D" outputId="7cd643af-6995-47b5-c0d9-f5454e958bfa" simplify(-125**(1/3), rational=True) # + colab={"base_uri": "https://localhost:8080/", "height": 65} id="mbGwQ_wg4pNm" outputId="23af48ff-11d3-47f0-8f05-70abab0a0a7f" a, b = symbols('a b') simplify((a**3*b**(-1/4))**(2/3), rational=True) # + [markdown] id="PiUROziJbJPC" # #### Radicals # $$\begin{array}{c} \sqrt[7]{y}\\ \sqrt[3]{{{x^2}}} \\ \sqrt[3]{{ - 512}} \\ \sqrt x \left( {4 - 3\sqrt x } \right)\end{array}$$ # + id="PvmM6IODdVQT" x, y, z = symbols('x, y , z') # + colab={"base_uri": "https://localhost:8080/", "height": 39} id="aiwT042tLbY7" outputId="4917bee7-9e17-487f-e2dc-a8252178ccfd" 
simplify((x**2)**(1/3), rational=True) # + colab={"base_uri": "https://localhost:8080/", "height": 37} id="gmRpNR1WdPHg" outputId="12d9b8cb-5ec3-480c-e892-192f63dc7958" simplify(-512**(1/3), rational=True) # + colab={"base_uri": "https://localhost:8080/", "height": 39} id="W565yrskdwn8" outputId="3a7669ea-c88e-4df4-cc29-b08c4d022b7c" simplify(sqrt(x)*(4 - 3*sqrt(x)), rational = True) # + [markdown] id="P3ek1Cjafwui" # #### Polynomials # # $$(4{x^3} - 2{x^2} + 1) + (7{x^2} + 12x)$$ # # + colab={"base_uri": "https://localhost:8080/", "height": 39} id="9VNfxEeMfLDI" outputId="8dc54221-0c7a-4b24-8d05-aba386f118e0" simplify((4*x**3-2*x**2+1)+(7*x**2+12*x))
_notebooks/test/_2021-04-19-intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![alt text for screen readers](UTCClogo.png "Univerity of the Thai Chamber of Commerce") # # <center>Chapter 4 : Python Control Flow</center> # # ## ฟังก์ชั่น range() # # * ใช้กำหนดขอบเขตของข้อมูล # - range( stop ) : ช่วงข้อมูลตั้งแต่ 0 ถึง stop – 1 # - range(start,stop,step) : ช่วงข้อมูลตั้งแต่ 0 โดยค่าเพิ่มขึ้นครั้งละ step หยุดข้อมูลเมื่อถึง stop – 1 # * โดยที่ start คือ ค่าเริ่มต้น # * stop คือ ขอบเขตบนของช่วงข้อมูล แต่ไม่รวมค่าที่ stop # * step คือ ระยะห่างช่วงข้อมูลแต่ละช่วง # # **ตัวอย่าง** # # + # ตัวอย่างคำสั่งฟังก์ชั่น range() range_5 = range(5) # ทดลองแสดงผลผ่านคำสั่ง print print( range_5) # แสดงผลออกเป็น range(5) # แปลงให้เป็นข้อมูลประเภท List ก่อนแล้วจึงแสดงผล print( list(range_5)) # + # สร้างช่วงข้อมูลในกรณีต่าง ๆ พร้อมแสดงผล range_0to5 = range(0,5) # ช่วงข้อมูลคือ 0, 1, 2, 3, 4 print(list(range_0to5)) range_1to10 = range(1,10) # ช่วงข้อมูลคือ 1, 2, 3, …, 7, 8, 9 print(list(range_1to10)) range_1to10Step2 = range(1,10,2) # ช่วงข้อมูลคือ 1, 3, 5, 7, 9 print(list(range_1to10Step2)) range_10to1StepNeg2 = range(10,1,-2) # ช่วงข้อมูลคือ 10, 8, 6, 4, 2 print(list(range_10to1StepNeg2)) range_10toNeg10Step1 = range(10,-10,1) # ไม่มีข้อมูลเพราะ Stop มีค่าน้อยกว่า Start print(list(range_10toNeg10Step1)) range_10toNeg10StepNeg1 = range(10,-10,-1) # ช่วงข้อมูลคือ ช่วงข้อมูลคือ 10, 9, 8, 7, …. 
, -7, -8, -9 print(list(range_10toNeg10StepNeg1)) # - # ## การใช้ลูปแบบ for-in # # * คำสั่งให้ทำซ้ำตามจำนวนรอบที่กำหนด # # - for ตัวแปร in ช่วงข้อมูล : # - program code 1 # - program code 2 # # # * ช่วงข้อมูล มักใช้ค่าที่ได้จากฟังก์ชั่น range() # * ตัวแปร คือ ตัวแปรแต่ละตัวในช่วงข้อมูลที่กำหนดให้แก่ลูป for-in # # **ตัวอย่าง** # # + # ทดสอบการทำงานของลูปแบบ for-in for i in range(0,5) : print("Value of i = ",i) print("Hello world " + str(i) ) # ฟ้งก์ชั่น str() มีเพื่อเปลี่ยวตัวแปร i ให้เป็นข้อมูลประเภท String print("จบการทำงานของลูป for-in") # - # ## การใช้ลูปแบบ while # # * การทำซ้ำแบบกำหนดเงื่อนไข # # - while เงื่อนไข : # - program code 1 # - program code 2 # # # # * ลูปแบบ while จะตรวจสอบเงื่อนไขก่อนทำซ้ำ # * วนลูปเมื่อเงื่อนไขเป็น True # * หยุดทำซ้ำเมื่อเงื่อนไขเป็น False # + # ทดสอบการทำงานของลูปแบบ while i = 0 # ทดลองเปลี่ยนค่า i เริ่มต้นเป็น 5 แล้วดูความแตกต่าง while i < 5 : print("Value of i = ",i) print("Hello world " + str(i) ) # ฟ้งก์ชั่น str() มีเพื่อเปลี่ยวตัวแปร i ให้เป็นข้อมูลประเภท String i = i + 1 print("จบการทำงานของลูป while") # - # ## คำสั่ง continue # * สั่งให้เริ่มลูปถัดไปทันที # * หยุดทำคำสั่งในลูปปัจจุบันทุกคำสั่งที่อยู่หลังคำสั่ง ```continue``` # # **ตัวอย่างเช่น** # # + for x in range(0,5) : if x== 3 : continue # เมื่อ x มีค่าเท่ากับ 3 แล้วจะหยุดลูปปัจจุบันจากนั้นลูปถัดไปจะเริ่มทำงาน print(x) print('จบการทำงาน') # - # ## คำสั่ง break # * สั่งให้หยุดลูปทั้งหมดทันที # * หยุดกระทำทุกคำสั่งในลูปปัจจุบัน # # **ตัวอย่างเช่น** # + for x in range(0,5) : if x== 3 : break # เมื่อ x มีค่าเท่ากับ 3 แล้วจะยุติลูปทันที print(x) print('จบการทำงาน') # - # ## การใช้ else ในลูป # * Python สามารถเพิ่ม else เป็น feature ในลูป for-in และ ลูป while # * กรณีลูปแบบ for-int # - for ตัวแปร in ช่วงข้อมูล : # - program code 1 # - else : # - program code 2 # # * กรณีลูปแบบ while # - while เงื่อนไข : # - program code 1 # - else : # - program code 2 # # * เมื่อโปรแกรมทำงานในลูป for/while สำเร็จแล้วจึงทำงานต่อในบล็อค else # * หากลูปถูกหยุดด้วยคำสั่ง break แล้วบล็อก else 
จะไม่ทำงาน # # + # กรณีลูป for-in พร้อมด้วยบล็อค else for x in range(6): # ทดลองเปลี่่ยนเงื่อนไขเป็น range(3) print(x) if x == 5 : break # เมื่อคำสั่ง break ทำงานจะยุติการทำงานของลูปทันที else: print("Finally finished!") # หากคำสั่ง break ไม่ทำงานแล้วบล็อค else จะทำงานต่อจากลูป while # + # กรณีลูป while พร้อมด้วยบล็อค else x = 0 while x < 6 : # ทดลองเปลี่่ยนเงื่อนไขเป็น x<3 print(x) if x == 4 : break # เมื่อคำสั่ง break ทำงานจะยุติการทำงานของลูปทันที x = x + 1 else: print("Finally finished!") # หากคำสั่ง break ไม่ทำงานแล้วบล็อค else จะทำงานต่อจากลูป while
Chapter4_Loop.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <table width="100%"> <tr> # <td style="background-color:#ffffff;"> # <a href="http://qworld.lu.lv" target="_blank"><img src="..\images\qworld.jpg" width="35%" align="left"> </a></td> # <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;"> # prepared by <NAME> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>) # </td> # </tr></table> # <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table> # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\dot}[2]{ #1 \cdot #2} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ # $ \newcommand{\X}{ 
\mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ # <h2> One Bit </h2> # # [Watch Lecture](https://youtu.be/kn53Qvl-h28) # # In daily life, we use decimal number system. It is also called base-10 system, because we have 10 digits: # # $ 0,~1,~2,~3,~4,~5,~6,~7,~8, \mbox{ and } 9 $. # # In computer science, on the other hand, the widely used system is binary, which has only two digits: # # $ 0 $ and $ 1 $. # # Bit (or binary digit) is the basic unit of information used in computer science and information theory. # # It can also be seen as the smallest "useful" memory unit, which has two states named 0 and 1. # # At any moment, a bit can be in either state 0 or state 1. # <h3> Four operators </h3> # # How many different operators can be defined on a single bit? # # <i>An operator, depending on the current state of the bit, updates the state of bit (the result may be the same state).</i> # # We can apply four different operators to a single bit: # <ol> # <li> Identity: $ I(0) = 0 $ and $ I(1) = 1 $ </li> # <li> Negation: $ NOT(0) = 1 $ and $ NOT(1) = 0 $ </li> # <li> Constant (Zero): $ ZERO(0) = 0 $ and $ ZERO(1) = 0 $ </li> # <li> Constant (One): $ ONE(0) = 1 $ and $ ONE(1) = 1 $ </li> # </ol> # The first operator is called IDENTITY, because it does not change the content/value of the bit. # # The second operator is named NOT, because it negates (flips) the value of bit. 
# # <i>Remark that 0 and 1 also refer to Boolean values False and True, respectively, and, False is the negation of True, and True is the negation of False.</i> # # The third (resp., fourth) operator returns a constant value 0 (resp., 1), whatever the input is. # <h3> Table representation </h3> # # We can represent the transitions of each operator by a table: # # $ # I = \begin{array}{lc|cc} # & & initial & states \\ # & & \mathbf{0} & \mathbf{1} \\ \hline # final & \mathbf{0} & \mbox{goes-to} & \emptyset \\ # states & \mathbf{1} & \emptyset & \mbox{goes-to} \end{array} , # $ # where # - the header (first row) represents the initial values, and # - the first column represents the final values. # # We can also define the transitions numerically: # - we use 1 if there is a transition between two values, and, # - we use 0 if there is no transition between two values. # # $ # I = \begin{array}{lc|cc} # & & initial & states \\ # & & \mathbf{0} & \mathbf{1} \\ \hline # final & \mathbf{0} & 1 & 0 \\ # states & \mathbf{1} & 0 & 1 \end{array} # $ # The values in <b>bold</b> are the initial and final values of the bits. The non-bold values represent the transitions. # <ul> # <li> The top-left non-bold 1 represents the transition $ 0 \rightarrow 0 $. </li> # <li> The bottom-right non-bold 1 represents the transition $ 1 \rightarrow 1 $. </li> # <li> The top-right non-bold 0 means that there is no transition from 1 to 0. </li> # <li> The bottom-left non-bold 0 means that there is no transition from 0 to 1. </li> # </ul> # The reader may think that the values 0 and 1 are representing the transitions as True (On) and False (Off), respectively. 
# # Similarly, we can represent the other operators as below: # # $ # NOT = \begin{array}{lc|cc} & & initial & states \\ & & \mathbf{0} & \mathbf{1} \\ \hline final & \mathbf{0} & 0 & 1 \\ # states & \mathbf{1} & 1 & 0 \end{array} # ~~~ # ZERO = \begin{array}{lc|cc} & & initial & states \\ & & \mathbf{0} & \mathbf{1} \\ \hline final & \mathbf{0} & 1 & 1 \\ # states & \mathbf{1} & 0 & 0 \end{array} # ~~~ # ONE = \begin{array}{lc|cc} & & initial & states \\ & & \mathbf{0} & \mathbf{1} \\ \hline final & \mathbf{0} & 0 & 0 \\ # states & \mathbf{1} & 1 & 1 \end{array} # . # $ # <h3> Task 1 </h3> # # Convince yourself with the correctness of each table. # <h3> Reversibility and Irreversibility </h3> # # After applying Identity or NOT operator, we can easily determine the initial value by checking the final value. # <ul> # <li> In the case of Identity operator, we simply say the same value. </li> # <li> In the case of NOT operator, we simply say the other value, i.e., if the final value is 0 (resp., 1), then we say 1 (resp., 0). </li> # </ul> # # However, we cannot know the initial value by checking the final value after applying ZERO or ONE operator. # # Based on this observation, we can classify the operators into two types: <i>Reversible</i> and <i>Irreversible</i>. # <ul> # <li> If we can recover the initial value(s) from the final value(s), then the operator is called reversible like Identity and NOT operators. </li> # <li> If we cannot know the initial value(s) from the final value(s), then the operator is called irreversible like ZERO and ONE operators. </li> # </ul> # # <b> This classification is important, as the quantum evolution operators are reversible </b> (as long as the system is closed). # # The Identity and NOT operators are two fundamental quantum operators.
bronze/B03_One_Bit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Reading discussion - Scientific Python, IPython, Jupyter # # ## Point 1 # # "IPython is built and developed with the general principle of making life easier for scientists and students, but I believe that there are certain dangers in this as a result of my personal experience. If too much computation is abstracted away, then one can take the easy way out and miss out on learning fundamental concepts. Nowadays, one can simply call the appropriate methods in scikitlearn to do machine learning. In fact, my manager at my internship told me that a lot of folks who are trying to get into data science these days don’t even understand the algorithms/methods that they are using. Fifteen years ago, when scikitlearn didn’t exist, one needed to code the actual machine learning algorithm if he/she wanted to use it. This process forces the coder to deepen his/her fundamental understanding of the algorithm itself. The fact that we don’t need to do garbage collection in Python is a time-saver, but we miss out on the opportunity to learn about how our data is stored in memory. In fact, it’s possible for a newbie who has only coded in Python to not even be aware of what garbage collection is! In contrast, a C++ programmer has to become familiar with garbage collection and malloc by necessity. Actually, UC Berkeley’s entire computer science curriculum has embraced Python wholeheartedly; it is used in most of the upper division courses that I have taken. **As a result, I haven’t used C and C++ enough. 
This has caused my understanding of pointers and memory allocation, which I believe are still important for any computer programmer to know, to remain minimal.** One counter-argument would be to assert that teachers should insist that students learn the fundamental knowledge. In CS 189, the machine learning class here at Berkeley, we had to implement linear regression and a neural network in Numpy and Scipy in order to understand how they work. But, people often just want to finish a task in the most efficient way possible and will certainly find the most efficient way of doing so, especially if they are just searching online for the answers (through Stack Overflow, Quora, other online forums, etc.)" # # ### Discussion # # [Data Science from Scratch](http://proquest.safaribooksonline.com/book/databases/9781491901410), by <NAME>. # # The [Online Python Tutor](http://www.pythontutor.com) is an excellent tool for visualizing the structure of algorithms in a variety of languages. We can even use it live in the notebook thanks to the [tutormagic](https://github.com/kikocorreoso/tutormagic) extension, which I've installed (you should go ahead and install it locally as well, as per the instructions on the site): # %load_ext tutormagic # %%tutor -l python3 x = [1,2,3] y = x x.append('hi') # This is the quicksort algorithm as it used to be described in an old version of the [Wikipedia page](http://en.wikipedia.org/wiki/Quicksort) (today the descriptions are more complicated): # # function quicksort(array) # var list less, greater # if length(array) <= 1 # return array # select and remove a pivot value pivot from array # for each x in array # if x <= pivot then append x to less # else append x to greater # return concatenate(quicksort(less), pivot, quicksort(greater)) # # We can turn this into Python and visualize it: # + # %%tutor -l python3 -h 600 def qsort(lst): """Return a sorted copy of the input list.""" if len(lst) <= 1: return lst pivot, rest = lst[0], lst[1:] 
less_than = [ lt for lt in rest if lt < pivot ] greater_equal = [ ge for ge in rest if ge >= pivot ] return qsort(less_than) + [pivot] + qsort(greater_equal) qsort([3, 10, -9, 1, 7]) # - # The Python [dis module](https://docs.python.org/3.6/library/dis.html) lets you analyze the internal structure of Python bytecode, which is what the interpreter actually executes: import dis dis.dis(""" x = [1,2,3] y = x x.append('hi') """) # ### XTensor # # `xtensor` is a C++ library meant for numerical analysis with multi-dimensional array expressions. Here is a [live demo](https://beta.mybinder.org/v2/gh/QuantStack/xtensor/0.11.1?filepath=notebooks/xtensor.ipynb). # ## Point 2 # # "Python has its drawbacks. For instance, Python has still not developed well-constructed function for parallel and distributed computing. And also what concerns me is that while lowering the standard for access to data analysis, Python may also lower the standard for preciseness and strictness of academic research. In jupyter notebook, we still have problem inserting academic citations. Anyone can post their research or article on platform like GitHub. The publication is no longer a very formal process as before. With the rapid rise of interactive computing systems, this issue also requires people's attention." # # # ### Discussion # # - [Dask](https://dask.pydata.org): an excellent library for distributed computing in Python. # # - For numerically-oriented parallel computing, [MPI4Py](http://mpi4py.readthedocs.io) provides Python access to the complete MPI APIs. # # - Not very actively developed anymore, but can be interesting in certain contexts: [ipyparallel](http://ipyparallel.readthedocs.io). If anyone is interested in how ipyparallel and mpi4py can be combined to interactively steer and introspect parallel codes, see me at office hours. # # - Preprints: the [ArXiV](https://arxiv.org). 
In-progress, non-peer-reviewed research hasn't been the death of physics, and these ideas are now picking up momentum in other areas: [BioRxiv](http://www.biorxiv.org), [OSF Preprints framework](https://osf.io/preprints). # # - Citations: not a completely solved problem, but [<NAME>'s cite2c](https://github.com/takluyver/cite2c) is a step in the right direction, and we're working on improvements here. # # <p class="space"></p> # # # ## Point 3 # # "All in all, the article makes some good points (and to be fair it is out-of-date), but I think it makes a weak/strawman case when comparing Python to C/C++/Fortran/Mathematica/Matlab, because the former three are non-interactive, lower-level, the latter two are high-level interactive, and base Python (as mentioned here, without mentioning IPython engine/shell, much less Jupyter notebooks) is high-level and non-interactive. So really the only competitor that comes to mind for high-level non-interactive (at least with extensions) languages (that existed at the time, i.e. not Julia, which hasn't even had a stable release yet anyway) is R, but comparisons with R are mostly ignored and/or glossed over and/or inaccurate/unfair. (E.g. with regards to data visualization, statistical algorithms, and most importantly data manipulation, for which Python is absolutely terrible/not even worth using without pandas, and even then Python still doesn't have any super-killer like dplyr on top of R's better built-in features for data management/manipulation)." # # ### Discussion # # - Indeed, it's out of date: I couldn't find a current version of that article, which would be great to have. Much has changed since that article was written. 
The Stack Overflow team published recently two interesting posts: # # * [The Incredible Growth of Python](https://stackoverflow.blog/2017/09/06/incredible-growth-python) # * [Why is Python Growing So Quickly?](https://stackoverflow.blog/2017/09/14/python-growing-quickly) # # This image is particularly telling: # # <img width= 50% src=https://zgab33vy595fw5zq-zippykid.netdna-ssl.com/wp-content/uploads/2017/09/related_tags_over_time-1-1024x1024.png></img> # # - There's certainly nothing quite like the [TidyVerse](https://www.tidyverse.org) in Python. Note that at the time of writing of the article, dplyr (and much of the tidyverse) didn't exist (its [initial commit](https://github.com/tidyverse/dplyr/commit/80dc69b144711ec095db1d62cf0b73e09560eaf0) is dated Oct 28, 2012). But it's certainly true that the R machinery is exceptionally powerful, and many of its tools interoperate with power and elegance. # # In today's data science, research and industry environments, both R and Python play a role. Each has areas of particular strength, each has weaknesses, in some areas they overlap enough to be nearly interchangeable, in others they compete and feed each other. In the end, a good scientist should know more than one tool, and know when to pick the most appropriate for the job. I hope this course will teach you enough Python to know when to use it, and when not to! # # <p class="space"></p> # # ## Point 4 # # "I hate the fact the Statistics department here seems set on using R in most classes instead of Python along with packages like NumPy, Sympy, etc, and having taken Stat 133 I have seen time wasted teaching the unintuitive R and RStudio to make R markdown files instead of converting to Python. I didn’t have any issues with these readings, Python is much less of a pain than high-level languages used in the CS department and I think should replace R and Matlab in statistics, numerical analaysis, and engineering classes." # # # ### Discussion # # See above 😀! 
# + nbsphinx="hidden" language="html" # <style> # .space { # margin-bottom: 5cm; # } # # .big-space { # margin-bottom: 8cm; # } # </style>
lectures/04-reading2-discussion.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (system-wide)
#     language: python
#     metadata:
#       cocalc:
#         description: Python 3 programming language
#         priority: 100
#         url: https://www.python.org/
#     name: python3
# ---

# #### Exercise #: Simulate Monty Hall Game for 1000 times. Use a barplot and discuss whether players are better off sticking to their initial choice, or switching doors?

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


def othergoat(x):
    """Return the name of "the other goat".

    Given "Goat 1" returns "Goat 2" and vice versa.  Any other input
    yields None (same as the original if/elif chain with no final else).
    """
    return {"Goat 1": "Goat 2", "Goat 2": "Goat 1"}.get(x)


# +
Doors = np.array(["Car", "Goat 1", "Goat 2"])  # objects hidden behind the three doors
goats = np.array(["Goat 1", "Goat 2"])  # the two goats the host can reveal


def MHgame():
    """Simulate one round of the Monty Hall game.

    Returns a list [guess, revealed, remaining] where
      guess     -- the door the player picked at random,
      revealed  -- the goat the host shows after the pick,
      remaining -- what is behind the door left for a switching player.
    """
    userguess = np.random.choice(Doors)  # player picks one of the three doors at random
    if userguess == "Goat 1":
        # The host must reveal the other goat, so switching wins the car.
        return [userguess, "Goat 2", "Car"]
    if userguess == "Goat 2":
        return [userguess, "Goat 1", "Car"]
    if userguess == "Car":
        # The host may reveal either goat; switching then loses the car.
        revealed = np.random.choice(goats)
        return [userguess, revealed, othergoat(revealed)]
# -

# Check and see if the MHgame function is doing what it is supposed to do:
# (the original wrapped this single check in `for i in np.arange(1)`, a
# degenerate one-iteration loop -- a direct call prints the same output)
a = MHgame()
print(a)
print(a[0])
print(a[1])
print(a[2])

c1 = []  # collects the player's guesses
c2 = []  # collects the doors the host reveals
c3 = []  # collects the doors left for a switching player
for i in range(1000):  # simulate 1000 rounds; range() is the idiomatic loop counter
    game = MHgame()
    c1.append(game[0])
    c2.append(game[1])
    c3.append(game[2])

# Create a data frame (gamedf) with 3 columns ("Guess","Revealed", "Remaining")
# and 1000 (or how many number of rounds) rows
gamedf = pd.DataFrame({'Guess': c1, 'Revealed': c2, 'Remaining': c3})
gamedf

# +
# Count how often each item sits behind the originally chosen door ("stick")
# and behind the remaining door ("switch").  Summing a boolean mask is the
# idiomatic pandas count and avoids materialising a filtered frame just to
# read its .shape[0].
original_car = (gamedf.Guess == 'Car').sum()
remaining_car = (gamedf.Remaining == 'Car').sum()

original_g1 = (gamedf.Guess == 'Goat 1').sum()
remaining_g1 = (gamedf.Remaining == 'Goat 1').sum()

original_g2 = (gamedf.Guess == 'Goat 2').sum()
remaining_g2 = (gamedf.Remaining == 'Goat 2').sum()

# +
# Let's plot a grouped barplot

# set width of bar
barWidth = 0.25

# set height of bar
bars1 = [original_car, original_g1, original_g2]
bars2 = [remaining_car, remaining_g1, remaining_g2]

# Set position of bar on X axis
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]

# Make the plot
plt.bar(r1, bars1, color='darkorange', width=barWidth, edgecolor='white', label='Original Guess')
plt.bar(r2, bars2, color='midnightblue', width=barWidth, edgecolor='white', label='Remaining Door')

# Add xticks on the middle of the group bars
plt.xlabel('Item', fontweight='bold')
plt.xticks([r + barWidth/2 for r in range(len(bars1))], ['Car', 'Goat 1', 'Goat 2'])

# Create legend & Show graphic
plt.legend()
plt.show()
# -

# <font color=crimson>__According to the plot, it is statistically beneficial for the players to switch doors because the initial chance for being correct is only 1/3__</font>
1-Lessons/Lesson10/OriginalPowerpoint/src/.ipynb_checkpoints/Monty Hall Game_TC&FF-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Simple Binary Classification with defaults
#
# In this notebook we will train a Wide and Deep model and simply a "Deep"
# model using the well known adult dataset

# +
import numpy as np
import pandas as pd
import torch

from pytorch_widedeep.preprocessing import WidePreprocessor, TabPreprocessor
from pytorch_widedeep.training import Trainer
from pytorch_widedeep.models import Wide, TabMlp, WideDeep
from pytorch_widedeep.metrics import Accuracy, Precision
from pytorch_widedeep.datasets import load_adult
# -

# Load the UCI "adult" census dataset as a pandas DataFrame.
df = load_adult(as_frame=True)
df.head()

# For convenience, we'll replace '-' with '_'
df.columns = [c.replace("-", "_") for c in df.columns]

# binary target: 1 if income is ">50K", else 0
df["income_label"] = (df["income"].apply(lambda x: ">50K" in x)).astype(int)
df.drop("income", axis=1, inplace=True)
df.head()

# Drop the sampling-weight and redundant numeric-education columns.
df.drop(["fnlwgt", "educational_num"], axis=1, inplace=True)

# ### Preparing the data

# Define wide, crossed and deep tabular columns
wide_cols = [
    "workclass",
    "education",
    "marital_status",
    "occupation",
    "relationship",
    "race",
    "gender",
    "native_country",
]
crossed_cols = [("education", "occupation"), ("native_country", "occupation")]

# Categorical columns that will be represented as embeddings in the deep part.
cat_embed_cols = [
    "workclass",
    "education",
    "marital_status",
    "occupation",
    "relationship",
    "race",
    "gender",
    "capital_gain",
    "capital_loss",
    "native_country",
]
continuous_cols = ["age", "hours_per_week"]
cat_embed_cols

# TARGET
target_col = "income_label"
target = df[target_col].values

# let's see what the preprocessors do
# wide: one-hot/cross encoding for the linear component
wide_preprocessor = WidePreprocessor(wide_cols=wide_cols, crossed_cols=crossed_cols)
X_wide = wide_preprocessor.fit_transform(df)

# +
# # wide_preprocessor has an attribute called encoding_dict with the encoding dictionary
# wide_preprocessor.encoding_dict
# -

# deeptabular: label-encodes the categorical columns and passes the
# continuous ones through.
tab_preprocessor = TabPreprocessor(
    embed_cols=cat_embed_cols, continuous_cols=continuous_cols
)
X_tab = tab_preprocessor.fit_transform(df)

# check the docs to understand the useful attributes that the tab_preprocessor
# has. For example, as well as an encoding dictionary, tab_preprocessor has an
# attribute called cat_embed_input that specifies the categorical columns that
# will be represented as embeddings, the number of different categories per
# feature, and the dimension of the embeddings as defined by some of the
# internal rules of thumb that the preprocessor has (have a look to the docs)
tab_preprocessor.cat_embed_input

print(X_wide)
print(X_wide.shape)
print(X_tab)
print(X_tab.shape)

# ### Defining the model

# Linear ("wide") component over the one-hot/crossed feature space.
wide = Wide(input_dim=np.unique(X_wide).shape[0], pred_dim=1)

# MLP over categorical embeddings + continuous features ("deep" component).
tab_mlp = TabMlp(
    column_idx=tab_preprocessor.column_idx,
    cat_embed_input=tab_preprocessor.cat_embed_input,
    cat_embed_dropout=0.1,
    continuous_cols=continuous_cols,
    mlp_hidden_dims=[400, 200],
    mlp_dropout=0.5,
    mlp_activation="leaky_relu",
)

# Let's first find out how a linear model performs
wide

# Before being passed to the Trainer, the models need to be "constructed"
# with the ``WideDeep`` constructor class. For the particular case of the
# wide/linear model, not much really happens
lin_model = WideDeep(wide=wide)
lin_model

lin_trainer = Trainer(
    model=lin_model,
    objective="binary",
    optimizers=torch.optim.AdamW(lin_model.parameters(), lr=0.01),
    metrics=[Accuracy, Precision],
)

lin_trainer.fit(X_wide=X_wide, target=target, n_epochs=4, batch_size=128, val_split=0.2)

# Bear in mind that `wide` is a linear model where the non-linearities are
# captured via the crossed columns. For the crossed-columns to be effective
# one needs proper business knowledge. There is no magic formula to produce them
#
# Let's have a look to the tabular model by itself

tab_model = WideDeep(deeptabular=tab_mlp)
tab_model

# You can see how the `WideDeep` class has added a final prediction layer that
# collects the activations from the last layer of the model and plugs them into
# the output neuron. If this was a multiclass classification problem, the
# prediction dimension (i.e. the size of that final layer) needs to be
# specified via the `pred_dim` when instantiating the `WideDeep` class, as we
# will see later

tab_trainer = Trainer(
    model=tab_model,
    objective="binary",
    optimizers=torch.optim.AdamW(tab_model.parameters(), lr=0.001),
    metrics=[Accuracy, Precision],
)

tab_trainer.fit(X_tab=X_tab, target=target, n_epochs=4, batch_size=128, val_split=0.2)

# The best result I ever obtained with `LightGBM` on this dataset is
# 0.8782...so we are pretty close.
#
# Let's combine the `wide` and `tab_mlp` components see if it helps

# Fresh instances: the previous ones have already been trained above.
wide = Wide(input_dim=np.unique(X_wide).shape[0], pred_dim=1)
tab_mlp = TabMlp(
    column_idx=tab_preprocessor.column_idx,
    cat_embed_input=tab_preprocessor.cat_embed_input,
    cat_embed_dropout=0.1,
    continuous_cols=continuous_cols,
    mlp_hidden_dims=[400, 200],
    mlp_dropout=0.5,
    mlp_activation="leaky_relu",
)
wd_model = WideDeep(wide=wide, deeptabular=tab_mlp)

wd_trainer = Trainer(
    model=wd_model,
    objective="binary",
    optimizers=torch.optim.AdamW(wd_model.parameters(), lr=0.001),
    metrics=[Accuracy, Precision],
)

wd_trainer.fit(
    X_wide=X_wide, X_tab=X_tab, target=target, n_epochs=4, batch_size=128, val_split=0.2
)

# For this particular case, the combination of both did not lead to better
# results than using just the tab_mlp model.
#
# Note that we have used a `TabMlp` model, but we could use any other model in
# the library using the same syntax

from pytorch_widedeep.models import TabTransformer

# `TabTransformer` accepts the same column_idx/cat_embed_input/continuous_cols
# preprocessing outputs plus embedding options (cat_embed_dropout,
# embed_continuous, cont_embed_dropout, use_cont_bias, ...), transformer
# options (input_dim, n_heads, n_blocks, attn_dropout, ff_dropout,
# transformer_activation, ...) and an optional prediction MLP
# (mlp_hidden_dims, mlp_activation, mlp_dropout, ...).
#
# Please, see the documentation for details on each one of them; for now let's
# see how one could use a `TabTransformer` model in a few lines of code

tab_transformer = TabTransformer(
    column_idx=tab_preprocessor.column_idx,
    cat_embed_input=tab_preprocessor.cat_embed_input,
    cat_embed_dropout=0.1,
    continuous_cols=continuous_cols,
    embed_continuous=True,
    cont_embed_dropout=0.2,
    use_cont_bias=True,
    cont_embed_activation="leaky_relu",
    n_heads=4,
    ff_dropout=0.2,
    mlp_dropout=0.5,
    mlp_activation="leaky_relu",
)

tab_model = WideDeep(deeptabular=tab_transformer)
tab_model

tab_trainer = Trainer(
    model=tab_model,
    objective="binary",
    optimizers=torch.optim.AdamW(tab_model.parameters(), lr=0.001),
    metrics=[Accuracy, Precision],
)

tab_trainer.fit(X_tab=X_tab, target=target, n_epochs=1, batch_size=128, val_split=0.2)
examples/notebooks/03_Binary_Classification_with_Defaults.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JOSEPHINEGEND/Sendy-Logistics-Cape-to-Nairo-Datathon/blob/master/We_are_the_best.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="5Qe6aFGUEFDX" colab_type="text"
# ### R Insights
# - CRISP-DM: Business Understanding, Data Understanding, Data Preparation,
#   Modelling, Deployment
#
# ### Research Question: predict the time for delivery
#
# ### Data Cleaning
# - Rename columns, remove white spaces, encode categorical variables,
#   check distribution of categorical variables
#
# ### Feature Engineering
# - Create features from time; check distribution of the target variable
# - Missing data (Precipitation and Temperature): impute (e.g. mean)
# - Outliers: impute or drop; add a True/False column flagging outliers
# - Investigate zero delivery times; normalise variables
#
# ### Modelling
# - Bias/variance tradeoff; shuffle; train/test/validation splits;
#   regression models; cross-validation; parameter tuning
#   (random search / grid search)
#
# ### Metrics of Success
# - RMSE - most common; MAE - robust with data that has outliers
#
# ## Python Insights
# - Hacking, MLXNET, blending, mirror/mean encoding

# + id="eTHRZ671znAC" colab_type="code" colab={}
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold, GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
import requests
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.linear_model import LinearRegression
from io import StringIO
import lightgbm as lgb
import lightgbm as lgbb
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
import hyperopt
import datetime
import datetime as dt
import warnings
warnings.filterwarnings('ignore')

# + id="rHtOKBtEWsRG" colab_type="code" colab={}
# !pip install catboost

# + id="UW9IPm8XWzNK" colab_type="code" colab={}
from catboost import CatBoostRegressor

# + id="HZTlKrrlzm9B" colab_type="code" colab={}
train = 'https://drive.google.com/file/d/1ZP9pFBATu38l97Tut5hKLvpzKRLFXX_P/view?usp=sharing'
test = 'https://drive.google.com/file/d/1_aElMoEIRs55avOafA7U1_YXEuaDBXLh/view?usp=sharing'
submission = 'https://drive.google.com/file/d/1mqXS8euMqF9_bhTEU6O9cLHoX2FI_5HD/view?usp=sharing'
dictionary = 'https://drive.google.com/file/d/1juqltwSs6OXQgJJEhTxD7Gm443fnLpCp/view?usp=sharing'
riders = 'https://drive.google.com/file/d/19-aVgAcKRxX_Tk9StUQMNeAUVi0ZTo9K/view?usp=sharing'


def read_csv(url):
    """Turn a Google Drive share link into a file-like object of its contents.

    Parameters:
        url: a 'drive.google.com/file/d/<id>/view' share URL.

    Returns:
        io.StringIO wrapping the downloaded text, suitable for pd.read_csv.
    """
    url = 'https://drive.google.com/uc?export=download&id=' + url.split('/')[-2]
    csv_raw = requests.get(url).text
    csv = StringIO(csv_raw)
    return csv


train = pd.read_csv(read_csv(train))
tes = pd.read_csv(read_csv(test))
sub = pd.read_csv(read_csv(submission))
dictionary = pd.read_csv(read_csv(dictionary))
riders = pd.read_csv(read_csv(riders))

# + [markdown] id="FM_S0--VaHXW" colab_type="text"
# ### Cleaning column names

# + id="RVcpphrdg8pK" colab_type="code" colab={}
df = train.copy()
test = tes.copy()
y = train[['User Id', 'Time from Pickup to Arrival']]
# Keep only the columns present in BOTH train and test.
df, test = df.align(test, join='inner', axis=1)

# snake_case the column names and strip punctuation.
df.columns = df.columns.str.lower().str.replace(' ', '_').str.replace('-', '_').str.replace('=', '_')
df.columns = df.columns.str.replace('__', '_')
df.columns = df.columns.str.replace('(', '').str.replace(')', '')
df.columns = df.columns.str.replace('__', '_')
# After align() above, df and test have identical columns, so the cleaned
# names can simply be copied over.  (The original re-ran the same replace
# chain on the already-cleaned df.columns, which was a chain of no-ops.)
test.columns = df.columns

# Marker so the two frames can be split again after joint feature engineering.
df['separator'] = 1
test['separator'] = 0
tt = pd.concat([df, test])

riders.columns = riders.columns.str.lower().str.replace(' ', '_')
comb = tt.merge(riders, how='left', on='rider_id')

# Stripping the order no to have whole numbers
comb['order_no'] = comb['order_no'].str.replace('Order_No_', '')
comb['order_no'] = comb['order_no'].astype(int)
comb.sort_values(by=['order_no'], ascending=True)

# BUG FIX: the original used `x == np.nan`, which is ALWAYS False because NaN
# never compares equal to anything, so the missing-temperature flag was always
# False.  isna() is the correct missingness test; the flag must be computed
# before the fillna below.
comb['temp'] = comb['temperature'].isna()

# Fill missing temperatures with the training-set mean.
comb['temperature'].fillna(df.temperature.mean(), inplace=True)
# Backward Fill Temperature column
# comb['temperature'].fillna(method='bfill', inplace=True)

# Missing precipitation is treated as "no rain".
comb['precipitation_in_millimeters'].fillna(0, inplace=True)

# Parse the timestamp columns and extract the hour as a feature.
time_cols = ['placement_time', 'confirmation_time', 'pickup_time', 'arrival_at_pickup_time']
for col in time_cols:
    comb[col] = pd.to_datetime(comb[col])
    comb[col.split('_')[0] + '_hour'] = comb[col].dt.hour
    # comb[col.split('_')[0] + '_minute'] = comb[col].dt.minute
    # comb[col] = pd.to_datetime(comb[col], format='%H:%M:%S', errors='coerce')
    # comb[col] = [time.time() for time in comb[col]]

from sklearn.cluster import KMeans

# Cluster pickup coordinates into 15 spatial zones.
kmeans = KMeans(n_clusters=15, init='k-means++')
kmeans.fit(comb[['pickup_lat', 'pickup_long']])  # Compute k-means clustering.
comb['pickup_cluster_label'] = kmeans.fit_predict(comb[['pickup_lat', 'pickup_long']])
centers1 = kmeans.cluster_centers_  # Coordinates of cluster centers.
labels1 = kmeans.predict(comb[['pickup_lat', 'pickup_long']])  # Labels of each point
comb['pickup_cluster_label'] = comb['pickup_cluster_label'].astype('category')
# comb = comb.drop(columns=['pickup_lat', 'pickup_long'])

# Same clustering for the destination coordinates.
kmeans = KMeans(n_clusters=15, init='k-means++')
kmeans.fit(comb[['destination_lat', 'destination_long']])  # Compute k-means clustering.
comb['destination_cluster_label'] = kmeans.fit_predict(comb[['destination_lat', 'destination_long']])
centers = kmeans.cluster_centers_  # Coordinates of cluster centers.
labels = kmeans.predict(comb[['destination_lat', 'destination_long']])  # Labels of each point
comb['destination_cluster_label'] = comb['destination_cluster_label'].astype('category')
# comb = comb.drop(columns=['destination_lat', 'destination_long'])

# Peak / off-peak flags: morning rush 7-9, evening rush 16-19.
hour_cols = ['placement_hour', 'confirmation_hour', 'pickup_hour', 'arrival_hour']
for col in hour_cols:
    comb[col.split('_')[0] + '_peak'] = comb[col].apply(
        lambda x: 1 if ((x >= 7) & (x <= 9)) | ((x >= 16) & (x <= 19)) else 0)

# Elapsed-seconds features between the workflow timestamps.
time_cols = ['placement_time', 'confirmation_time', 'pickup_time']
for col in time_cols:
    comb['arrival_minus_' + col.split('_')[0]] = (
        comb.arrival_at_pickup_time - comb[col]).astype('timedelta64[s]')
time_cols = ['placement_time', 'confirmation_time']
for col in time_cols:
    comb['pickup_minus_' + col.split('_')[0]] = (
        comb.pickup_time - comb[col]).astype('timedelta64[s]')
comb['confir_minus_placement'] = (
    comb.confirmation_time - comb.placement_time).astype('timedelta64[s]')

cat_cols = ['platform_type', 'personal_or_business', 'placement_day_of_month',
            'placement_weekday_mo_1', 'placement_hour', 'confirmation_hour',
            'pickup_hour', 'arrival_hour', 'pickup_cluster_label',
            'destination_cluster_label', 'placement_peak', 'confirmation_peak',
            'pickup_peak', 'arrival_peak']
for col in cat_cols:
    comb[col] = comb[col].astype('category')

# Drop identifiers and the raw timestamp columns now that features are built.
cols_drop = ['order_no', 'user_id', 'vehicle_type', 'placement_time',
             'confirmation_day_of_month', 'confirmation_weekday_mo_1',
             'confirmation_time', 'arrival_at_pickup_day_of_month',
             'arrival_at_pickup_weekday_mo_1', 'arrival_at_pickup_time',
             'pickup_day_of_month', 'pickup_weekday_mo_1', 'pickup_time',
             'rider_id']
comb.drop(cols_drop, axis=1, inplace=True)

# Split the combined frame back into train/test.
train = comb[comb.separator == 1]
test = comb[comb.separator == 0]
train.drop('separator', axis=1, inplace=True)
test.drop('separator', axis=1, inplace=True)
train['y'] = y['Time from Pickup to Arrival']

# + id="zqIOCfDQk5xN" colab_type="code" colab={}
tra = train.copy()
tes = test.copy()

# + id="EAPk7AJeYUOO" colab_type="code" colab={}
y = tra.y
tra = pd.get_dummies(tra)
X = tra.drop('y', axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)

cat = CatBoostRegressor()
cat.fit(X_train, y_train)
y_pred = cat.predict(X_test)
# Hold-out RMSE.
print(np.sqrt(mean_squared_error(y_test, y_pred)))

tes = pd.get_dummies(tes)
pred = cat.predict(tes)
sub_pred = pd.DataFrame({"Order_No": sub['Order_No'],
                         "Time from Pickup to Arrival": pred})
sub_pred.to_csv("111.csv", index=False)
We_are_the_best.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Grouping by cloudiness # # Let's group Sentinel-2 images by how cloudy they are, and composite them into low-cloud and high-cloud composites. We'll look at a spot renowned for its clouds: Seattle. # # `groupby` works on more than just dates; you can group by any metadata value on an Image. # # You could even use it to group by `cloud_fraction`. # Though deriving any possible practical use of this is left as an exercise to the reader, it's still kind of fun to look at. import descarteslabs.workflows as wf wf.map.center = [47.6857302113159, -122.19406127929689] wf.map.zoom = 9 wf.map s2 = wf.ImageCollection.from_id('sentinel-2:L1C', start_datetime="2019-01-01") grouped = s2.groupby(lambda img: img.properties['cloud_fraction'] // 0.25) # cloud_fraction is 0-1, so this should give us up to 5 groups low = grouped.groups[0].median(axis="images") high = grouped.groups[3].mean(axis="images") # notice we use `median` on clear images, `mean` on cloudy low.pick_bands('red green blue').visualize('low', scales=[[0, 0.4], [0, 0.4], [0, 0.4]]) high.pick_bands('red green blue').visualize('high', scales=[[0, 1], [0, 1], [0, 1]]) # ------- # # How many images fall into each group? group_lengths = grouped.groups.items().map( lambda group_imgs: group_imgs[1].length() ) group_lengths.compute(wf.map.geocontext()) # What fraction of images have `cloud_fraction` <= 0.25? clear_days_fraction = group_lengths[0] / (group_lengths.reduce(lambda accum, x: accum + x)) clear_days_fraction.compute(wf.map.geocontext()) # Indeed, it's cloudy a lot in Seattle.
examples/clouds.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 8.12.3 Other Search Functions; Accessing Matches

# ### Function search—Finding the First Match Anywhere in a String

import re

result = re.search('Python', 'Python is fun')

result.group() if result else 'not found'

result2 = re.search('fun!', 'Python is fun')

result2.group() if result2 else 'not found'

# ### Ignoring Case with the Optional flags Keyword Argument

result3 = re.search('Sam', 'SAM WHITE', flags=re.IGNORECASE)

result3.group() if result3 else 'not found'

# ### Metacharacters that Restrict Matches to the Beginning or End of a String

result = re.search('^Python', 'Python is fun')

result.group() if result else 'not found'

result = re.search('^fun', 'Python is fun')

result.group() if result else 'not found'

result = re.search('Python$', 'Python is fun')

result.group() if result else 'not found'

result = re.search('fun$', 'Python is fun')

result.group() if result else 'not found'

# ### Function findall and finditer—Finding All Matches in a String

# FIX: restored a concrete sample name in place of the '<NAME>' redaction
# placeholder so the demo string matches the book's example.
contact = 'Wally White, Home: 555-555-1234, Work: 555-555-4321'

re.findall(r'\d{3}-\d{3}-\d{4}', contact)

for phone in re.finditer(r'\d{3}-\d{3}-\d{4}', contact):
    print(phone.group())

# ### Capturing Substrings in a Match

# FIX: restored concrete 'First Last, e-mail: user@host.tld' demo data. With
# the '<NAME>'/'<EMAIL>' redaction placeholders the pattern below never
# matched, `result` was None, and the .groups()/.group() calls raised
# AttributeError when the script ran.
text = 'Charlie Cyan, e-mail: demo1@deitel.com'
pattern = r'([A-Z][a-z]+ [A-Z][a-z]+), e-mail: (\w+@\w+\.\w{3})'

result = re.search(pattern, text)

result.groups()

result.group()

result.group(1)

result.group(2)

##########################################################################
# (C) Copyright 2019 by Deitel & Associates, Inc. and                    #
# Pearson Education, Inc. All Rights Reserved.                           #
#                                                                        #
# DISCLAIMER: The authors and publisher of this book have used their     #
# best efforts in preparing the book. These efforts include the          #
# development, research, and testing of the theories and programs        #
# to determine their effectiveness. The authors and publisher make       #
# no warranty of any kind, expressed or implied, with regard to these    #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or       #
# consequential damages in connection with, or arising out of, the       #
# furnishing, performance, or use of these programs.                     #
##########################################################################
examples/ch08/snippets_ipynb/08_12.03.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] nbgrader={}
# # Project Euler: Problem 1

# + [markdown] nbgrader={}
# If we list all the natural numbers below 10 that are multiples of 3 or 5,
# we get 3, 5, 6 and 9. The sum of these multiples is 23.
#
# Find the sum of all the multiples of 3 or 5 below 1000.

# + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true}
# YOUR CODE HERE
def multiples_below(limit=1000, factors=(3, 5)):
    """Return the numbers below *limit* divisible by any of *factors*.

    Generalizes the original hard-coded "3 or 5 below 1000" comprehension;
    the defaults reproduce it exactly (0 is included, as before, and does
    not affect the sum).
    """
    return [i for i in range(limit) if any(i % f == 0 for f in factors)]


thousand_add = multiples_below()
print(sum(thousand_add))
print(thousand_add)
# -

# + deletable=false nbgrader={"checksum": "6e498cbe102f8b3c1bc4ebd777bcc952", "grade": true, "grade_id": "projecteuler1", "points": 10}
# This cell will be used for grading, leave it at the end of the notebook.
assignments/assignment01/ProjectEuler1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <h1>Regular Expression</h1>

# <h3>Introduction to Regular Expressions</h3>
# <ul><li>Regular expressions are used for pattern matching.</li>
# <li>Regular expressions are very useful when dealing with semi-structured
# data, i.e. when fields in a file do not have well defined delimiters like
# commas etc.</li>
# <li>It is a powerful language to match patterns in data.</li>
# <li>The "re" module in Python provides support for regular expressions.</li>
# </ul>

import re
# FIX: all patterns below use raw strings (r'...') so backslash sequences
# such as \d reach the regex engine verbatim; '\d' in a normal string is an
# invalid escape sequence and raises a SyntaxWarning on Python 3.12+.
patternObject = re.compile(r'[\d]*')
patternObject.match("Specify Search String here")

# <ul><li>There are two kinds of patterns in regular expression:</li>
# <ol><li><b>Literals:</b></li>
# <ul>
# <li>Literal values are used to match precise patterns.</li>
# <li>Literal characters are any character not listed as a metacharacter.</li>
# </ul>
# <li><b>Metacharacter:</b></li>
# <ul>
# <li>Metacharacters have special meaning; for example, a dot will match any
# single character.</li>
# <li>Here's the complete list of metacharacters:
# <p><b> &lt; ( [ { \ ^ - = $ ! | ] } ) ? * + . &gt; </b></p>
# <p>Regular expressions support patterns much more flexible than simply
# using a dot to match any character.</p>
# <p>The following explains how to use character classes to restrict which
# characters are matched.</p>

# <h3>Character Classes</h3>
# * [057] &nbsp; Matches any single digit that is either 0, 5 or 7
# * [0-9] &nbsp; Matches any single digit between 0 and 9
# * [3-6] &nbsp; Matches any single digit between 3 and 6
# * [a-z] &nbsp; Matches any single lowercase letter
# * [C-F] &nbsp; Matches any single uppercase letter between C and F

import re
p = re.compile(r'[C-F][3-6]')
p
p.match("D3")

# <h3>Predefined Character Classes</h3>
# * \\d &nbsp; Matches any single digit
# * \\w &nbsp; Matches any word character
# * \\s &nbsp; Matches any whitespace character (space, tab, newline, etc.)

import re
p = re.compile(r'\d\d\d\w')
p
p.match("314d")

# <h3>Negated Predefined Character Classes</h3>
# * \\D &nbsp; Matches any single non-digit character
# * \\W &nbsp; Matches any non-word character
# * \\S &nbsp; Matches any non-whitespace character

import re
p = re.compile(r'\D\D\W')
p
p.match("ZX#")

# <h3>Matching Quantifiers</h3>
# * {5} &nbsp; Preceding character may occur exactly five times
# * {0,6} &nbsp; Preceding character may occur between zero and six times
# * ? &nbsp; Preceding character is optional (may occur zero or one times)
# * \+ &nbsp; Preceding character may occur one or more times
# * \* &nbsp; Preceding character may occur zero or more times

# <h3>Boundary Matching Metacharacters</h3>
# * ^ &nbsp; Matches only at the beginning of a string
# * \$ &nbsp; Matches only at the ending of a string
Functional_Thinking/Lab/21B-Regular_Expressions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python(PythonData)
#     language: python
#     name: pythondata
# ---

# Exploratory plots comparing LA and Chicago crime counts against daily
# weather (temperature and weather events).  Input CSVs are expected to have
# one row per day with crime-category counts and weather columns.
# NOTE(review): assumes "Temp (F) avg" and the category columns
# (Violent/Property/Consensual/White Collar/Disturbance/Total) exist in both
# files — verify against the merged CSVs.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

la_df = pd.read_csv("LA_15-17_Merged.csv")
chi_df = pd.read_csv("Chicago_15-17_Merged.csv")

la_df
chi_df
len(chi_df)
len(la_df)

# Daily average temperature over time for both cities.
plt.plot(la_df["Date"], la_df["Temp (F) avg"])
plt.plot(chi_df["Date"], chi_df["Temp (F) avg"], alpha=0.5)
plt.show()

# +
# Total daily crimes vs temperature, with a linear trendline per city.
plt.scatter(la_df["Temp (F) avg"], la_df["Total"], s=10, marker="x", alpha=0.2, label="LA")
plt.scatter(chi_df["Temp (F) avg"], chi_df["Total"], s=10, marker="+", alpha=0.2, label="Chicago")

# calc the LA trendline (degree-1 least-squares fit)
z1 = np.polyfit(la_df["Temp (F) avg"], la_df["Total"], 1)
p1 = np.poly1d(z1)
plt.plot(la_df["Temp (F) avg"], p1(la_df["Temp (F) avg"]), "r--")

# calc the CH trendline
z2 = np.polyfit(chi_df["Temp (F) avg"], chi_df["Total"], 1)
p2 = np.poly1d(z2)
plt.plot(chi_df["Temp (F) avg"], p2(chi_df["Temp (F) avg"]), "b--")

plt.title("Number of Crimes Committed in Temperatures (F)")
plt.xlabel("Temperature (F)")
plt.ylabel("Number of Crimes")
plt.legend()
plt.show()
# -

# LA scatter: temperature vs count, one series per crime category.
# NOTE(review): these scatter calls pass no label=, so plt.legend() below
# emits a "no artists with labels" warning.
plt.scatter(la_df["Temp (F) avg"], la_df["Consensual"], s=10, marker="+")
plt.scatter(la_df["Temp (F) avg"], la_df["White Collar"], s=10, marker="x")
plt.scatter(la_df["Temp (F) avg"], la_df["Violent"], s=10, marker="x")
plt.scatter(la_df["Temp (F) avg"], la_df["Property"], s=10, marker="x")
plt.scatter(la_df["Temp (F) avg"], la_df["Disturbance"], s=10, marker="x")
plt.title("Category of Crime committed in Different Temperatures (F)")
plt.xlabel("Temperature (F)")
plt.ylabel("Number of Crimes Commited")
plt.legend()
plt.show()

# Chicago scatter: temperature vs count per crime category.
plt.scatter(chi_df["Temp (F) avg"], chi_df["Consensual"], s=10, marker="+")
plt.scatter(chi_df["Temp (F) avg"], chi_df["White Collar"], s=10, marker="x")
plt.scatter(chi_df["Temp (F) avg"], chi_df["Violent"], s=10, marker="x")
plt.scatter(chi_df["Temp (F) avg"], chi_df["Property"], s=10, marker="x")
plt.scatter(chi_df["Temp (F) avg"], chi_df["Disturbance"], s=10, marker="x")
plt.title("Category of Crime committed in Different Temperatures (F)")
plt.xlabel("Temperature (F)")
plt.ylabel("Number of Crimes Commited")
plt.legend()
plt.show()

# Mean daily counts per whole-degree temperature, Chicago.
chi_temp = chi_df.groupby("Temp (F) avg").mean()
plt.plot(chi_temp["Total"], linestyle="--")
plt.plot(chi_temp["Violent"])
plt.plot(chi_temp["Property"])
plt.plot(chi_temp["Disturbance"])
plt.plot(chi_temp["Consensual"])
plt.plot(chi_temp["White Collar"])
plt.title("Total amount of Crimes by Category v Temperature (F)")
plt.xlabel("Temperature (F)")
plt.ylabel("Crime Count")
plt.legend()
plt.show()

# Stacked-area view of the same Chicago per-category means.
plt.stackplot(chi_temp.index, [chi_temp["Violent"], chi_temp["Property"], chi_temp["Consensual"],
                               chi_temp["White Collar"], chi_temp["Disturbance"]])
plt.legend()
plt.show()

# Mean daily counts per whole-degree temperature, LA.
la_temp = la_df.groupby("Temp (F) avg").mean()
plt.plot(la_temp["Total"], linestyle="--")
plt.plot(la_temp["Violent"])
plt.plot(la_temp["Property"])
plt.plot(la_temp["Disturbance"])
plt.plot(la_temp["Consensual"])
plt.plot(la_temp["White Collar"])
plt.title("Total amount of Crimes by Category v Temperature (F)")
plt.xlabel("Temperature (F)")
plt.ylabel("Crime Count")
plt.legend()
plt.show()

plt.stackplot(la_temp.index, [la_temp["Violent"], la_temp["Property"], la_temp["Consensual"],
                              la_temp["White Collar"], la_temp["Disturbance"]])
plt.legend()
plt.show()

# +
# Boolean flags per weather-event keyword (a day may have several events).
x1 = list(chi_df["Event"].value_counts().index)
chi_df["Clear"] = [x.find("Clear") >= 0 for x in chi_df["Event"]]
chi_df["Thunder"] = [x.find("Thunder") >= 0 for x in chi_df["Event"]]
chi_df["Fog"] = [x.find("Fog") >= 0 for x in chi_df["Event"]]
chi_df["Rain"] = [x.find("Rain") >= 0 for x in chi_df["Event"]]
chi_df["Hail"] = [x.find("Hail") >= 0 for x in chi_df["Event"]]
# chi_df.loc[chi_df["Thunder"]==True]
chi_df.head(15)
# -

# Chicago thunder days only: crime categories vs temperature.
chi_thunder = chi_df.loc[chi_df["Thunder"] == True]
plt.scatter(chi_thunder["Temp (F) avg"], chi_thunder["Total"], s=5)
plt.scatter(chi_thunder["Temp (F) avg"], chi_thunder["Violent"], s=5)
plt.scatter(chi_thunder["Temp (F) avg"], chi_thunder["Property"], s=5)
plt.scatter(chi_thunder["Temp (F) avg"], chi_thunder["Consensual"], s=5)
plt.scatter(chi_thunder["Temp (F) avg"], chi_thunder["White Collar"], s=5)
plt.scatter(chi_thunder["Temp (F) avg"], chi_thunder["Disturbance"], s=5)
plt.title("Crime Types in Temperature (F)")
plt.ylabel("Amount of Crimes")
plt.xlabel("Temperature (F)")
plt.legend()
plt.show()

# +
# One scatter series of total crimes per weather-event subset.
chi_thunder = chi_df.loc[chi_df["Thunder"] == True]
chi_rain = chi_df.loc[chi_df["Rain"] == True]
chi_fog = chi_df.loc[chi_df["Fog"] == True]
chi_hail = chi_df.loc[chi_df["Hail"] == True]
chi_clear = chi_df.loc[chi_df["Clear"] == True]
chi_w_dfs = [chi_thunder, chi_rain, chi_fog, chi_hail, chi_clear]
chi_w_labels = ["Thunder", "Rain", "Fog", "Hail", "Clear"]
for x in range(len(chi_w_dfs)):
    df = chi_w_dfs[x]
    l = chi_w_labels[x]
    plt.scatter(df["Temp (F) avg"], df["Total"], s=5, label=l)
plt.title("Crime Types in Temperature (F)")
plt.ylabel("Amount of Crimes")
plt.xlabel("Temperature (F)")
plt.legend()
plt.show()
# -

# Yearly totals (Date is assumed to start with a 4-digit year — TODO confirm format).
la_df["Year"] = la_df["Date"].map(lambda x: x[:4])
la_df.groupby("Year").sum()

chi_df["Year"] = chi_df["Date"].map(lambda x: x[:4])
chi_df.groupby("Year").sum()

# LA mean total per temperature with a +/- one-standard-deviation band.
la_std = la_df.groupby("Temp (F) avg").std().fillna(0)
plt.plot(la_std.index, la_temp["Total"], c="b")
plt.plot(la_std.index, la_temp["Total"] + la_std["Total"], c="b", linestyle="--", alpha=0.5)
plt.plot(la_std.index, la_temp["Total"] - la_std["Total"], c="b", linestyle="--", alpha=0.5)
plt.show()

la_df.loc[la_df["Temp (F) avg"] == 48]

# Daily totals over time, both cities (x tick labels suppressed).
plt.plot(la_df["Date"], la_df["Total"], alpha=.8)
plt.plot(chi_df["Date"], chi_df["Total"], alpha=.8)
plt.xticks([])
plt.show()

plt.plot(chi_df["Date"], chi_df["Total"])
plt.xticks([])
plt.show()

# +
# CH scatter: Violent and Property vs temperature, each with a trendline.
plt.scatter(chi_df["Temp (F) avg"], chi_df["Violent"], s=10, marker="x")
plt.scatter(chi_df["Temp (F) avg"], chi_df["Property"], s=10, marker="x")

# calc the Violent trendline
z1 = np.polyfit(chi_df["Temp (F) avg"], chi_df["Violent"], 1)
p1 = np.poly1d(z1)
plt.plot(chi_df["Temp (F) avg"], p1(chi_df["Temp (F) avg"]), "r--")

# calc the Property trendline
z2 = np.polyfit(chi_df["Temp (F) avg"], chi_df["Property"], 1)
p2 = np.poly1d(z2)
plt.plot(chi_df["Temp (F) avg"], p2(chi_df["Temp (F) avg"]), "r--")

plt.title("Category of Crime committed in Different Temperatures (F)")
plt.xlabel("Temperature (F)")
plt.ylabel("Number of Crimes Commited")
plt.legend()
plt.show()
# -

# +
# LA scatter: Violent vs temperature with trendline (Property left commented out).
plt.scatter(la_df["Temp (F) avg"], la_df["Violent"], s=10, marker="x")
# plt.scatter(la_df["Temp (F) avg"], la_df["Property"],s=10,marker="x")

# calc the Violent trendline
z1 = np.polyfit(la_df["Temp (F) avg"], la_df["Violent"], 1)
p1 = np.poly1d(z1)
plt.plot(la_df["Temp (F) avg"], p1(la_df["Temp (F) avg"]), "r--")

# calc the Property trendline
# z2 = np.polyfit(la_df["Temp (F) avg"], la_df["Property"], 1)
# p2 = np.poly1d(z2)
# plt.plot(la_df["Temp (F) avg"],p2(la_df["Temp (F) avg"]),"r--")

plt.title("Category of Crime committed in Different Temperatures (F)")
plt.xlabel("Temperature (F)")
plt.ylabel("Number of Crimes Commited")
plt.legend()
plt.show()
# -

# Sum of counts per weather event plus how many days had each event.
la_weather = la_df.groupby("Event").sum()
count_events = la_df.groupby("Event").count()
la_totals = pd.concat([la_weather, count_events["Total"]], axis=1)
la_totals.columns = ['Temp (F) avg', 'Consensual', 'Disturbance', 'Property', 'Violent',
                     'White Collar', 'Total Crime', 'Total Weather Events']
la_totals

# +
# LA scatter: crime totals per event vs how often that event occurred
# (trendline code left commented out, as in the original).
plt.scatter(la_totals["Total Weather Events"], la_totals["Violent"], s=10, marker="x")
plt.scatter(la_totals["Total Weather Events"], la_totals["Property"], s=10, marker="x")

# calc the Violent trendline
# z1 = np.polyfit(la_totals["Total Weather Events"], la_totals["Violent"], 1)
# p1 = np.poly1d(z1)
# plt.plot(la_totals["Total Weather Events"],p1(la_totals["Total Weather Events"]),"r--")

# calc the Property trendline
# z2 = np.polyfit(la_totals["Total Weather Events"], la_totals["Property"], 1)
# p2 = np.poly1d(z2)
# plt.plot(la_totals["Total Weather Events"],p2(la_totals["Total Weather Events"]),"r--")

plt.title("Number of Crimes committed in Different Weather Events")
plt.xlabel("Count of Weather Events")
plt.ylabel("Number of Crimes Commited")
plt.legend()
plt.show()
# -
Some_Plots_Michael.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt from matplotlib import style style.use('ggplot') import pickle import pandas as pd import numpy as np import math #Import Modules import random from sklearn import svm df=pd.read_csv("ex8a.txt", sep=' ', names = ["Label", "x1", "x2"]) x1=[] x2=[] y1=[] #Extract Data y2=[] z1=[] z2=[] arr=[] for i in range(len(df["x1"])): arr.append(0) if int(df["Label"][i])==1: s=[] q=str(df["x2"][i]) k=str(df["x1"][i]) x1.append(float(k[2:len(k)])) x2.append(float(q[2:len(q)])) s.append(float(k[2:len(k)])) s.append(float(q[2:len(q)])) z1.append(s) z2.append(1) else: s=[] q=str(df["x2"][i]) k=str(df["x1"][i]) y1.append(float(k[2:len(k)])) y2.append(float(q[2:len(q)])) s.append(float(k[2:len(k)])) s.append(float(q[2:len(q)])) z1.append(s) z2.append(-1) # + ker=np.zeros((len(z2),len(z2))) #Creating a Gram Matrix def kernel(k1,k2): s=100*(np.linalg.norm(np.array(k1)-np.array(k2))**2) return np.exp(-s) for i in range(len(z2)): for j in range(len(z2)): ker[i][j]+=kernel(z1[i],z1[j]) with open("dic.pickle","wb") as f: pickle.dump(ker,f) # - with open("dic.pickle","rb") as f: #Loading the pickle file ker=pickle.load(f) def kernel(k1,k2): #Rbf Kernel Function s=100*(np.linalg.norm(np.array(k1)-np.array(k2))**2) return np.exp(-s) def svm_algorithm(j,y,a,b): #Running the kernel c=0 for i in range(len(y)): c+=(a[i]*y[i]*(ker[i,j])) return (c+b) def svm_algorithmplot(x_test,x,y,a,b): #Running the kernel for visualisation c=0 for i in range(len(y)): c+=(a[i]*y[i]*(kernel(x_test,x[i]))) return (c+b) def maxandmin(y1,y2,a1,a2,c): #SMO Min and Max Calculator if y1!=y2: k=[max(0,a2-a1),min(c,c+a2-a1)] else: k=[max(0,a2+a1-c),min(c,a2+a1)] return k def smo_optimization(x,y,arr,bias,c,maxpass,tol=0.001): #SMO Algorithm a=arr b=bias iter=0 while 
(iter<maxpass): numalphas=0 z=len(y) for i in range(z): s=svm_algorithm(i,y,a,b)-y[i] if ((y[i]*s < -tol and a[i]<c) or (y[i]*s >tol and a[i]>0)): k=random.randint(0,z-1) t=svm_algorithm(k,y,a,b)-y[k] ai_old=a[i] ak_old=a[k] d=maxandmin(y[i],y[k],a[i],a[k],c) if (d[0]==d[1]): continue neta=(2*ker[i,k])-ker[i,i]-ker[k,k] if neta>=0: continue a[k]=a[k]-((y[k]*(s-t))/neta) if (a[k]>d[1]): a[k]=d[1] elif (a[k]<d[0]): a[k]=d[0] else: a[k]=a[k] if abs(a[k]-ak_old)<0.00001: continue a[i]=a[i]-(y[i]*y[k]*(a[k]-ak_old)) b1=b-s-(y[i]*(a[i]-ai_old)*ker[i,i])-(y[k]*(a[k]-ak_old)*ker[i,k]) b2=b-t-(y[i]*(a[i]-ai_old)*ker[i,k])-(y[k]*(a[k]-ak_old)*ker[k,k]) if (a[i]>0 and a[i]<c): b=b1 elif (a[k]>0 and a[k]<c): b=b2 else: b=(b1+b2)/2.0 numalphas+=1 if numalphas==0: iter+=1 else: iter=0 return ([a,b]) sumo=smo_optimization(z1,z2,arr,0,1,20) with open("alphas.pickle","wb") as f: pickle.dump(sumo,f) with open("alphas.pickle","rb") as f: sumo=pickle.load(f) accuracy=0 ks=[] for i in range(len(z2)): ts= svm_algorithm(i,z2,sumo[0],sumo[1]) if ts>0: ks.append(1) else: ks.append(-1) for i in range(len(z2)): if (ks[i]==z2[i]): accuracy+=1 print ("The Accuracy of the Support Vector Machine is",(accuracy/len(z2)*100),'%') z1=np.array(z1) #Preparing for Visualization z2=np.array(z2) h = .02 # step size in the mesh # create a mesh to plot in x_min, x_max = z1[:, 0].min() - 1, z1[:, 0].max() + 1 y_min, y_max = z1[:, 1].min() - 1, z1[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) ss=[] st=np.c_[xx.ravel(), yy.ravel()] for i in range(len(st)): tt= svm_algorithmplot(st[i],z1,z2,sumo[0],sumo[1]) if tt>0: ss.append(1) else: ss.append(-1) with open("visuals.pickle","wb") as f: pickle.dump(ss,f) with open("visuals.pickle","rb") as f: ss=pickle.load(f) Z=np.array(ss) plt.scatter( z1[:, 0], z1[:, 1],c=np.array(z2),cmap=plt.cm.Spectral,label="points") Z=Z.reshape(xx.shape) plt.contour(xx, yy, Z, cmap=plt.cm.Wistia,label="boundary") plt.xlabel('Feature 1 
(x1)') plt.ylabel('Feature 2 (x2)') plt.xlim(xx.min(),xx.max()) plt.ylim(yy.min(),yy.max()) plt.title('Support Vector Machines') plt.legend() plt.show()
SVM/SVMrbfkernel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib widget from util import get_path import pandas as pd import networkx as nx import numpy as np import matplotlib.pyplot as plt from extract_graph import generate_nx_graph, transform_list, generate_skeleton, generate_nx_graph_from_skeleton, from_connection_tab, from_nx_to_tab from node_id import whole_movement_identification, second_identification import ast from plotutil import plot_t_tp1, compress_skeleton from scipy import sparse from sparse_util import dilate, zhangSuen from realign import realign, realign_final from datetime import datetime,timedelta from node_id import orient import pickle from matplotlib.widgets import CheckButtons import scipy.io as sio import imageio from pymatreader import read_mat from matplotlib import colors from copy import deepcopy,copy from collections import Counter import cv2 import imageio import matplotlib.pyplot as plt import numpy as np from skimage.filters import frangi from skimage.morphology import thin from skimage import data, filters from random import choice import scipy.sparse import os from time import time from extract_graph import dic_to_sparse, from_sparse_to_graph, generate_nx_graph, prune_graph, from_nx_to_tab, from_nx_to_tab_matlab,sparse_to_doc, connections_pixel_list_to_tab, transform_list, clean_degree_4 from time import sleep from pycpd import RigidRegistration, DeformableRegistration import open3d as o3d from cycpd import rigid_registration import numpy as np np.random.choice([1,2,3],5) # + from util import get_path import pandas as pd import networkx as nx import numpy as np import matplotlib.pyplot as plt from extract_graph import generate_nx_graph, transform_list, generate_skeleton, generate_nx_graph_from_skeleton, from_connection_tab, from_nx_to_tab from node_id import 
whole_movement_identification, second_identification import ast from plotutil import plot_t_tp1, compress_skeleton from scipy import sparse from sparse_util import dilate, zhangSuen from realign import realign, realign_final from datetime import datetime,timedelta from node_id import orient import pickle from matplotlib.widgets import CheckButtons import scipy.io as sio import imageio from pymatreader import read_mat from matplotlib import colors from copy import deepcopy,copy from collections import Counter import cv2 import imageio import matplotlib.pyplot as plt import numpy as np from skimage.filters import frangi from skimage.morphology import thin from skimage import data, filters from random import choice import scipy.sparse import os from time import time from extract_graph import dic_to_sparse, from_sparse_to_graph, generate_nx_graph, prune_graph, from_nx_to_tab, from_nx_to_tab_matlab,sparse_to_doc, connections_pixel_list_to_tab, transform_list, clean_degree_4 from time import sleep from pycpd import RigidRegistration, DeformableRegistration import open3d as o3d from cycpd import rigid_registration import sys i = 16 plate = 9 directory = "/scratch/shared/mrozemul/Fiji.app/" listdir=os.listdir(directory) list_dir_interest=[name for name in listdir if name.split('_')[-1]==f'Plate{0 if plate<10 else ""}{plate}'] ss=[name.split('_')[0] for name in list_dir_interest] ff=[name.split('_')[1] for name in list_dir_interest] dates_datetime=[datetime(year=int(ss[i][:4]),month=int(ss[i][4:6]),day=int(ss[i][6:8]),hour=int(ff[i][0:2]),minute=int(ff[i][2:4])) for i in range(len(list_dir_interest))] dates_datetime.sort() dates_datetime_chosen=dates_datetime[i:i+2] dates = [f'{0 if date.month<10 else ""}{date.month}{0 if date.day<10 else ""}{date.day}_{0 if date.hour<10 else ""}{date.hour}{0 if date.minute<10 else ""}{date.minute}' for date in dates_datetime_chosen] dilateds=[] skels = [] skel_docs = [] for date in dates: directory_name=f'2020{date}_Plate{0 if plate<10 
else ""}{plate}' path_snap='/scratch/shared/mrozemul/Fiji.app/'+directory_name skel_info = read_mat(path_snap+'/Analysis/skeleton.mat') skel = skel_info['skeleton'] skels.append(skel) skel_doc = sparse_to_doc(skel) skel_docs.append(skel_doc) skeleton1,skeleton2 = skel_docs[0],skel_docs[1] skelet_pos = np.array(list(skeleton1.keys())) samples = np.random.choice(skelet_pos.shape[0],len(skeleton2.keys())//100) X = np.transpose(skelet_pos[samples,:]) skelet_pos = np.array(list(skeleton2.keys())) samples = np.random.choice(skelet_pos.shape[0],len(skeleton2.keys())//100) Y = np.transpose(skelet_pos[samples,:]) reg = rigid_registration(**{'X': np.transpose(X.astype(float)), 'Y': np.transpose(Y.astype(float)),'scale': False}) out = reg.register() Rfound = reg.R[0:2,0:2] tfound= np.dot(Rfound,reg.t[0:2]) nx_graph1,pos1 = generate_nx_graph(from_sparse_to_graph(skeleton1)) nx_graph2,pos2 = generate_nx_graph(from_sparse_to_graph(skeleton2)) pruned1 = prune_graph(nx_graph1) pruned2 = prune_graph(nx_graph2) t_init=-tfound Rot_init= Rfound X = np.transpose(np.array([pos1[node] for node in pruned1 if pruned1.degree(node)==3])) Y = np.transpose(np.array([pos2[node] for node in pruned2 if pruned2.degree(node)==3])) fig=plt.figure(figsize=(10,9)) ax = fig.add_subplot(111) ax.scatter(X[0,:],X[1,:]) ax.scatter(Y[0,:],Y[1,:]) Xex = np.transpose(np.transpose(np.dot(Rot_init,X))+t_init) fig=plt.figure(figsize=(10,9)) ax = fig.add_subplot(111) ax.scatter(Xex[0,:],Xex[1,:]) ax.scatter(Y[0,:],Y[1,:]) X = np.insert(X, 2, values=0, axis=0) Y = np.insert(Y, 2, values=0, axis=0) print(X.shape,Y.shape) vectorX = o3d.utility.Vector3dVector(np.transpose(X)) vectorY = o3d.utility.Vector3dVector(np.transpose(Y)) source = o3d.geometry.PointCloud(vectorX) target = o3d.geometry.PointCloud(vectorY) threshold = 200 trans_init = np.asarray([[Rot_init[0,0], Rot_init[0,1], 0, t_init[0]], [Rot_init[1,0], Rot_init[1,1], 0, t_init[1]], [0, 0, 1, 0], [0.0, 0.0, 0.0, 1.0]]) reg_p2p = 
o3d.registration.registration_icp( source, target, threshold, trans_init, o3d.registration.TransformationEstimationPointToPoint()) print(reg_p2p) Rfound = reg_p2p.transformation[0:2,0:2] tfound = reg_p2p.transformation[0:2,3] print(Rfound,tfound) X,Y=X[0:2,:],Y[0:2,:] Yrep=np.transpose(np.transpose(np.dot(Rfound,X))+tfound) fig=plt.figure(figsize=(10,9)) ax = fig.add_subplot(111) ax.scatter(np.transpose(Yrep)[:,0],np.transpose(Yrep)[:,1]) ax.scatter(np.transpose(Y)[:,0],np.transpose(Y)[:,1]) sio.savemat(path_snap+'/Analysis/transform.mat',{'R' : Rfound,'t' : tfound}) # - transform = sio.loadmat(path_snap+'/Analysis/transform.mat') R,t = transform['R'],transform['t'] R,t plate = 9 directory = "/scratch/shared/mrozemul/Fiji.app/" listdir=os.listdir(directory) list_dir_interest=[name for name in listdir if name.split('_')[-1]==f'Plate{0 if plate<10 else ""}{plate}'] ss=[name.split('_')[0] for name in list_dir_interest] ff=[name.split('_')[1] for name in list_dir_interest] dates_datetime=[datetime(year=int(ss[i][:4]),month=int(ss[i][4:6]),day=int(ss[i][6:8]),hour=int(ff[i][0:2]),minute=int(ff[i][2:4])) for i in range(len(list_dir_interest))] dates_datetime.sort() dates_datetime_chosen=dates_datetime[12:14] dates = [f'{0 if date.month<10 else ""}{date.month}{0 if date.day<10 else ""}{date.day}_{0 if date.hour<10 else ""}{date.hour}{0 if date.minute<10 else ""}{date.minute}' for date in dates_datetime_chosen] dilateds=[] skels = [] skel_docs = [] graph_pos=[] contours=[] half_circles=[] for date in dates: directory_name=f'2020{date}_Plate{0 if plate<10 else ""}{plate}' path_snap='/scratch/shared/mrozemul/Fiji.app/'+directory_name skel_info = read_mat(path_snap+'/Analysis/skeleton.mat') skel = skel_info['skeleton'] contour = skel_info['contour'] half_circle = skel_info['half_circle'] skels.append(skel) skel_doc = sparse_to_doc(skel) skel_docs.append(skel_doc) contours.append(contour) half_circles.append(half_circle) skeleton1,skeleton2 = skel_docs[0],skel_docs[1] 
skels[0].shape factor = 10 shape=skels[0].shape final_picture = np.zeros(shape=(shape[0]//factor,shape[1]//factor)) for pixel in skeleton1.keys(): x=pixel[0]//factor y=pixel[1]//factor final_picture[x,y]+=1 factor = 10 shape=skels[0].shape final_picture = np.zeros(shape=(shape[0]//factor,shape[1]//factor)) for pixel in skeleton1.keys(): x=pixel[0]//factor y=pixel[1]//factor final_picture[x,y]+=1 skelet_posX = np.transpose(np.array(final_picture.nonzero())) final_picture = np.zeros(shape=(shape[0]//factor,shape[1]//factor)) for pixel in skeleton2.keys(): x=pixel[0]//factor y=pixel[1]//factor final_picture[x,y]+=1 skelet_posY = np.transpose(np.array(final_picture.nonzero())) samples = np.random.choice(skelet_posX.shape[0],5000) X = np.transpose(skelet_posX[samples,:]) samples = np.random.choice(skelet_posY.shape[0],5000) Y = np.transpose(skelet_posY[samples,:]) # + # skelet_pos = np.array(list(skeleton1.keys())) # samples = np.random.choice(skelet_pos.shape[0],5000) # X = np.transpose(skelet_pos[samples,:]) # skelet_pos = np.array(list(skeleton2.keys())) # samples = np.random.choice(skelet_pos.shape[0],5000) # Y = np.transpose(skelet_pos[samples,:]) # - fig=plt.figure(figsize=(10,9)) ax = fig.add_subplot(111) ax.scatter(X[0,:],X[1,:]) ax.scatter(Y[0,:],Y[1,:]) # + jupyter={"outputs_hidden": true} reg = rigid_registration(**{'X': np.transpose(X.astype(float)), 'Y': np.transpose(Y.astype(float)),'scale': False}) out = reg.register() Rfound = reg.R[0:2,0:2] tfound= np.dot(Rfound,reg.t[0:2]) # - fig=plt.figure(figsize=(10,9)) ax = fig.add_subplot(111) Yrep=np.transpose(np.transpose(np.dot(Rfound,np.transpose(np.transpose(X))))-tfound) ax.scatter(np.transpose(Y)[:,0],np.transpose(Y)[:,1]) ax.scatter(np.transpose(Yrep)[:,0],np.transpose(Yrep)[:,1]) Rfound,tfound t_init=-tfound*factor Rot_init= Rfound skelet_pos = np.array(list(skeleton1.keys())) X = np.transpose(skelet_pos) skelet_pos = np.array(list(skeleton2.keys())) Y = np.transpose(skelet_pos) 
fig=plt.figure(figsize=(10,9)) ax = fig.add_subplot(111) ax.scatter(X[0,:],X[1,:]) ax.scatter(Y[0,:],Y[1,:]) Xex = np.transpose(np.transpose(np.dot(Rot_init,X))+t_init) fig=plt.figure(figsize=(10,9)) ax = fig.add_subplot(111) ax.scatter(Xex[0,:],Xex[1,:]) ax.scatter(Y[0,:],Y[1,:]) X = np.insert(X, 2, values=0, axis=0) Y = np.insert(Y, 2, values=0, axis=0) print(X.shape,Y.shape) vectorX = o3d.utility.Vector3dVector(np.transpose(X)) vectorY = o3d.utility.Vector3dVector(np.transpose(Y)) source = o3d.geometry.PointCloud(vectorX) target = o3d.geometry.PointCloud(vectorY) threshold = 200 trans_init = np.asarray([[Rot_init[0,0], Rot_init[0,1], 0, t_init[0]], [Rot_init[1,0], Rot_init[1,1], 0, t_init[1]], [0, 0, 1, 0], [0.0, 0.0, 0.0, 1.0]]) reg_p2p = o3d.registration.registration_icp( source, target, threshold, trans_init, o3d.registration.TransformationEstimationPointToPoint()) print(reg_p2p) Rfound2 = reg_p2p.transformation[0:2,0:2] tfound2 = reg_p2p.transformation[0:2,3] print(Rfound2,tfound2) X,Y=X[0:2,:],Y[0:2,:] Yrep=np.transpose(np.transpose(np.dot(Rfound2,X))+tfound2) fig=plt.figure(figsize=(10,9)) ax = fig.add_subplot(111) ax.scatter(np.transpose(Yrep)[:,0],np.transpose(Yrep)[:,1]) ax.scatter(np.transpose(Y)[:,0],np.transpose(Y)[:,1]) nx_graph1,pos1 = generate_nx_graph(from_sparse_to_graph(skeleton1)) nx_graph2,pos2 = generate_nx_graph(from_sparse_to_graph(skeleton2)) pruned1 = prune_graph(nx_graph1) pruned2 = prune_graph(nx_graph2) t_init=-tfound Rot_init= Rfound X = np.transpose(np.array([pos1[node] for node in pruned1 if pruned1.degree(node)==3])) Y = np.transpose(np.array([pos2[node] for node in pruned2 if pruned2.degree(node)==3])) fig=plt.figure(figsize=(10,9)) ax = fig.add_subplot(111) ax.scatter(X[0,:],X[1,:]) ax.scatter(Y[0,:],Y[1,:]) Xex = np.transpose(np.transpose(np.dot(Rot_init,X))+t_init) fig=plt.figure(figsize=(10,9)) ax = fig.add_subplot(111) ax.scatter(Xex[0,:],Xex[1,:]) ax.scatter(Y[0,:],Y[1,:]) X = np.insert(X, 2, values=0, axis=0) Y = 
np.insert(Y, 2, values=0, axis=0) print(X.shape,Y.shape) vectorX = o3d.utility.Vector3dVector(np.transpose(X)) vectorY = o3d.utility.Vector3dVector(np.transpose(Y)) source = o3d.geometry.PointCloud(vectorX) target = o3d.geometry.PointCloud(vectorY) threshold = 200 trans_init = np.asarray([[Rot_init[0,0], Rot_init[0,1], 0, t_init[0]], [Rot_init[1,0], Rot_init[1,1], 0, t_init[1]], [0, 0, 1, 0], [0.0, 0.0, 0.0, 1.0]]) reg_p2p = o3d.registration.registration_icp( source, target, threshold, trans_init, o3d.registration.TransformationEstimationPointToPoint()) print(reg_p2p) Rfound = reg_p2p.transformation[0:2,0:2] tfound = reg_p2p.transformation[0:2,3] print(Rfound,tfound) X,Y=X[0:2,:],Y[0:2,:] Yrep=np.transpose(np.transpose(np.dot(Rfound,X))+tfound) fig=plt.figure(figsize=(10,9)) ax = fig.add_subplot(111) ax.scatter(np.transpose(Yrep)[:,0],np.transpose(Yrep)[:,1]) ax.scatter(np.transpose(Y)[:,0],np.transpose(Y)[:,1])
amftrack/notebooks/development/First_realignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="ojm_6E9f9Kcf" # # Wen CNN # # Simulate the CNN approach of Wen et al. 2019. # # Notebook 101 ran to completion on PC. Now run more epochs on CoLab. # + colab={"base_uri": "https://localhost:8080/"} id="RmPF4h_YI_sT" outputId="562ab8f8-8daa-47c6-85bd-604150a0f15c" import time def show_time(): t = time.time() print(time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))) show_time() # + id="VQY7aTj29Kch" import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.utils import shuffle from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from keras.models import Sequential from keras.layers import Conv2D,MaxPooling2D from keras.layers import Dense,Embedding,Dropout from keras.layers import Flatten,TimeDistributed from keras.losses import BinaryCrossentropy from keras.callbacks import ModelCheckpoint from keras.models import load_model # + colab={"base_uri": "https://localhost:8080/"} id="xUxEB53HI_sk" outputId="6f456605-7b53-459a-e3dd-0531531cece6" import sys IN_COLAB = False try: from google.colab import drive IN_COLAB = True except: pass if IN_COLAB: print("On Google CoLab, mount cloud-local file, get our code from GitHub.") PATH='/content/drive/' #drive.mount(PATH,force_remount=True) # hardly ever need this drive.mount(PATH) # Google will require login credentials DATAPATH=PATH+'My Drive/data/' # must end in "/" import requests r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py') with open('RNA_describe.py', 'w') as f: f.write(r.text) from RNA_describe import ORF_counter r = 
requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/GenCodeTools.py') with open('GenCodeTools.py', 'w') as f: f.write(r.text) from GenCodeTools import GenCodeLoader r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/KmerTools.py') with open('KmerTools.py', 'w') as f: f.write(r.text) from KmerTools import KmerTools else: print("CoLab not working. On my PC, use relative paths.") DATAPATH='data/' # must end in "/" sys.path.append("..") # append parent dir in order to use sibling dirs from SimTools.RNA_describe import ORF_counter from SimTools.GenCodeTools import GenCodeLoader from SimTools.KmerTools import KmerTools BESTMODELPATH=DATAPATH+"BestModel-Wen" # saved on cloud instance and lost after logout LASTMODELPATH=DATAPATH+"LastModel-Wen" # saved on Google Drive but requires login # + [markdown] id="8buAhZRfI_sp" # ## Data Load # + colab={"base_uri": "https://localhost:8080/"} id="h94xptH1tI82" outputId="11c3e0ae-c021-4505-bbc1-690013b6b90c" PC_TRAINS=8000 NC_TRAINS=8000 PC_TESTS=2000 NC_TESTS=2000 PC_LENS=(200,4000) NC_LENS=(200,4000) # Wen used 3500 for hyperparameter, 3000 for train PC_FILENAME='gencode.v38.pc_transcripts.fa.gz' NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz' PC_FULLPATH=DATAPATH+PC_FILENAME NC_FULLPATH=DATAPATH+NC_FILENAME MAX_K = 3 # With K={1,2,3}, num K-mers is 4^3 + 4^2 + 4^1 = 84. # Wen specified 17x20 which is impossible. # The factors of 84 are 1, 2, 3, 4, 6, 7, 12, 14, 21, 28, 42 and 84. 
FRQ_CNT=84 ROWS=7 COLS=FRQ_CNT//ROWS SHAPE2D = (ROWS,COLS,1) EPOCHS=100 # 1000 # 200 SPLITS=5 FOLDS=5 # make this 5 for serious testing show_time() # + colab={"base_uri": "https://localhost:8080/"} id="VNnPagXjtI85" outputId="f9b743d0-ebd9-41a1-8bf5-18dc19ccf899" loader=GenCodeLoader() loader.set_label(1) loader.set_check_utr(True) pcdf=loader.load_file(PC_FULLPATH) print("PC seqs loaded:",len(pcdf)) loader.set_label(0) loader.set_check_utr(False) ncdf=loader.load_file(NC_FULLPATH) print("NC seqs loaded:",len(ncdf)) show_time() # + colab={"base_uri": "https://localhost:8080/"} id="ShtPw_fGtI9E" outputId="32e67c42-771e-4eeb-eaef-3a658bd5c721" def dataframe_length_filter(df,low_high): (low,high)=low_high # The pandas query language is strange, # but this is MUCH faster than loop & drop. return df[ (df['seqlen']>=low) & (df['seqlen']<=high) ] def dataframe_extract_sequence(df): return df['sequence'].tolist() pc_all = dataframe_extract_sequence( dataframe_length_filter(pcdf,PC_LENS)) nc_all = dataframe_extract_sequence( dataframe_length_filter(ncdf,NC_LENS)) show_time() print("PC seqs pass filter:",len(pc_all)) print("NC seqs pass filter:",len(nc_all)) # Garbage collection to reduce RAM footprint pcdf=None ncdf=None # + [markdown] id="CCNh_FZaI_sv" # ## Data Prep # + colab={"base_uri": "https://localhost:8080/"} id="V91rP2osI_s1" outputId="6735db3b-800e-4705-a0c5-4a93e2fec103" pc_train=pc_all[:PC_TRAINS] nc_train=nc_all[:NC_TRAINS] print("PC train, NC train:",len(pc_train),len(nc_train)) pc_test=pc_all[PC_TRAINS:PC_TRAINS+PC_TESTS] nc_test=nc_all[NC_TRAINS:NC_TRAINS+PC_TESTS] print("PC test, NC test:",len(pc_test),len(nc_test)) # Garbage collection pc_all=None nc_all=None # + colab={"base_uri": "https://localhost:8080/"} id="FfyPeInGI_s4" outputId="a082ff0a-524a-47bc-998b-b191ffe4dbc4" def prepare_x_and_y(seqs1,seqs0): len1=len(seqs1) len0=len(seqs0) total=len1+len0 L1=np.ones(len1,dtype=np.int8) L0=np.zeros(len0,dtype=np.int8) S1 = np.asarray(seqs1) S0 = 
np.asarray(seqs0) all_labels = np.concatenate((L1,L0)) all_seqs = np.concatenate((S1,S0)) # interleave (uses less RAM than shuffle) for i in range(0,len0): all_labels[i*2] = L0[i] all_seqs[i*2] = S0[i] all_labels[i*2+1] = L1[i] all_seqs[i*2+1] = S1[i] return all_seqs,all_labels # use this to test unshuffled X,y = shuffle(all_seqs,all_labels) # sklearn.utils.shuffle return X,y Xseq,y=prepare_x_and_y(pc_train,nc_train) #print(Xseq[:3]) #print(y[:3]) show_time() # + colab={"base_uri": "https://localhost:8080/"} id="LWLixZOfI_s7" outputId="01414908-7243-4c17-8f5f-e1eac184d61b" def seqs_to_kmer_freqs(seqs,max_K): tool = KmerTools() # from SimTools collection = [] for seq in seqs: counts = tool.make_dict_upto_K(max_K) # Last param should be True when using Harvester. counts = tool.update_count_one_K(counts,max_K,seq,True) # Given counts for K=3, Harvester fills in counts for K=1,2. counts = tool.harvest_counts_from_K(counts,max_K) fdict = tool.count_to_frequency(counts,max_K) freqs = list(fdict.values()) collection.append(freqs) return np.asarray(collection) Xfrq=seqs_to_kmer_freqs(Xseq,MAX_K) # Garbage collection Xseq = None show_time() # + colab={"base_uri": "https://localhost:8080/"} id="WBBY4rH3cwT2" outputId="d81b6c3d-37c1-400a-9710-95d998f0fb45" def reshape(frequency_matrix): seq_cnt,frq_cnt=Xfrq.shape # CNN inputs require a last dimension = numbers per pixel. # For RGB images it is 3. # For our frequency matrix it is 1. 
new_matrix = frequency_matrix.reshape(seq_cnt,ROWS,COLS,1) return new_matrix print("Xfrq") print("Xfrq type",type(Xfrq)) print("Xfrq shape",Xfrq.shape) Xfrq2D = reshape(Xfrq) print("Xfrq2D shape",Xfrq2D.shape) # + [markdown] id="dJ4XhrzGI_s-" # ## Build and train a neural network # + colab={"base_uri": "https://localhost:8080/"} id="o5NPW7zKI_tC" outputId="7ca976c4-8826-4d4e-d354-6da4519c43fc" def make_DNN(shape): dt=np.float32 print("make_DNN") print("input shape:",shape) WIDTH=(3,3) STRIDE=(1,1) dnn = Sequential() dnn.add(Conv2D(filters=32,kernel_size=WIDTH,strides=STRIDE,activation="relu",padding="same", input_shape=shape)) dnn.add(Conv2D(filters=64,kernel_size=WIDTH,strides=STRIDE,activation="relu",padding="same")) dnn.add(MaxPooling2D()) dnn.add(Flatten()) dnn.add(Dropout(0.25)) dnn.add(Dense(128,activation="sigmoid",dtype=dt)) dnn.add(Dropout(0.50)) dnn.add(Dense(1,activation="sigmoid",dtype=dt)) dnn.compile(optimizer='adam', # adadelta doesn't work as well loss=BinaryCrossentropy(from_logits=False), metrics=['accuracy']) # add to default metrics=loss dnn.build(input_shape=shape) return dnn model = make_DNN(SHAPE2D) print(model.summary()) # + id="7xBalIrXrEAS" def do_cross_validation(X,y,shape): cv_scores = [] fold=0 mycallbacks = [ModelCheckpoint( filepath=BESTMODELPATH, save_best_only=True, monitor='val_accuracy', mode='max')] # When shuffle=True, the valid indices are a random subset. splitter = KFold(n_splits=SPLITS,shuffle=True) model = None for train_index,valid_index in splitter.split(X): if fold < FOLDS: fold += 1 X_train=X[train_index] # inputs for training y_train=y[train_index] # labels for training X_valid=X[valid_index] # inputs for validation y_valid=y[valid_index] # labels for validation print("MODEL") # Call constructor on each CV. Else, continually improves the same model. 
model = model = make_DNN(shape) print("FIT") # model.fit() implements learning start_time=time.time() history=model.fit(X_train, y_train, epochs=EPOCHS, verbose=1, # ascii art while learning callbacks=mycallbacks, # called at end of each epoch validation_data=(X_valid,y_valid)) end_time=time.time() elapsed_time=(end_time-start_time) print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time)) # print(history.history.keys()) # all these keys will be shown in figure pd.DataFrame(history.history).plot(figsize=(8,5)) plt.grid(True) plt.gca().set_ylim(0,1) # any losses > 1 will be off the scale plt.show() return model # parameters at end of training # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="BM6UjBzrrEAV" outputId="a3c101bd-eaaf-439c-d747-9d00c5adf7d6" show_time() last_model = do_cross_validation(Xfrq2D,y,SHAPE2D) last_model.save(LASTMODELPATH) # + [markdown] id="OsytC9VUrEAX" # ## Test the neural network # + id="8hIqe1r1rEAa" def show_test_AUC(model,X,y): ns_probs = [0 for _ in range(len(y))] bm_probs = model.predict(X) ns_auc = roc_auc_score(y, ns_probs) bm_auc = roc_auc_score(y, bm_probs) ns_fpr, ns_tpr, _ = roc_curve(y, ns_probs) bm_fpr, bm_tpr, _ = roc_curve(y, bm_probs) plt.plot(ns_fpr, ns_tpr, linestyle='--', label='Guess, auc=%.4f'%ns_auc) plt.plot(bm_fpr, bm_tpr, marker='.', label='Model, auc=%.4f'%bm_auc) plt.title('ROC') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.legend() plt.show() print("%s: %.2f%%" %('AUC',bm_auc*100.0)) def show_test_accuracy(model,X,y): scores = model.evaluate(X, y, verbose=0) print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100)) # + colab={"base_uri": "https://localhost:8080/", "height": 469} id="tGf2PcxRC8jT" outputId="6ac6008c-be16-41d1-934d-cee0fef63334" print("Accuracy on test data.") print("Prepare...") show_time() Xseq,y=prepare_x_and_y(pc_test,nc_test) print("Extract K-mer features...") show_time() Xfrq=seqs_to_kmer_freqs(Xseq,MAX_K) Xfrq2D = reshape(Xfrq) 
print("Plot...") show_time() show_test_AUC(last_model,Xfrq2D,y) show_test_accuracy(last_model,Xfrq2D,y) show_time() # + id="-cSLTNfzrEAo"
Notebooks/Wen_CNN_106_colab.ipynb
/ -*- coding: utf-8 -*- / --- / jupyter: / jupytext: / text_representation: / extension: .q / format_name: light / format_version: '1.5' / jupytext_version: 1.14.4 / --- / + [markdown] cell_id="00001-a4b46aac-1fe9-49fe-99ff-c449f89c444e" deepnote_cell_type="markdown" tags=[] / **A) k-nak és d-nek van közös lánya:** / $\exists x(N(x)\land P(k,x)\land P(d,x))$ / / > Jó megoldás! / + [markdown] cell_id="00002-1c5aeb1a-6f45-4a23-9f9e-49c6a83bb3cf" deepnote_cell_type="markdown" tags=[] / **B) Csak a férfiak között vannak gyermektelenek:** / $\exists x(\nexists y(P(x,y))\Leftrightarrow F(x))$ / / > Így azt mondod, hogy létezik olyan személy, aki akkor és csak is akkor férfi, ha nincs gyereke. / + [markdown] cell_id="00003-4380f478-9ae3-4dda-b54d-52d5e29b4f8a" deepnote_cell_type="markdown" tags=[] / **C) Vannak egynemű testvérek:** / $\exists x(P(y,x) \land \exists z(P(y,z)) \land (F(x)\Leftrightarrow F(z)) \oplus (N(x) \Leftrightarrow N(z)))$ / / > * Használod az $y$ személyt, de ebben a kontextusban helytelenül. (Mivel nincs előre megadva) / > * Ha azt írod hogy $F(x) \Leftrightarrow F(z)$, akkor felesleges kikötni a másik oldalt is. Hisz a ha az egyikük nem férfi (tehát nő), akkor a másiknak is annak kell lennie. / > * Ha úgy definiálsz valakit hogy $\exists x(\dots)$, akkor az $x$ csak a zárójelen belül értelmezhető. / + [markdown] cell_id="00004-7d58979a-aaa9-4f07-9b3f-af8ba3b78046" deepnote_cell_type="markdown" tags=[] / **D) Minden nőnek van gyereke:<br>$\forall x(\exists y(P(x,y))\Leftrightarrow N(x))$** / / > * Így azt mondod, hogy ha valakinek van gyereke az csak is akkor lehet, ha az illető nő. *(márpedig reméljük hogy a fogantatás után nem kaszaboltak le minden apukát)* / > * Az eredeti állítás semmit nem mond a férfiakról. / > * Implikációt érdemes használni, hisz azzal tehetsz megállapítást csak az egyik nemre is. 
/ + [markdown] cell_id="00005-1a11f64c-c5a7-4c69-a6a2-5f44adfd80e8" deepnote_cell_type="markdown" tags=[] / **E) k-nak van lány és fiú unokája is:** / $\exists x(P(y,x)\Leftrightarrow P(k,y)\land N(x))\land \exists a (P(z,a)\Leftrightarrow P(k,z)\land (F(a)))$ / / > Használod az $y$ személyt, de ebben a kontextusban helytelenül. (Mivel nincs előre megadva) / + [markdown] created_in_deepnote_cell=true deepnote_cell_type="markdown" tags=[] / <a style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=978e47b7-a961-4dca-a945-499e8b781a34' target="_blank"> / <img alt='Created in deepnote.com' style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' src='data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iODBweCIgaGVpZ2h0PSI4MHB4IiB2aWV3Qm94PSIwIDAgODAgODAiIHZlcnNpb249IjEuMSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayI+CiAgICA8IS0tIEdlbmVyYXRvcjogU2tldGNoIDU0LjEgKDc2NDkwKSAtIGh0dHBzOi8vc2tldGNoYXBwLmNvbSAtLT4KICAgIDx0aXRsZT5Hcm91cCAzPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IkxhbmRpbmciIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxnIGlkPSJBcnRib2FyZCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoLTEyMzUuMDAwMDAwLCAtNzkuMDAwMDAwKSI+CiAgICAgICAgICAgIDxnIGlkPSJHcm91cC0zIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgxMjM1LjAwMDAwMCwgNzkuMDAwMDAwKSI+CiAgICAgICAgICAgICAgICA8cG9seWdvbiBpZD0iUGF0aC0yMCIgZmlsbD0iIzAyNjVCNCIgcG9pbnRzPSIyLjM3NjIzNzYyIDgwIDM4LjA0NzY2NjcgODAgNTcuODIxNzgyMiA3My44MDU3NTkyIDU3LjgyMTc4MjIgMzIuNzU5MjczOSAzOS4xNDAyMjc4IDMxLjY4MzE2ODMiPjwvcG9seWdvbj4KICAgICAgICAgICAgICAgIDxwYXRoIGQ9Ik0zNS4wMDc3MTgsODAgQzQyLjkwNjIwMDcsNzYuNDU0OTM1OCA0Ny41NjQ5MTY3LDcxLjU0MjI2NzEgNDguOTgzODY2LDY1LjI2MTk5MzkgQzUxLjExMjI4OTksNTUuODQxNTg0MiA0MS42NzcxNzk1LDQ5LjIxMjIyODQgMjUuNjIzOTg0Niw0OS4yMTIy
Mjg0IEMyNS40ODQ5Mjg5LDQ5LjEyNjg0NDggMjkuODI2MTI5Niw0My4yODM4MjQ4IDM4LjY0NzU4NjksMzEuNjgzMTY4MyBMNzIuODcxMjg3MSwzMi41NTQ0MjUgTDY1LjI4MDk3Myw2Ny42NzYzNDIxIEw1MS4xMTIyODk5LDc3LjM3NjE0NCBMMzUuMDA3NzE4LDgwIFoiIGlkPSJQYXRoLTIyIiBmaWxsPSIjMDAyODY4Ij48L3BhdGg+CiAgICAgICAgICAgICAgICA8cGF0aCBkPSJNMCwzNy43MzA0NDA1IEwyNy4xMTQ1MzcsMC4yNTcxMTE0MzYgQzYyLjM3MTUxMjMsLTEuOTkwNzE3MDEgODAsMTAuNTAwMzkyNyA4MCwzNy43MzA0NDA1IEM4MCw2NC45NjA0ODgyIDY0Ljc3NjUwMzgsNzkuMDUwMzQxNCAzNC4zMjk1MTEzLDgwIEM0Ny4wNTUzNDg5LDc3LjU2NzA4MDggNTMuNDE4MjY3Nyw3MC4zMTM2MTAzIDUzLjQxODI2NzcsNTguMjM5NTg4NSBDNTMuNDE4MjY3Nyw0MC4xMjg1NTU3IDM2LjMwMzk1NDQsMzcuNzMwNDQwNSAyNS4yMjc0MTcsMzcuNzMwNDQwNSBDMTcuODQzMDU4NiwzNy43MzA0NDA1IDkuNDMzOTE5NjYsMzcuNzMwNDQwNSAwLDM3LjczMDQ0MDUgWiIgaWQ9IlBhdGgtMTkiIGZpbGw9IiMzNzkzRUYiPjwvcGF0aD4KICAgICAgICAgICAgPC9nPgogICAgICAgIDwvZz4KICAgIDwvZz4KPC9zdmc+' > </img> / Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
members/dani/sebi_jav.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="d8hVsUB-BKGN"
# # Concatenating and appending data sets

# + [markdown] id="5zHnpji-BKGR"
# ## The white wine / red wine example

# + [markdown] id="oOqIG4SJBKGS"
# Data Set Information:
#
# These data are the results of a chemical analysis of wines grown in the same region in Italy but derived from three different cultivars. The analysis determined the quantities of 13 constituents found in each of the three types of wines.
#
# I think that the initial data set had around 30 variables, but for some reason I only have the 13 dimensional version. I had a list of what the 30 or so variables were, but a.) I lost it, and b.), I would not know which 13 variables are included in the set.
#
# The attributes are (donated by <NAME>, riclea '@' anchem.unige.it )
#
# 1. Alcohol
# 2. Malic acid
# 3. Ash
# 4. Alcalinity of ash
# 5. Magnesium
# 6. Total phenols
# 7. Flavanoids
# 8. Nonflavanoid phenols
# 9. Proanthocyanins
# 10. Color intensity
# 11. Hue
# 12. OD280/OD315 of diluted wines
# 13. Proline
#
# In a classification context, this is a well posed problem with "well behaved" class structures. A good data set for first testing of a new classifier, but not very challenging.
#
# Attribute Information:
#
# All attributes are continuous
#
# No statistics available, but suggest to standardise variables for certain uses (e.g. for us with classifiers which are NOT scale invariant)
#
# NOTE: 1st attribute is class identifier (1-3)

# + id="UXlFXcqiBKGS"
import pandas as pd

# + id="-i4hdOKOBKGT" outputId="cc91d97d-73db-4ada-bace-7af5fe758a7b"
red_wine = pd.read_csv("../datasets/wine/winequality-red.csv", sep=";")
red_wine.head()

# + id="Nk1xsua_BKGU" outputId="a5a7dfc1-3dba-4628-e785-c0f6871d617c"
red_wine.columns.values

# + id="UUjc9zu3BKGU" outputId="765b5246-95e8-4ffb-b6ad-ed8d7d3356c6"
red_wine.shape

# + id="cu94wWmzBKGV" outputId="856fbd07-0ee3-4193-c666-ed1913247765"
white_wine = pd.read_csv("../datasets/wine/winequality-white.csv", sep=";")
white_wine.head()

# + id="7xtjp9rxBKGV" outputId="575d39d0-477f-44d0-a15a-720dfcb63e3f"
white_wine.columns.values

# + id="7sM76lwhBKGV" outputId="2aa967e1-16b3-47f9-a678-8184122384c8"
white_wine.shape

# + [markdown] id="bF8WBU0OBKGV"
# In pandas there are two axes:
# * axis = 0 refers to the rows (the index) — concatenating along it stacks data frames vertically
# * axis = 1 refers to the columns — concatenating along it places data frames side by side

# + id="mGy_f0m6BKGW"
# Concatenate the two datasets:
# axis=0 stacks them vertically (white-wine rows appended below the red-wine rows)
wine_data = pd.concat([red_wine, white_wine], axis=0)

# + id="rWdvSaIGBKGW" outputId="22b91d2f-6aab-411e-d21d-6b9701ad9cb7"
wine_data.shape

# + id="Y9DnVpTZBKGW" outputId="aa9f21be-0946-481e-c1f3-31f636f6bea8"
wine_data.head()

# + id="c1ffyXTYBKGW"
data1 = wine_data.head(10)
data2 = wine_data[300:310]
data3 = wine_data.tail(10)

# + id="FtjFbN0QBKGX"
wine_scramble = pd.concat([data1, data2, data3], axis=0)

# + id="ywPh2yfPBKGX" outputId="01729b80-2876-41ab-9b84-9e2f99436576"
wine_scramble

# + id="xQe55n07BKGX" outputId="2aa29513-30a9-494a-e79a-837cee733bf3"
wine_scramble = pd.concat([data2, data1, data3], axis=0)
wine_scramble

# + [markdown] id="14QUaPbjBKGX"
# ## Distributed data

# + id="p-fx1NgYBKGY" outputId="ce97838c-8ae1-4166-fc01-d36a1a5e6bb8"
import pandas as pd
data = pd.read_csv("../datasets/distributed-data/001.csv")
data.head()

# + id="9S-GJqYTBKGY" outputId="79d18e06-3ae6-4873-8fc8-39c219dd71ab"
data.shape

# + [markdown] id="grOqeGo_BKGY"
# * Import the first file
# * Loop over each and every one of the remaining files.
# * Important: the file names must follow a consistent numbering scheme
# * Import the files one by one
# * Each of them must be appended to the end of the first file we already loaded
# * Repeat the loop until no files are left

# + id="jMtx-9LxBKGY"
filepath = "../datasets/distributed-data/"
data = pd.read_csv("../datasets/distributed-data/001.csv")
final_length = len(data)
for i in range(2, 333):
    # build the zero-padded, three-digit file name (002 ... 332)
    if i < 10:
        filename = "00" + str(i)
    elif 10 <= i < 100:
        filename = "0" + str(i)
    else:
        filename = str(i)
    file = filepath + filename + ".csv"
    temp_data = pd.read_csv(file)
    final_length += len(temp_data)
    data = pd.concat([data, temp_data], axis=0)

# + id="orhU4-D5BKGZ" outputId="d81eeefa-5e29-4ad0-e137-60052e5a2912"
data.shape

# + id="e7YDINpZBKGZ" outputId="b1a6c577-b2ac-48ed-b61a-5de6c1d3ce73"
data.tail()

# + id="jQ0JBTWOBKGZ" outputId="1ce5a6ab-3f01-4b30-d4f8-121264f19c21"
data.head()

# + id="NYc6GCJQBKGZ" outputId="d74f112d-e02f-4c76-e7b5-d77f5f3ae62d"
final_length == data.shape[0]

# + [markdown] id="aJH5tzNvBKGZ"
# # Dataset joins

# + id="0rs9eu8XBKGa"
filepath = "../datasets/athletes/"

# + id="hBck11fiBKGa"
data_main = pd.read_csv(filepath + "Medals.csv", encoding="ISO-8859-1")

# + id="iWeLUyUxBKGa" outputId="f7b11880-42ee-41e2-cd6b-a788503237a1"
data_main.head()

# + id="E8GZTjH9BKGa" outputId="0a08204a-b787-47ac-d357-3825a0031e8b"
# The unique athletes of the dataset, as a list
a = data_main["Athlete"].unique().tolist()
len(a)

# + id="CLxZNS-DBKGa" outputId="519af661-812e-46ed-922f-b1af16dc5e0b"
data_main.shape

# + id="Y5cE7O39BKGb"
data_country = pd.read_csv(filepath + "Athelete_Country_Map.csv", encoding="ISO-8859-1")

# + id="aBlEZz-KBKGb" outputId="086785ea-a855-49e5-baa3-012ea2f16215"
data_country.head()

# + id="2KR_oxtdBKGb" outputId="4c6f4e8d-33c0-423d-a79e-b6972d898c72"
len(data_country)

# + id="K1s4IBhkBKGb" outputId="41dc5597-d521-4896-88f8-bdbb0b3f511d"
data_country[data_country["Athlete"] == "Aleksand<NAME>"]

# + id="z3djgR0qBKGb"
data_sports = pd.read_csv(filepath + "Athelete_Sports_Map.csv", encoding="ISO-8859-1")

# + id="aNofkzIABKGc" outputId="3ed73bc8-f973-4cc3-fd59-0f095239a68c"
data_sports.head()

# + id="l0JHoFCYBKGc" outputId="48990f5d-0014-4e7e-cbbd-88c0fd6c8ac3"
len(data_sports)

# + id="Sy54JzTOBKGc" outputId="5abe828d-ce2a-4976-910e-ce8f1a03b984"
data_sports[(data_sports["Athlete"] == "Chen Jing") |
            (data_sports["Athlete"] == "<NAME>") |
            (data_sports["Athlete"] == "<NAME>")]

# + id="YCsYQLfhBKGc"
# Filter the data set to remove the duplicates;
# this is done with "drop_duplicates"
data_country_dp = data_country.drop_duplicates(subset="Athlete")

# + id="MFso7fLgBKGc" outputId="0c5521f0-fed4-4da2-eaf5-bfff00b6b0a6"
len(data_country_dp) == len(a)

# + id="qO6485uuBKGc"
# We must specify the left and the right data set,
# as well as the left and right join keys.
# If `how` is not given, pd.merge performs an inner join.
data_main_country = pd.merge(left=data_main, right=data_country_dp,
                             left_on="Athlete", right_on="Athlete")

# + id="RajM4BXjBKGd" outputId="13e46a6a-7d96-435a-cf10-87e0bf07cef5"
data_main_country.head()

# + id="VgscxkLZBKGd" outputId="4497c75c-6aaa-439a-da61-e580505c6aeb"
data_main_country.shape

# + id="W-YaivY1BKGd" outputId="107c7bba-911d-412e-b047-4c639803af31"
data_main_country[data_main_country["Athlete"] == "Aleksandar Ciric"]

# + id="wuKdsIqnBKGd"
data_sports_dp = data_sports.drop_duplicates(subset="Athlete")

# + id="e1AL5_8ZBKGd" outputId="7b07d540-ef81-492d-e4dc-5d2340a4d599"
len(data_sports_dp) == len(a)

# + id="JDWuEk9lBKGe"
data_final = pd.merge(left=data_main_country, right=data_sports_dp,
                      left_on="Athlete", right_on="Athlete")

# + id="9rfxsWy7BKGe" outputId="2d9932bc-3819-4b84-8a3d-849c4e9a1055"
data_final.head()

# + id="1VMGKJPFBKGe" outputId="9b23fd58-337f-40c6-a2d6-3ce980dc443b"
data_final.shape

# + [markdown] id="8KYsVZyTBKGe"
# ## Types of joins

# + id="G9b6Q2ElBKGe"
from IPython.display import Image
import numpy as np

# + [markdown] id="Clhep85OBKGf"
# **Inner Join <= A (Left Join), B (Right Join) <= Outer Join**

# + id="5K0bVKaRBKGf"
out_athletes = np.random.choice(data_main["Athlete"], size=6, replace=False)

# + id="ZsGV7PRHBKGf" outputId="23afb324-0d7a-4df7-ae35-c9350eadcc4e"
out_athletes

# + id="bY3DTBENBKGh"
data_country_dlt = data_country_dp[(~data_country_dp["Athlete"].isin(out_athletes)) &
                                   (data_country_dp["Athlete"] != "<NAME>")]
data_sports_dlt = data_sports_dp[(~data_sports_dp["Athlete"].isin(out_athletes)) &
                                 (data_sports_dp["Athlete"] != "<NAME>")]
data_main_dlt = data_main[(~data_main["Athlete"].isin(out_athletes)) &
                          (data_main["Athlete"] != "<NAME>")]

# + id="3z_D2HlHBKGh" outputId="841628e9-89cd-4716-fc66-05e896c5aa50"
len(data_country_dlt)

# + id="hbNtQ3lCBKGh" outputId="577d1e7f-c3ce-4136-c04a-e5be84a1ae5c"
len(data_sports_dlt)

# + id="_PTMWaOuBKGh" outputId="6c35d84b-291e-4660-d151-1ca99aad7101"
len(data_main_dlt)

# + [markdown] id="fVA2TLKyBKGi"
# ## Inner Join
# * Returns a data frame with the rows that have a value both in the first and in the second data frame being joined
# * The number of rows equals the number of rows **common** to both data sets
# * Data Set A has 60 rows
# * Data Set B has 50 rows
# * They share 30 rows
# * Then A Inner Join B has 30 rows
# * In set-theory terms, it is the intersection of the two sets

# + id="3_99SKQmBKGi" outputId="16ba2eb1-8c42-4139-b6e1-aae6861a874e"
Image(filename="resources/inner-join.png")

# + id="nP_2fcR-BKGi"
# data_main contains all the info;
# data_country_dlt is missing the info for 7 athletes
merged_inner = pd.merge(left=data_main, right=data_country_dlt, how="inner",
                        left_on="Athlete", right_on="Athlete")

# + id="mF65AUuVBKGj" outputId="44059d57-bbf1-4289-c087-9c827ddcea88"
len(merged_inner)

# + id="aBEd_bamBKGj" outputId="da88e060-d987-4c84-8b6d-42d1d8daf515"
merged_inner.head()

# + [markdown] id="04atQHndBKGj"
# ## Left Join
# * Returns a data frame with the rows that have a value in the left dataset, whether or not they have a match in the right one.
# * Rows of the final data frame with no matching row in the right data frame get NAs in the right data frame's columns.
# * The number of rows equals the number of rows of the left data frame
# * Data Set A has 60 rows
# * Data Set B has 50 rows
# * Then A Left Join B has 60 rows
# * In set-theory terms, it is the left data set itself which, in addition, contains the intersection inside it.

# + id="n1INbHxaBKGj" outputId="94b5ab0f-5a14-40b4-f4c3-4deb46f5372e"
Image(filename="resources/left-join.png")

# + id="UcxHM37dBKGj" outputId="3ada14c5-fcd1-4090-f86d-cbfb148711c5"
merged_left = pd.merge(left=data_main, right=data_country_dlt, how="left",
                       left_on="Athlete", right_on="Athlete")
len(merged_left)

# + id="s_XjP-alBKGk" outputId="1f9e22b7-7813-4bda-c5f0-a02af90b9ed1"
merged_left.head()

# + [markdown] id="R9Yu70FpBKGk"
# ## Right Join
# * Returns a data frame with the rows that have a value in the right dataset, whether or not they have a match in the left one.
# * Rows of the final data frame with no matching row in the left data frame get NAs in the left data frame's columns.
# * The number of rows equals the number of rows of the right data frame
# * Data Set A has 60 rows
# * Data Set B has 50 rows
# * Then A Right Join B has 50 rows
# * In set-theory terms, it is the right data set itself which, in addition, contains the intersection inside it.

# + id="Fk5hsW6yBKGk" outputId="d5cee972-ea26-4959-df07-1a3e6569e63c"
Image(filename="resources/right-join.png")

# + id="7dV46gjkBKGk" outputId="bf45958a-06c9-4f13-b363-b5080e943b1f"
merged_right = pd.merge(left=data_main_dlt, right=data_country_dp, how="right",
                        left_on="Athlete", right_on="Athlete")
len(merged_right)

# + id="Q6adbWnjBKGk" outputId="af448757-ec2a-4909-e33a-1a5e45975afa"
merged_right.tail(10)

# + [markdown] id="-dVAOvadBKGl"
# ## Outer Join
# * Returns a data frame with all rows from both data frames, replacing the absences on one side or the other with NAs in the corresponding region.
# * Rows of the final data frame with no matching row in the right (or left) data frame get NAs in the right (or left) data frame's columns.
# * The number of rows equals the maximum number of rows of both data frames
# * Data Set A has 60 rows
# * Data Set B has 50 rows
# * They share 30 rows
# * Then A Outer Join B has 60 + 50 - 30 = 80 rows
# * In set-theory terms, it is the union of the two sets.

# + id="ApT0LQl0BKGl" outputId="84a9781b-dc3c-4365-a615-c702813165be"
Image(filename="resources/outer-join.png")

# + id="fT-S4MaTBKGl"
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# build a one-row frame and concatenate it instead (same result).
data_country_jb = pd.concat(
    [data_country_dlt,
     pd.DataFrame([{"Athlete": "<NAME>", "Country": "España"}])],
    ignore_index=True,
)

# + id="aTiVNnUKBKGl" outputId="350f1f6a-c467-456a-8cd5-2d5efffb94e9"
merged_outer = pd.merge(left=data_main, right=data_country_jb, how="outer",
                        left_on="Athlete", right_on="Athlete")
len(merged_outer)

# + id="9P1xWFqEBKGm" outputId="04416638-2377-40b3-fbae-6ee65ead4baa"
merged_outer.head()

# + id="639-XVWQBKGm" outputId="cbb87f90-fd55-46a7-f66f-91a2f369a6b7"
merged_outer.tail()

# + id="Aau6pcdzBKGm" outputId="33a1ace2-486c-4023-ed8a-abc2095469d5"
len(data_main)

# + id="wG0cFSBNBKGm" outputId="6b829401-475e-4688-c696-52bedef28adb"
len(data_main_dlt)

# + id="em_QiWleBKGn" outputId="48aaa01f-a0fe-4a0e-c544-cc8e58c08d7e"
len(data_country_dp)

# + id="3I9E3FYEBKGn" outputId="90e53991-be62-49fd-deeb-7ddf46d7c544"
len(data_country_dlt)

# + id="uO0mBWdCBKGn" outputId="1bc685ac-467d-4b23-e7df-20b63332402c"
len(merged_inner)

# + id="SU9aKylTBKGn" outputId="eeb367bd-44f1-4656-a379-c4b2d59b22fe"
len(merged_left)

# + id="xGP6V26YBKGn" outputId="d8aec7ae-4f2f-4078-c07e-e3e6a064e703"
len(merged_right)

# + id="8eGipESdBKGn" outputId="874cbf66-796a-483c-a21f-12b8670ef91b"
len(merged_outer)

# + id="9xOYzY6XBKGo"
notebooks/T2 - 4 - Data Cleaning - Concatenación de datos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Solving equations # You will learn about working with matrices and linear algebra (**scipy.linalg**), including solving systems of linear equations. You will learn to find roots of linear and non-linear equations both numerically (**scipy.optimize**) and symbolically (**sympy**). # **Note:** The algorithms written here are meant to be illustrative. The scipy implementations are always both the *fastest* and the *safest* choice. # **Links:** # # 1. **scipy.linalg:** [overview](https://docs.scipy.org/doc/scipy/reference/linalg.html) + [tutorial](https://docs.scipy.org/doc/scipy/reference/tutorial/linalg.html) # 2. **sympy:** [overview](https://docs.sympy.org/latest/index.html) + [tutorial](https://docs.sympy.org/latest/tutorial/index.html#tutorial) # 3. **scipy.optimize:** [overview](https://docs.scipy.org/doc/scipy/reference/optimize.html) + [turtorial](https://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html) # + import numpy as np import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') import ipywidgets as widgets import time from scipy import linalg from scipy import optimize import sympy as sm from IPython.display import display # local module for linear algebra # %load_ext autoreload # %autoreload 2 import numecon_linalg # - # # Systems of linear equations # ## Introduction # We consider **matrix equations** with $n$ equations and $n$ unknowns: # # $$ # \begin{aligned} # Ax = b \Leftrightarrow # \begin{bmatrix}a_{11} & a_{12} & \cdots & a_{1n}\\ # a_{21} & a_{22} & \cdots & a_{2n}\\ # \vdots & \vdots & \ddots & \vdots\\ # a_{n1} & a_{n2} & \cdots & a_{nn} # \end{bmatrix}\cdot\begin{bmatrix}x_{1}\\ # x_{2}\\ # \vdots\\ # x_{n} # \end{bmatrix} & = \begin{bmatrix}b_{1}\\ # b_{2}\\ # \vdots\\ # b_{n} # \end{bmatrix} # 
\end{aligned} # $$ # # where $A$ is a square parameter matrix, $b$ is a parameter vector, and $x$ is the vector of unknowns. # A specific **example** could be: # # $$ # \begin{aligned} # Ax = b \Leftrightarrow # \begin{bmatrix} # 3 & 2 & 0 \\ # 1 & -1 & 0 \\ # 0 & 5 & 1 # \end{bmatrix} \cdot # \begin{bmatrix} # x_1 \\ # x_2 \\ # x_3 # \end{bmatrix} \,=\, # \begin{bmatrix} # 2 \\ # 4 \\ # -1 # \end{bmatrix} # \end{aligned} # $$ # **How to solve this?** A = np.array([[3.0, 2.0, 0.0], [1.0, -1.0, 0], [0.0, 5.0, 1.0]]) b = np.array([2.0, 4.0, -1.0]) # Trial-and-error: # + Ax = A@[2,-1,9] # @ is matrix multiplication print('A@x: ',Ax) if np.allclose(Ax,b): print('solution found') else: print('solution not found') # - # **Various matrix operations:** A.T # transpose np.diag(A) # diagonal np.tril(A) # lower triangular matrix np.triu(A) # upper triangular matrix B = A.copy() np.fill_diagonal(B,0) # fill diagonal with zeros print(B) linalg.inv(A) # inverse linalg.eigvals(A) # eigen values # ## Direct solution with Gauss-Jordan elimination # Consider the column stacked matrix: # # $$ # X=[A\,|\,b]=\begin{bmatrix}a_{11} & a_{12} & \cdots & a_{1n} & b_{1}\\ # a_{21} & a_{22} & \cdots & a_{2n} & b_{2}\\ # \vdots & \vdots & \ddots & \vdots & \vdots\\ # a_{n1} & a_{n2} & \cdots & a_{nn} & b_{n} # \end{bmatrix} # $$ # Find the **row reduced echelon form** by performing row operations, i.e. # # 1. Multiply row with constant # 2. Swap rows # 3. Add one row to another row, # until the $A$ part of the matrix is the identity matrix. # **Manually:** # + # a. stack X = np.column_stack((A,b)) print('stacked:\n',X) # b. row operations X[0,:] += 2*X[1,:] X[0,:] /= 5.0 X[1,:] -= X[0,:] X[1,:] *= -1 X[2,:] -= 5*X[1,:] print('row reduced echelon form:\n',X) # c. 
print result (the last column in X in row reduced echelon form) print('solution',X[:,-1]) # - # **General function:** Y = np.column_stack((A,b)) numecon_linalg.gauss_jordan(Y) print('solution',Y[:,-1]) # which can also be used to find the inverse if we stack with the identity matrix instead, # + # a. construct stacked matrix Z = np.hstack((A,np.eye(3))) print('stacked:\n',Z) # b. apply gauss jordan elimination numecon_linalg.gauss_jordan(Z) # b. find inverse inv_Z = Z[:,3:] # last 3 columns of Z in row reduced echelon form print('inverse:\n',inv_Z) assert np.allclose(Z[:,3:]@A,np.eye(3)) # - # ## Iteative Gauss-Seidel (+) # We can always decompose $A$ into additive lower and upper triangular matrices, # # $$ # A=L+U=\begin{bmatrix}a_{11} & 0 & \cdots & 0\\ # a_{21} & a_{22} & \cdots & 0\\ # \vdots & \vdots & \ddots & \vdots\\ # a_{n1} & a_{n2} & \cdots & a_{nn} # \end{bmatrix}+\begin{bmatrix}0 & a_{12} & \cdots & a_{1n}\\ # 0 & 0 & \cdots & a_{2n}\\ # \vdots & \vdots & \ddots & \vdots\\ # 0 & 0 & \cdots & 0 # \end{bmatrix} # $$ # # such that # # $$ # Ax=b\Leftrightarrow Lx=b-Ux # $$ # **Algorithm:** `gauss_seidel()` # # 1. Choose tolerance $\epsilon > 0$, guess on $x_0$, and set $n=1$. # 2. Find $x_n$ by solving \\( Lx_n = y \equiv (b-Ux_{n-1}) \\). # 3. If $|x_n-x_{n-1}|_{\infty} < \epsilon$ stop, else $n=n+1 $ and return to step 2. # > **Note:** Step 2 is very easy because the equation can be solved directly by *forward substitution*: # > # > $x_1 = \frac{y_1}{a_{11}}$ # > # > $x_2 = \frac{(y_2 - a_{21} x_1)}{a_{22}}$ # > # > $x_3 = \frac{(y_3 - a_{31} x_1 - a_{32} x_2)}{a_{33}}$ # > # > etc. # **Apply Gauss-Seidel:** x0 = np.array([1,1,1]) x = numecon_linalg.gauss_seidel(A,b,x0) print('solution',x) # > **Note:** Convergence is not ensured unless the matrix is *diagonally dominant* or *symmetric* and *positive definite*. x = numecon_linalg.gauss_seidel(A,b,x0,do_print=True) # ## Scipy functions # **Option 1:** Use `.solve()` (scipy chooses what happens). 
x1 = linalg.solve(A, b) print(x1) assert np.all(A@x1 == b) # **Option 2:** Compute `.inv()` first and then solve. Ainv = linalg.inv(A) x2 = Ainv@b print(x2) # > **Note:** Computing the inverse is normally not a good idea due to numerical stability. # **Option 3:** Compute LU decomposition and then solve. LU,piv = linalg.lu_factor(A) # decomposition (factorization) x3 = linalg.lu_solve((LU,piv),b) print(x3) # **Detail:** `piv` contains information on a numerical stable reordering. # ## Comparisons # # 1. `linalg.solve()` is the best choice for solving once. # 2. `linalg.lu_solve()` is the best choice when solving for multipe $b$'s for a fixed $A$ (the LU decomposition only needs to be done once). # 3. Gauss-Seidel is an alternative when e.g. only an approximate solution is needed. # ## Details on LU factorization (+) # # When $A$ is *regular* (invertible), we can decompose it into a *lower unit triangular matrix*, $L$, and an *upper triangular matrix*, $U$: # # $$ # A= L\cdot U = \begin{bmatrix}1 & 0 & \cdots & 0\\ # l_{21} & 1 & \cdots & 0\\ # \vdots & \vdots & \ddots & \vdots\\ # l_{n1} & l_{n2} & \cdots & 1 # \end{bmatrix}\cdot\begin{bmatrix}u_{11} & u_{12} & \cdots & u_{1n}\\ # 0 & u_{22} & \cdots & u_{2n}\\ # \vdots & \vdots & \ddots & \vdots\\ # 0 & 0 & \cdots & u_{nn} # \end{bmatrix} # $$ # # where it can be shown that we can compute the elements by # # $$ # \begin{aligned} # u_{ij} &= a_{ij} - \sum_{k=1}^{i-1} u_{kj} l_{ik} \\ # l_{ij} &= \frac{1}{u_{jj}} \big( a_{ij} - \sum_{k=1}^{j-1} u_{kj} l_{ik} \big) # \end{aligned} # $$ # # This implies that the equation system can be written # # $$ # L(Ux) = b # $$ # **Algorithm:** `lu_solve()` # # 1. Perform LU decomposition (factorization) # 2. Solve $Ly = b$ for $y$ (by *forward substitution*) where $y = Ux$ # 3. 
Solve $Ux = y$ for $x$ (by *backward substitution*) L,U = numecon_linalg.lu_decomposition(A) # step 1 y = numecon_linalg.solve_with_forward_substitution(L,b) # step 2 x = numecon_linalg.solve_with_backward_substitution(U,y) # step 3 print('L:\n',L) print('\nU:\n',U) print('\nsolution:',x) # **Relation to scipy:** # # 1. Scipy use pivoting to improve numerical stability. # 2. Scipy is implemented much much better than here. # ## Sparse matrices (+) # **Sparse matrix:** A matrix with many zeros. Letting the computer know where they are is extremely valuable. # # **Documentation:** [basics](https://docs.scipy.org/doc/scipy/reference/sparse.html) + [linear algebra](https://docs.scipy.org/doc/scipy/reference/sparse.linalg.html#module-scipy.sparse.linalg) # **Create a sparse matrix**, where most elements are on the diagonal: # + from scipy import sparse import scipy.sparse.linalg S = sparse.lil_matrix((1000, 1000)) # 1000x1000 matrix with zeroes S.setdiag(np.random.rand(1000)) # some values on the diagonal S[200, :100] = np.random.rand(100) # some values in a row S[200:210, 100:200] = S[200, :100] # and the same value in some other rows # - # Create a plot of the values in the matrix: S_np = S.toarray() # conversion to numpy fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.matshow(S_np,cmap=plt.cm.binary); # **Solve it in four different ways:** # # 1. Like it was not sparse # 2. Using the sparsity # 3. Using the sparsity + explicit factorization # 4. Iterative solver (similar to Gauss-Seidel) # + k = np.random.rand(1000) # random RHS # a. solve t0 = time.time() x = linalg.solve(S_np,k) print(f'{"solve":12s}: {time.time()-t0:.5f} secs') # b. solve with spsolve t0 = time.time() x_alt = sparse.linalg.spsolve(S.tocsr(), k) print(f'{"spsolve":12s}: {time.time()-t0:.5f} secs') assert np.allclose(x,x_alt) # c. 
solve with explicit factorization t0 = time.time() S_solver = sparse.linalg.factorized(S.tocsc()) x_alt = S_solver(k) print(f'{"factorized":12s}: {time.time()-t0:.5f} secs') assert np.allclose(x,x_alt) # d. solve with iterative solver (bicgstab) t0 = time.time() x_alt,_info = sparse.linalg.bicgstab(S,k,x0=1.001*x,tol=10**(-8)) print(f'{"bicgstab":12s}: {time.time()-t0:.5f} secs') assert np.allclose(x,x_alt),x-x_alt # - # **Conclusion:** # # 1. Using the sparsity can be very important. # 2. Iterative solvers can be very very slow. # # Symbolically # ## Solve consumer problem # Consider solving the following problem: # # $$ # \max_{x_1,x_2} x_1^{\alpha} x_2^{\beta} \text{ s.t. } p_1x_1 + p_2x_2 = I # $$ # Define all symbols: x1 = sm.symbols('x_1') # x1 is a Python variable representing the symbol x_1 x2 = sm.symbols('x_2') alpha = sm.symbols('alpha') beta = sm.symbols('beta') p1 = sm.symbols('p_1') p2 = sm.symbols('p_2') I = sm.symbols('I') # Define objective and budget constraint: objective = x1**alpha*x2**beta objective budget_constraint = sm.Eq(p1*x1+p2*x2,I) budget_constraint # Solve in **four steps**: # # 1. **Isolate** $x_2$ from the budget constraint # 2. **Substitute** in $x_2$ # 3. **Take the derivative** wrt. $x_1$ # 4. **Solve the FOC** for $x_1$ # **Step 1: Isolate** x2_from_con = sm.solve(budget_constraint,x2) x2_from_con[0] # **Step 2: Substitute** objective_subs = objective.subs(x2,x2_from_con[0]) objective_subs # **Step 3: Take the derivative** foc = sm.diff(objective_subs,x1) foc # **Step 4: Solve the FOC** sol = sm.solve(sm.Eq(foc,0),x1) sol[0] # > An alternative is `sm.solveset()`, which will be the default in the future, but it is still a bit immature in my view. # **Task:** Solve the consumer problem with quasi-linear preferences, # # $$ \max_{x_1,x_2} \sqrt{x_1} + \gamma x_2 \text{ s.t. 
} p_1x_1 + p_2x_2 = I $$ # + # write your code here # + jupyter={"source_hidden": true} gamma = sm.symbols('gamma') objective_alt = sm.sqrt(x1) + gamma*x2 objective_alt_subs = objective_alt.subs(x2,x2_from_con[0]) foc_alt = sm.diff(objective_alt_subs,x1) sol_alt = sm.solve(foc_alt,x1) sol_alt[0] # - # ## Use solution # **LaTex:** Print in LaTex format: print(sm.latex(sol[0])) # **Turn into Python function:** # + _sol_func = sm.lambdify((p1,I,alpha,beta),sol[0]) def sol_func(p1,I=10,alpha=1,beta=1): return _sol_func(p1,I,alpha,beta) # test p1_vec = np.array([1.2,3,5,9]) demand_p1 = sol_func(p1_vec) print(demand_p1) # - # **Is demand always positive?** # Give the computer the **information** we have. I.e. that $p_1$, $p_2$, $\alpha$, $\beta$, $I$ are all strictly positive: for var in [p1,p2,alpha,beta,I]: sm.assumptions.assume.global_assumptions.add(sm.Q.positive(var)) sm.assumptions.assume.global_assumptions # **Ask** the computer a **question**: answer = sm.ask(sm.Q.positive(sol[0])) print(answer) # We need the assumption that $p_1 > 0$: sm.assumptions.assume.global_assumptions.remove(sm.Q.positive(p1)) answer = sm.ask(sm.Q.positive(sol[0])) print(answer) # To clear all assumptions we can use: sm.assumptions.assume.global_assumptions.clear() # ## Solving matrix equations (+) # $$ Ax = b $$ # **Remember:** print('A:\n',A) print('b:',b) # **Construct symbolic matrix:** A_sm = numecon_linalg.construct_sympy_matrix(['11','12','21','22','32','33']) # somewhat complicated function A_sm # **Find the inverse symbolically:** A_sm_inv = A_sm.inv() A_sm_inv # **Fill in the numeric values:** A_inv_num = numecon_linalg.fill_sympy_matrix(A_sm_inv,A) # somewhat complicated function x = A_inv_num@b print('solution:',x) # **Note:** The inverse multiplied by the determinant looks nicer... 
A_sm_det = A_sm.det() A_sm_det A_sm_inv_raw = sm.simplify(A_sm_inv*A_sm_det) A_sm_inv_raw # ## More features (mixed goodies) x = sm.symbols('x') # **Derivatives:** Higher order derivatives are also availible sm.Derivative('x**4',x,x) sm.diff('x**4',x,x) # Alternatively, expr = sm.Derivative('x**4',x,x) expr.doit() # **Integrals:** sm.Integral(sm.exp(-x), (x, 0, sm.oo)) sm.integrate(sm.exp(-x), (x, 0, sm.oo)) # **Limits:** c = sm.symbols('c') rho = sm.symbols('rho') sm.Limit((c**(1-rho)-1)/(1-rho),rho,1) sm.limit((c**(1-rho)-1)/(1-rho),rho,1) # **Integers:** X = sm.Integer(7)/sm.Integer(3) Y = sm.Integer(3)/sm.Integer(8) display(X) display(Y) Z = 3 (X*Y)**Z # **Simplify:** expr = sm.sin(x)**2 + sm.cos(x)**2 display(expr) sm.simplify(expr) # **Solve multiple equations at once:** # + x = sm.symbols('x') y = sm.symbols('y') Eq1 = sm.Eq(x**2+y-2,0) Eq2 = sm.Eq(y**2-4,0) sol = sm.solve([Eq1,Eq2],[x,y]) # print all solutions for xy in sol: print(f'(x,y) = ({xy[0]},{xy[1]})') # - # # Non-linear equations - one dimensional # ## Introduction # We consider **solving non-linear equations** on the form, # # $$ # f(x) = 0, x \in \mathbb{R} # $$ # # This is also called **root-finding**. # A specific **example** is: # # $$ # f(x) = 10x^3 - x^2 -1 # $$ # ## Derivative based methods # **Newton methods:** Assume you know the function value and derivatives at $x_0$. # A **first order** approximate value of the function at $x_1$ then is: # # $$ # f(x_1) \approx f(x_0) + f^{\prime}(x_0)(x_1-x_0) # $$ # # implying # # $$ # f(x_1) = 0 \Leftrightarrow x_1 = x_0 - \frac{f(x_0)}{f^{\prime}(x_0)} # $$ # This is called **Newtons method**. # An alternative is **Halleys method** (see [derivation](https://mathworld.wolfram.com/HalleysMethod.html)), which uses # # $$ # x_1 = x_0 - \frac{f(x_0)}{f^{\prime}(x_0)} \Big[ 1-\frac{f(x_0)}{f^{\prime}(x_0)}\frac{f^{\prime\prime}(x_0)}{2f^{\prime}(x_0)} \Big]^{-1} # $$ # # making use of information from the **second derivative**. 
# **Algorithm:** `find_root()` # # 1. Choose tolerance $\epsilon > 0$, guess on $x_0$ and set $n = 0$. # 2. Calculate $f(x_n)$, $f^{\prime}(x_n)$, and perhaps $f^{\prime\prime}(x_n)$. # 3. If $|f(x_n)| < \epsilon$ stop. # 4. Calculate $x_{n+1}$ using Newtons or Halleys formula (see above). # 5. Set $n = n + 1$ and return to step 2. def find_root(x0,f,fp,fpp=None,method='newton',max_iter=500,tol=1e-8,full_info=False): """ find root Args: x0 (float): initial value f (callable): function fp (callable): derivative fp (callable): second derivative method (str): newton or halley max_iter (int): maximum number of iterations tol (float): tolerance full_info (bool): controls information returned Returns: x (float/ndarray): root (if full_info, all x tried) i (int): number of iterations used fx (ndarray): function values used (if full_info) fpx (ndarray): derivative values used (if full_info) fppx (ndarray): second derivative values used (if full_info) """ # initialize x = np.zeros(max_iter) fx = np.zeros(max_iter) fpx = np.zeros(max_iter) fppx = np.zeros(max_iter) # iterate x[0] = x0 i = 0 while True: # step 2: evaluate function and derivatives fx[i] = f(x[i]) fpx[i] = fp(x[i]) if method == 'halley': fppx[i] = fpp(x[i]) # step 3: check convergence if abs(fx[i]) < tol or i >= max_iter: break # step 4: update x if method == 'newton': x[i+1] = x[i] - fx[i]/fpx[i] elif method == 'halley': a = fx[i]/fpx[i] b = a*fppx[i]/(2*fpx[i]) x[i+1] = x[i] - a/(1-b) # step 5: increment counter i += 1 # return if full_info: return x,i,fx,fpx,fppx else: return x[i],i # **Note:** The cell below contains a function for plotting the convergence. # + jupyter={"source_hidden": true} def plot_find_root(x0,f,fp,fpp=None,method='newton',xmin=-8,xmax=8,xn=100): # a. find root and return all information x,max_iter,fx,fpx,fppx = find_root(x0,f,fp,fpp=fpp,method=method,full_info=True) # b. compute function on grid xvec = np.linspace(xmin,xmax,xn) fxvec = f(xvec) # c. figure def _figure(i): # i. 
approximation if method == 'newton': fapprox = fx[i] + fpx[i]*(xvec-x[i]) elif method == 'halley': fapprox = fx[i] + fpx[i]*(xvec-x[i]) + fppx[i]/2*(xvec-x[i])**2 # ii. figure fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.plot(xvec,fxvec,label='function') # on grid ax.plot(x[i],fx[i],'o',color='black',label='current') # now ax.plot(xvec,fapprox,label='approximation') # approximation ax.axvline(x[i+1],ls='--',lw=1,color='black') # cross zero ax.plot(x[i+1],fx[i+1],'o',color='black',mfc='none',label='next')# next ax.legend(loc='lower right',facecolor='white',frameon=True) ax.set_ylim([fxvec[0],fxvec[-1]]) widgets.interact(_figure, i=widgets.IntSlider(description="iterations", min=0, max=max_iter-1, step=1, value=0) ); # - # ## Example f = lambda x: 10*x**3-x**2-1 fp = lambda x: 30*x**2-2*x fpp = lambda x: 60*x-2 x,i = find_root(-5,f,fp,method='newton') print(i,x,f(x)) plot_find_root(-5,f,fp,method='newton') x,i = find_root(-5,f,fp,fpp,method='halley') print(i,x,f(x)) plot_find_root(-5,f,fp,fpp,method='halley') # ## Numerical derivative # Sometimes, you might not have the **analytical derivative**. Then, you can instead use the **numerical derivative**. # + # a. function f = lambda x: 10*x**3 - x**2 -1 # b. numerical derivative (forward) stepsize = 1e-8 fp_approx = lambda x: (f(x+stepsize)-f(x))/stepsize # b. find root x0 = -5 x,i = find_root(x0,f,fp_approx,method='newton') print(i,x,f(x)) # - # **Question:** What happens if you increase the stepsize? # ## Another example # + g = lambda x: np.sin(x) gp = lambda x: np.cos(x) gpp = lambda x: -np.sin(x) x0 = -4.0 plot_find_root(x0,g,gp,gpp,method='newton') # - # **Question:** Is the initial value important? # **Sympy** can actually tell us that there are many solutions: x = sm.symbols('x') sm.solveset(sm.sin(x),) # ## Derivative free methods: Bisection # **Algorithm:** `bisection()` # # 1. Set $a_0 = a$ and $b_0 = b$ where $f(a)$ and $f(b)$ has oposite sign, $f(a_0)f(b_0)<0$ # 2. 
Compute $f(m_0)$ where $m_0 = (a_0 + b_0)/2$ is the midpoint. # 3. Determine the next sub-interval $[a_1,b_1]$: # * If $f(a_0)f(m_0) < 0$ (different signs) then $a_1 = a_0$ and $b_1 = m_0$ (i.e. focus on the range $[a_0,m_0]$). # * If $f(m_0)f(b_0) < 0$ (different signs) then $a_1 = m_0$ and $b_1 = b_0$ (i.e. focus on the range $[m_0,b_0]$). # 4. Repeat step 2 and step 3 until $f(m_n) < \epsilon$. def bisection(f,a,b,max_iter=500,tol=1e-6,full_info=False): """ bisection Solve equation f(x) = 0 for a <= x <= b. Args: f (callable): function a (float): left bound b (float): right bound max_iter (int): maximum number of iterations tol (float): tolerance on solution full_info (bool): controls information returned Returns: m (float/ndarray): root (if full_info, all x tried) i (int): number of iterations used a (ndarray): left bounds used b (ndarray): right bounds used fm (ndarray): funciton values at midpoints """ # test inputs if f(a)*f(b) >= 0: print("bisection method fails.") return None # step 1: initialize _a = a _b = b a = np.zeros(max_iter) b = np.zeros(max_iter) m = np.zeros(max_iter) fm = np.zeros(max_iter) a[0] = _a b[0] = _b # step 2-4: main i = 0 while i < max_iter: # step 2: midpoint and associated value m[i] = (a[i]+b[i])/2 fm[i] = f(m[i]) # step 3: determine sub-interval if abs(fm[i]) < tol: break elif f(a[i])*fm[i] < 0: a[i+1] = a[i] b[i+1] = m[i] elif f(b[i])*fm[i] < 0: a[i+1] = m[i] b[i+1] = b[i] else: print("bisection method fails.") return None i += 1 if full_info: return m,i,a,b,fm else: return m[i],i # **Same result** as before, but **trade-off** between more iterations and no evaluation of derivatives. m,i = bisection(f,-8,7) print(i,m,f(m)) # **Note:** The cell below contains a function for plotting the convergence. # + jupyter={"source_hidden": true} def plot_bisection(f,a,b,xmin=-8,xmax=8,xn=100): # a. find root and return all information res = bisection(f,a,b,full_info=True) if res == None: return else: m,max_iter,a,b,fm = res # b. 
compute function on grid xvec = np.linspace(xmin,xmax,xn) fxvec = f(xvec) # c. figure def _figure(i): # ii. figure fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.plot(xvec,fxvec) # on grid ax.plot(m[i],fm[i],'o',color='black',label='current') # mid ax.plot([a[i],b[i]],[fm[i],fm[i]],'--',color='black',label='range') # range ax.axvline(a[i],ls='--',color='black') ax.axvline(b[i],ls='--',color='black') ax.legend(loc='lower right',facecolor='white',frameon=True) ax.set_ylim([fxvec[0],fxvec[-1]]) widgets.interact(_figure, i=widgets.IntSlider(description="iterations", min=0, max=max_iter-1, step=1, value=0) ); plot_bisection(f,-8,3) # - # **Note:** Bisection is not good at the final convergence steps. Generally true for methods not using derivatives. # ## Scipy # Scipy, naturally, has better implementations of the above algorithms. # **Newton:** result = optimize.root_scalar(f,x0=-4,fprime=fp,method='newton') print(result) # **Halley:** result = optimize.root_scalar(f,x0=-4,fprime=fp,fprime2=fpp,method='halley') print(result) # **Bisect:** result = optimize.root_scalar(f,bracket=[-8,7],method='bisect') print(result) # The **best choice** is the more advanced **Brent-method**: result = optimize.root_scalar(f,bracket=[-8,7],method='brentq') print(result) # # Solving non-linear equations (multi-dimensional) # ## Introduction # We consider **solving non-linear equations** on the form, # # $$ # f(\boldsymbol{x}) = f(x_1,x_2,\dots,x_k) = \boldsymbol{0}, \boldsymbol{x} \in \mathbb{R}^k # $$ # A specific **example** is: # # $$ # h(\boldsymbol{x})=h(x_{1,}x_{2})=\begin{bmatrix}h_{1}(x_{1},x_{2})\\ # h_{2}(x_{1},x_{2}) # \end{bmatrix}=\begin{bmatrix}x_{1}+0.5(x_{1}-x_{2})^{3}-1\\ # x_{2}+0.5(x_{1}-x_{2})^{3} # \end{bmatrix}\in\mathbb{R}^{2} # $$ # # where the **Jacobian** is # # $$ # \nabla h(\boldsymbol{x})=\begin{bmatrix}\frac{\partial h_{1}}{\partial x_{1}} & \frac{\partial h_{1}}{\partial x_{2}}\\ # \frac{\partial h_{2}}{\partial x_{1}} & \frac{\partial h_{2}}{\partial 
x_{2}} # \end{bmatrix}=\begin{bmatrix}1+1.5(x_{1}-x_{2})^{2} & -1.5(x_{1}-x_{2})^{2}\\ # -1.5(x_{2}-x_{1})^{2} & 1+1.5(x_{2}-x_{1})^{2} # \end{bmatrix} # $$ # + def h(x): y = np.zeros(2) y[0] = x[0]+0.5*(x[0]-x[1])**3-1.0 y[1] = x[1]+0.5*(x[1]-x[0])**3 return y def hp(x): y = np.zeros((2,2)) y[0,0] = 1+1.5*(x[0]-x[1])**2 y[0,1] = -1.5*(x[0]-x[1])**2 y[1,0] = -1.5*(x[1]-x[0])**2 y[1,1] = 1+1.5*(x[1]-x[0])**2 return y # - # ## Newton's method # Same as Newton's method in one dimension, but with the following **update step**: # # $$ # \boldsymbol{x}_{n+1} = \boldsymbol{x_n} - [ \nabla h(\boldsymbol{x_n})]^{-1} f(\boldsymbol{x_n}) # $$ def find_root_multidim(x0,f,fp,max_iter=500,tol=1e-8): """ find root Args: x0 (float): initial value f (callable): function fp (callable): derivative max_iter (int): maximum number of iterations tol (float): tolerance Returns: x (float): root i (int): number of iterations used """ # initialize x = x0 i = 0 # iterate while i < max_iter: # step 2: function and derivatives fx = f(x) fpx = fp(x) # step 3: check convergence if max(abs(fx)) < tol: break # step 4: update x fpx_inv = linalg.inv(fpx) x = x - fpx_inv@fx # step 5: increment counter i += 1 return x,i # **Test algorithm:** x0 = np.array([0,0]) x,i = find_root_multidim(x0,h,hp) print(i,x,h(x)) # ## Scipy # There exist a lot of efficient algorithms for finding roots in multiple dimensions. The default scipy choice is something called *hybr*. # **With the Jacobian:** result = optimize.root(h,x0,jac=hp) print(result) print('\nx =',result.x,', h(x) =',h(result.x)) # **Without the Jacobian:** result = optimize.root(h,x0) print(result) print('\nx =',result.x,', h(x) =',h(result.x)) # # Summary # **This lecture:** # # 1. Solving matrix equations (directly, decomposition, iterative) # 2. Symbollic solutions (substitution, derivative, solution) # 3. 
Root-finding (one dimension, multiple dimensions, Newton's method, bisection)

# **Your work:** Play around with the code in this notebook before solving the problem set. Especially, try out the various scipy functions used.

# **Next lecture:** Numerical optimization.
10/Solving_equations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # In this project, we will discuss:
# - How to fetch random samples from the Dataset?
# - isin
# - between
# - unique
# - dropna
# - replace
# - duplicated
# - drop_duplicates
# - astype
# - apply
# - What is Univariate analysis?
# - What is Bivariate analysis?
# - Memory Optimization
#

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# BUGFIX: use a raw string for the Windows path. In a normal string literal
# '\U' starts a unicode escape, which is a SyntaxError on Python 3.
data = pd.read_csv(r'C:\Users\Hatem\Desktop\Work in Data\youtub\project1\data\data.csv')

data.head()

# # 1.Display Top 10 Rows of The Dataset

data.head(10)

# ## as we can see in the dataset salary is either <=50K or >50K

# ## country

# # 2. Check Last 10 Rows of The Dataset

data.tail(10)

# # 3. Find Shape of Our Dataset (Number of Rows And Number of Columns)

data.shape

print("Number of Rows",data.shape[0])
print("Number of Columns",data.shape[1])

# # 4. Getting Information About Our Dataset Like Total Number Rows,
# # Total Number of Columns, Datatypes of Each Column And Memory Requirement

data.info()

# # 5. Fetch Random samples from the Dataset

# sample 50% of the rows; random_state makes the draw reproducible
data1=data.sample(frac=0.50,random_state=100)
data1

# # 6.Check Null Values In The Dataset

data.isnull().sum(axis=0)

sns.heatmap(data.isnull())

# # 7. Perform Data Cleaning [Replace '?' with NaN]

data.isin(['?']).sum()

data.columns

# '?' is this dataset's missing-value marker; only these three columns contain it
for col in ['workclass','occupation','native-country']:
    data[col]=data[col].replace('?',np.nan)

data.isin(['?']).sum()

data.isnull().sum()

sns.heatmap(data.isnull())

# # 8. Drop all The Missing Values

# share of missing values per column (in percent)
per_missing=data.isnull().sum()*100/len(data)
per_missing

data.dropna(how='any',inplace=True)

data.shape

# number of rows dropped
48842-45222

# # 9. 
Check For Duplicate Data and Drop Them dup=data.duplicated().any() print("Are there any duplivated values in data",dup) data=data.drop_duplicates() data.shape 45222-45175 # # 10. Get Overall Statistics About The Dataframe data.describe(include='all') # # 11. Drop The Columns education-num, capital-gain and capital-loss data.columns data=data.drop(['educational-num','capital-gain','capital-loss'],axis=1) data.describe() # # Univariate Analysis # # 12. What Is The Distribution of Age Column? data.columns data['age'].describe() data['age'].hist() # # 13. Find Total Number of persons having age between 17 to 48 (inclusive) using between method sum((data['age']>=17) & (data['age']<=48)) sum(data['age'].between(17,48)) # # 14. What is The Distribution of workclass Column? data.columns data['workclass'].describe() # # 15.How many persons Having Bachelors or Masters degree ? data.columns filter1 = data['education']=='Bachelors' filter2 = data['education']=='Masters' len(data[filter1 | filter2]) # # 16. Bivariate Analysis data.columns sns.boxplot(x='income',y='age',data=data) # # 17. Replace Salary values ['<=50k' , '>=50k'] with 0 and 1 data.columns data['income'].unique() data['income'].value_counts() sns.countplot('income',data=data) def Salary_data(sal): if sal=='<=50k': return 0 else: return 1 data['enconded_salary']=data['income'].apply(Salary_data) data.head() # # 18.which workclass Getting the highest salary ? data.groupby('workclass')['fnlwgt'].mean().sort_values(ascending=False) # # 19. How as Better chance to get salary >50k male or female ? data.groupby('gender')['fnlwgt'].mean().sort_values(ascending=False) # # 20. Convert workclass columns Datatype to category Datatype data.info() data['workclass']=data['workclass'].astype('category') data.info()
4 company workers/project_3-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # HSV colorspace # ### Import resources # + import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 # %matplotlib inline # - # ### Read in RGB image # + # Read in the image image = mpimg.imread('images/car_green_screen2.jpg') plt.imshow(image) # - # ### RGB threshold # # Visualize the green threshold you defined in the previous, consistent green color case. # + # Define our color selection boundaries in RGB values lower_green = np.array([0,180,0]) upper_green = np.array([100,255,100]) # Define the masked area mask = cv2.inRange(image, lower_green, upper_green) # Mask the image to let the car show through masked_image = np.copy(image) masked_image[mask != 0] = [0, 0, 0] # Display it! plt.imshow(masked_image) # - # ### Convert to HSV # + # Convert to HSV hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) # HSV channels h = hsv[:,:,0] s = hsv[:,:,1] v = hsv[:,:,2] # Visualize the individual color channels f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10)) ax1.set_title('H channel') ax1.imshow(h, cmap='gray') ax2.set_title('S channel') ax2.imshow(s, cmap='gray') ax3.set_title('V channel') ax3.imshow(v, cmap='gray') # - # ### TODO: Mask the green area using HSV color space # + # Define our color selection boundaries in HSV values ## TODO: Change these thresholds # This initial threshold allows a certain low range for Hue (H) lower_hue = np.array([40,0,0]) upper_hue = np.array([80,255,255]) # Define the masked area mask = cv2.inRange(hsv, lower_hue, upper_hue) # Mask the image to let the car show through masked_image = np.copy(image) masked_image[mask != 0] = [0, 0, 0] # Display it! plt.imshow(masked_image) # -
1_1_Image_Representation/5_2. Green screen, HSV conversion.ipynb
-- ---
-- jupyter:
--   jupytext:
--     text_representation:
--       extension: .hs
--       format_name: light
--       format_version: '1.5'
--       jupytext_version: 1.14.4
--   kernelspec:
--     display_name: Haskell
--     language: haskell
--     name: haskell
-- ---

-- +
import Control.Monad.Trans.Class
import Control.Monad.Trans.Cont
import Data.Function (fix)
import Data.IORef

-- Emulate C's setjmp with callCC: capture the current continuation and,
-- via `fix`, feed the jump action to itself, so invoking the returned
-- `jmp` re-enters the program right after this point with `jmp` rebound.
setjmp = callCC $ return . fix

main = flip runContT return $ do
    lift $ putStrLn "alpha"          -- printed once, before the jump point
    x <- lift $ newIORef 0           -- mutable loop counter (survives jumps)
    jmp <- setjmp                    -- jump target: execution resumes here on `jmp`
    lift $ putStrLn "beta" -- k
    lift $ putStrLn "gamma" -- j
    value <- lift $ readIORef x
    if value < 4
    then do
        lift $ modifyIORef' x (+1)   -- strict bump, then "longjmp" back
        jmp
    else return value                -- counter reached 4: fall through and finish
-- -

-- runs the loop: "alpha" once, then "beta"/"gamma" five times (x = 0..4)
main
callcc/CallCC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Side Notes # # This notebook is a collection of miscellaneous mini wealth of knowledge on my way learning Python. # ## Using Asterisks # # [Reference](https://www.geeksforgeeks.org/python-star-or-asterisk-operator/) # # Asterisks are far more than $a \times b$ symbol in Python. Read on for all use cases. # # ### Multiplication int1, int2 = 1, 2 print(int1 * int2) # In addition to numerical multiplication, arrays can also be multiplied by numbers. # # Moreover, multiplication is a primary operator, which calls `__mul__` (or `__rmul__`) method of a class. [Read on](#Overriding-Operators) for more information on this topic. arr1 = [0, 4, -4] print(arr1 * int2) # ### Unpacking # # *(Unpacking a function using positional argument)* arr2 = ["Apple", 10, "Banana", 4] print(arr2) print(*arr2) # Note that in the `print(*arr2)`, no formatting is applied. # ### Passing a variable number of parameters # # We can use `*var_name` or `**var_name` to pass a variable number of elements into a function. 
See the example below: # + def addition(*args): return sum(args) print(addition(1, 1, 4, 5, 14)) def eatApples(**prices): if "apple" in prices: print("Eaten all apples!") else: print("Sad, no apples to eat...") eatApples(banana=3, kiwi=6, cherry=4, coconut=10) # - # Combining the above two usages of asterisks, we can have the following code: # + def food(**kwargs): for items in kwargs: print(f"{kwargs[items]} is a {items}") foods = {'fruit' : 'cherry', 'vegetable' : 'potato', 'boy' : 'srikrishna'} food(**foods) # - # ## Overriding Operators # # [Reference](https://www.linuxtopia.org/online_books/programming_books/python_programming/python_ch24s04.html#:~:text=Numeric%20Type%20Special%20Methods%20%20%20%20method,self%20%2F%20other%20%208%20more%20rows%20) # # Just like C++, Python class can define in themselves how to perform numeric operations. # # | Method | Operator | # |:-|:-| # | **\_\_add\_\_**( self , other ) | self + other | # | **\_\_sub\_\_**( self , other ) | self - other | # | **\_\_mul\_\_**( self , other ) | self * other | # | **\_\_div\_\_**( self , other ) | self / other | # | **\_\_mod\_\_**( self , other ) | self % other | # | **\_\_divmod\_\_**( self , other ) | divmod ( self , other ) | # | **\_\_pow\_\_**( self , other , \[ modulo \] ) | self ** other or pow ( self , other , \[ modulo \] ) | # | **\_\_lshift\_\_**( self , other ) | self << other | # | **\_\_rshift\_\_**( self , other ) | self >> other | # | **\_\_and\_\_**( self , other ) | self and other | # | **\_\_xor\_\_**( self , other ) | self xor other | # | **\_\_or\_\_**( self , other ) | self or other | # # But this is only one side. ~~Python does not think that these operators satisfy reflexivity.~~ Thus we need to also define the following to make sure everything work as we expect. 
# # | Method | Operator | # |:-|:-| # | **\_\_radd\_\_**( self , other ) | other + self | # | **\_\_rsub\_\_**( self , other ) | other - self | # | **\_\_rmul\_\_**( self , other ) | other * self | # | **\_\_rdiv\_\_**( self , other ) | other / self | # | **\_\_rmod\_\_**( self , other ) | other % self | # | **\_\_rdivmod\_\_**( self , other ) | divmod ( other , self ) | # | **\_\_rpow\_\_**( self , other ) | other ** self or pow ( other , self ) | # | **\_\_rlshift\_\_**( self , other ) | other << self | # | **\_\_rrshift\_\_**( self , other ) | other >> self | # | **\_\_rand\_\_**( self , other ) | other and self | # | **\_\_rxor\_\_**( self , other ) | other xor self | # | **\_\_ror\_\_**( self , other ) | other or self | # + class Child: name="" def __init__(self, name): if type(name) == str: self.name = name else: raise(TypeError("argument name_ must be type str")) class Girl: name="" def __init__(self, name): if type(name) == str: self.name = name else: raise(TypeError("argument name_ must be type str")) ''' def __add__(self, other): print("Girl __add__") return Child(self.name + other.name) ''' def __radd__(self, other): print("Girl __radd__") return Child(self.name + other.name) class Boy: name="" def __init__(self, name): if type(name) == str: self.name = name else: raise(TypeError("argument name_ must be type str")) def __add__(self, other): print("Boy __add__") return Child(self.name + other.name) def __radd__(self, other): print("Boy __radd__") return Child(self.name + other.name) pBlack = Girl("小黑") pWhite = Boy("小白") print((pBlack+pWhite).name) # Line 1 print((pWhite+pBlack).name) # Line 2 print((pWhite+pBlack+pWhite).name) # Line 3 # - # When the `__add__()` of class `Girl` is commented, the behavior of the first print statement is a shown. However, when we uncomment the function, this statement will call `Girl.__add__()` instead. This is because `__add__` are always tried before `__radd__`. 
#
# Similarly, because `Child` has neither `__add__()` nor `__radd__()`, the program
# then turns to `pWhite` (the second `pWhite` in line 3) for help. It is actually
# performing `Child + Boy` inside.
SIDE_NOTES.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Preface # # If you're a reasonable person, you might ask, "what is *mxnet-the-straight-dope*?" You might also ask, "why does it have such an ostentatious name?" Speaking to the former question, *mxnet-the-straight-dope* is an attempt to create a new kind of educational resource for deep learning. Our goal is to leverage the strengths of Jupyter notebooks to present prose, graphics, equations, and (importantly) code together in one place. If we're successful, the result will be a resource that could be simultaneously a book, course material, a prop for live tutorials, and a resource for plagiarising (with our blessing) useful code. To our knowledge, few available resources aim to teach either (1) the full breadth of concepts in modern machine learning or (2) interleave an engaging textbook with runnable code. We'll find out by the end of this venture whether or not that void exists for a good reason. # # Regarding the name, we are cognizant that the machine learning community and the ecosystem in which we operate have lurched into an absurd place. In the early 2000s, comparatively few tasks in machine learning had been conquered, but we felt that we understood *how* and *why* those models worked (with some caveats). By contrast, today's machine learning systems are extremely powerful and *actually work* for a growing list of tasks, but huge open questions remain as to precisely *why* they are so effective. # # This new world offers enormous opportunity, but has also given rise to considerable buffoonery. 
Research preprints like [the arXiv](http://arxiv.org) are flooded by clickbait, AI startups have sometimes received overly optimistic valuations, and the blogosphere is flooded with thought leadership pieces written by marketers bereft of any technical knowledge. Amid the chaos, easy money, and lax standards, we believe it's important not to take our models or the environment in which they are worshipped too seriously. Also, in order to both explain, visualize, and code the full breadth of models that we aim to address, it's important that the authors do not get bored while writing. # # ## Organization # # At present, we're aiming for the following format: aside from a few (optional) notebooks providing a crash course in the basic mathematical background, each subsequent notebook will both: # # 1. Introduce a reasonable number (perhaps one) of new concepts # 2. Provide a single self-contained working example, using a real dataset # # This presents an organizational challenge. Some models might logically be grouped together in a single notebook. # And some ideas might be best taught by executing several models in succession. # On the other hand, there's a big advantage to adhering to a policy of *1 working example, 1 notebook*: # This makes it as easy as possible for you to start your own research projects # by plagiarising our code. Just copy a single notebook and start modifying it. # # We will interleave the runnable code with background material as needed. # In general, we will often err on the side of making tools available before explaining them fully # (and we will follow up by explaining the background later). # For instance, we might use *stochastic gradient descent* # before fully explaining why it is useful or why it works. # This helps to give practitioners the necessary ammunition to solve problems quickly, # at the expense of requiring the reader to trust us with some decisions, at least in the short term. 
# Throughout, we'll be working with the MXNet library,
# which has the rare property of being flexible enough for research
# while being fast enough for production.
# Our more advanced chapters will mostly rely
# on MXNet's new high-level imperative interface ``gluon``.
# Note that this is not the same as ``mxnet.module``,
# an older, symbolic interface supported by MXNet.
#
# This book will teach deep learning concepts from scratch.
# Sometimes, we'll want to delve into fine details about the models
# that are hidden from the user by ``gluon``'s advanced features.
# This comes up especially in the basic tutorials,
# where we'll want you to understand everything that happens in a given layer.
# In these cases, we'll generally present two versions of the example:
# one where we implement everything from scratch,
# relying only on NDArray and automatic differentiation,
# and another where we show how to do things succinctly with ``gluon``.
# Once we've taught you how a layer works,
# we can just use the ``gluon`` version in subsequent tutorials.
#
# ## Learning by doing
#
# Many textbooks teach a series of topics, each in exhaustive detail. For example, <NAME>'s excellent textbook, [Pattern Recognition and Machine Learning](https://www.amazon.com/Pattern-Recognition-Learning-Information-Statistics/dp/0387310738), teaches each topic so thoroughly, that getting to the chapter on linear regression requires a non-trivial amount of work. When I (Zack) was first learning machine learning, this actually limited the book's usefulness as an introductory text. When I rediscovered it a couple years later, I loved it precisely for its thoroughness, and I hope you check it out after working through this material! But perhaps the traditional textbook approach is not the easiest way to get started in the first place.
#
# Instead, in this book, we'll teach most concepts just in time. 
For # the fundamental preliminaries like linear algebra and probability, # we'll provide a brief crash course from the outset, # but we want you to taste the satisfaction of training your first model # before worrying about exotic probability distributions. # # ## Next steps # # If you're ready to get started, head over to [the introduction](../chapter01_crashcourse/introduction.ipynb) or go straight to [our basic primer on NDArray](./ndarray.ipynb), MXNet's workhorse data structure. # # For whinges or inquiries, [open an issue on GitHub.](https://github.com/zackchase/mxnet-the-straight-dope)
chapter01_crashcourse/preface.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/santafe1006/100knocks-preprocess/blob/master/zero2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="WM3pl7-0pMZU"
import sys
from common.time_layers import *
from common.np import *
from common.base_model import BaseModel


# + id="h8-yd4msrPf_"
class BetterRnnlm(BaseModel):
    """Two-layer LSTM language model with dropout and weight tying.

    The embedding matrix is shared (transposed) with the output affine
    layer, and dropout is applied after the embedding and after each LSTM.
    """

    def __init__(self, vocab_size=10000, wordvec_size=650,
                 hidden_size=650, dropout_ratio=0.5):
        V, D, H = vocab_size, wordvec_size, hidden_size
        rn = np.random.randn

        # weight initialization (scaled by 1/sqrt(fan-in), float32)
        embed_W = (rn(V, D) / 100).astype('f')
        lstm_Wx1 = (rn(D, 4*H) / np.sqrt(D)).astype('f')
        lstm_Wh1 = (rn(H, 4*H) / np.sqrt(H)).astype('f')
        lstm_Wx2 = (rn(H, 4*H) / np.sqrt(H)).astype('f')
        lstm_Wh2 = (rn(H, 4*H) / np.sqrt(H)).astype('f')
        lstm_b1 = np.zeros(4*H).astype('f')
        lstm_b2 = np.zeros(4*H).astype('f')
        affine_b = np.zeros(V).astype('f')

        self.layers = [
            TimeEmbedding(embed_W),
            TimeDropout(dropout_ratio),
            TimeLSTM(lstm_Wx1, lstm_Wh1, lstm_b1, stateful=True),
            TimeDropout(dropout_ratio),
            TimeLSTM(lstm_Wx2, lstm_Wh2, lstm_b2, stateful=True),
            TimeDropout(dropout_ratio),
            TimeAffine(embed_W.T, affine_b)  # weight tying with the embedding
        ]
        self.loss_layer = TimeSoftmaxWithLoss()
        self.lstm_layers = [self.layers[2], self.layers[4]]
        # BUGFIX: the list previously contained self.layers[3] twice and
        # omitted self.layers[5], so the last dropout layer's train flag
        # was never toggled.
        self.drop_layers = [self.layers[1], self.layers[3], self.layers[5]]

        # collect all parameters and gradients for the optimizer
        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads

    def predict(self, xs, train_flg=False):
        # BUGFIX: the parameter was misspelled 'trani_flg' and the loop
        # assigned the undefined name 'traing_flg', raising NameError on
        # every call.
        for layer in self.drop_layers:
            layer.train_flg = train_flg
        for layer in self.layers:
            xs = layer.forward(xs)
        return xs

    def forward(self, xs, ts, train_flg=True):
        """Return the softmax cross-entropy loss for inputs xs and targets ts."""
        score = self.predict(xs, train_flg)
        loss = self.loss_layer.forward(score, ts)
        return loss

    def backward(self, dout=1):
        """Backpropagate through the loss and all layers in reverse order."""
        dout = self.loss_layer.backward(dout)
        for layer in reversed(self.layers):
            dout = layer.backward(dout)
        return dout

    def reset_state(self):
        """Reset the hidden/cell state of both stateful LSTM layers."""
        for layer in self.lstm_layers:
            layer.reset_state()


# + id="x0UBKmgMvKWb"
from common import config
config.GPU = True

from common.optimizer import SGD
from common.trainer import RnnlmTrainer
from common.util import eval_perplexity
from dataset import ptb
# from better_rnnlm import BetterRnnlm

# set hyper parameters
batch_size = 20
wordvec_size = 650
hidden_size = 650
time_size = 35
lr = 20.0
max_epoch = 40
max_grad = 0.25
dropout = 0.5

# load learned data
corpus, word_to_id, id_to_word = ptb.load_data('train')
corpus_val, _, _ = ptb.load_data('val')
corpus_test, _, _ = ptb.load_data('test')

vocab_size = len(word_to_id)
# next-word prediction: targets are the inputs shifted by one position
xs = corpus[:-1]
ts = corpus[1:]

model = BetterRnnlm(vocab_size, wordvec_size, hidden_size, dropout)
optimizer = SGD(lr)
trainer = RnnlmTrainer(model, optimizer)

best_ppl = float('inf')
for epoch in range(max_epoch):
    trainer.fit(xs, ts, max_epoch=1, batch_size=batch_size,
                time_size=time_size, max_grad=max_grad)

    model.reset_state()
    ppl = eval_perplexity(model, corpus_val)
    print('valid perplexity: ', ppl)

    if best_ppl > ppl:
        best_ppl = ppl
        model.save_params()
    else:
        # anneal the learning rate when validation perplexity stops improving
        lr /= 4.0
        optimizer.lr = lr

    model.reset_state()
    print('-' * 50)

# + id="IrWikhq0zdbg"
zero2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.5 64-bit
#     language: python
#     name: python37564bita61036862bae45bcafb550af5af65e79
# ---

# #### Written by <a href="https://chandanshastri.github.io"><NAME>.</a>
#
# #### Example Program for handling User Input Errors.
#
# This program reads two integers and prints the sum.

def add_integers(a,b):
    """ This function takes two integers as parameters and returns the sum."""
    return a + b

# +
def main():
    """Read two integers from the user and print their sum."""
    print("\nEnter two integers to get the sum :\n")

    # Exception handling using try-except blocks if the user enters anything other than integers.
    try :
        first_number = int(input("Enter first number : "))
        second_number = int(input("Enter second number : "))
    # BUGFIX: catch only the conversion error, and compute the sum in the
    # `else` branch. Previously the sum was computed unconditionally after
    # the try/except, so invalid input raised NameError on the undefined
    # first_number/second_number.
    except ValueError :
        print("\nPlease Enter Integers..!!")
    else:
        answer = add_integers(first_number, second_number)
        print("\n\nThe Sum is : ", answer)

# Calling the main function
if __name__ == "__main__" :
    main()
# -
sum_of_integers.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ferdouszislam/Weather-WaterLevel-Prediction-ML/blob/main/Notebooks/brri-dataset/experimentations/classification/selected_algorithms/decisionTree_classification_as_reference.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="I95S5GeUMsc-"
# # the imports in this cell are required when running on local device
# import os, sys
# sys.path.append(os.path.join('..', '..'))
# from utils.applyML_util import train_classification, eval_classification
# from utils.featureSelection_util import (pearson_correlation_fs,
#                                          seleckKBest_fs, selectSequential_fs)

# + id="oxumaP1_Oxv4"
# the imports in this cell are required when running from Cloud (Colab/Kaggle)
# before running on cloud you need to upload the .py files
# from 'Notebooks/utils' directory
from applyML_util import train_classification, eval_classification, showEvalutationGraph_classification
from featureSelection_util import (pearson_correlation_fs,
                                   seleckKBest_fs, selectSequential_fs)

# + [markdown] id="UJ4-Bu_iQt6r"
# **Decision Tree Documentation link:** https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html

# + id="1tVvVKjkQsmG"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier

# + id="K0b41I0RRkkE"
# global random seed
RAND_SEED = 42

# initial model with only the random seed and no tuned hyper-parameters
initial_model = DecisionTreeClassifier(random_state=RAND_SEED)

# hyper-parameter search space
max_depths = [x for x in range(1, 41)]
criterions = ['gini', 'entropy']
splitters = ['best', 'random']
max_features = [None, 'sqrt']
class_weights = [None, 'balanced']

# dictionary of all hyperparameters
param_grid = {'max_depth': max_depths, 'criterion': criterions,
              'splitter': splitters, 'max_features': max_features,
              'class_weight': class_weights}

# variables needed for showEvalutationGraph_classification()
MODEL_CLASS = DecisionTreeClassifier
class_label = 'Rainfall'
x_axis_param_name = 'max_depth'
x_axis_param_vals = max_depths


# + id="run-experiment-helper"
def run_experiment(train_df, test_df, sampling_technique=None):
    """Train, visualise and evaluate one experiment configuration.

    Grid-searches ``initial_model`` over ``param_grid`` on ``train_df``
    (optionally resampling the training data with ``sampling_technique``,
    e.g. 'smote' or 'hybrid'), plots the train-set evaluation graph for
    the selected hyper-parameters, then reports held-out performance on
    ``test_df``.  Returns the fitted model.

    The original notebook repeated this train/graph/test cell pattern
    seven times; this helper removes that duplication.
    """
    # only forward sampling_technique when the caller asked for resampling
    kwargs = {} if sampling_technique is None else {'sampling_technique': sampling_technique}
    model, selected_hyperparams, train_accuracy, train_f1 = \
        train_classification(initial_model, param_grid, train_df,
                             cls=class_label, **kwargs)
    print(f'Selected hyperparameters: {selected_hyperparams}')
    print(f'Train set performance: accuracy={train_accuracy}, macro-f1={train_f1}')

    # graph on train set performance for the GridSearchCV-selected params
    selected_model_params = dict(selected_hyperparams, random_state=RAND_SEED)
    showEvalutationGraph_classification(MODEL_CLASS, train_df, cls=class_label,
                                        x_axis_param_name=x_axis_param_name,
                                        x_axis_param_vals=x_axis_param_vals,
                                        selected_model_params=selected_model_params)

    # performance on the held-out test set
    test_accuracy, test_f1, test_auc = eval_classification(model, test_df,
                                                           cls=class_label)
    print(f'Test set performance: accuracy={test_accuracy}, macro-f1={test_f1}, auc={test_auc}')
    return model


# + [markdown] id="sMvRYb6AQLj9"
# ## 1. Experimentation on the Weather Daily Dataset

# + id="bNmGnNV1QPE8"
# Load the train dataset
weather_daily_train_df = pd.read_csv('https://raw.githubusercontent.com/ferdouszislam/Weather-WaterLevel-Prediction-ML/main/Datasets/brri-datasets/final-dataset/train/brri-weather_train_classification.csv')
# Load the test set
weather_daily_test_df = pd.read_csv('https://raw.githubusercontent.com/ferdouszislam/Weather-WaterLevel-Prediction-ML/main/Datasets/brri-datasets/final-dataset/test/brri-weather_test_classification.csv')

# + [markdown] id="z0BMd6EBK6PQ"
# ### 1.0 No technique

# + id="N8lrGQEjMmhO"
model = run_experiment(weather_daily_train_df, weather_daily_test_df)

# + [markdown] id="VcGsKgTkDS60"
# ### 1.1 Apply Pearson Feature Selection to Daily Weather Dataset

# + id="xUGFfg6FDSB3"
# select features from the train dataset
weather_daily_fs1_train_df, cols_to_drop = pearson_correlation_fs(weather_daily_train_df,
                                                                  class_label)
# keep only selected features on the test dataset
weather_daily_fs1_test_df = weather_daily_test_df.drop(columns=cols_to_drop)
model = run_experiment(weather_daily_fs1_train_df, weather_daily_fs1_test_df)

# + [markdown] id="r0f0shnaTaEd"
# ### 1.2 Apply SelectKBest Feature Selection to Daily Weather Dataset

# + id="aP0zT8cDTaEe"
# select features from the train dataset.
# BUGFIX: this is a classification notebook, so K-best scoring must use a
# classification metric; the original passed is_regression=True here.
weather_daily_fs2_train_df, cols_to_drop = seleckKBest_fs(weather_daily_train_df,
                                                          class_label,
                                                          is_regression=False)
print('features dropped:', cols_to_drop)
# keep only selected features on the test dataset
weather_daily_fs2_test_df = weather_daily_test_df.drop(columns=cols_to_drop)
model = run_experiment(weather_daily_fs2_train_df, weather_daily_fs2_test_df)

# + [markdown] id="uPWtfgzLLYJI"
# ### 1.3 SMOTE on Daily Dataset

# + id="9Q9UhID9LYJI"
model = run_experiment(weather_daily_train_df, weather_daily_test_df,
                       sampling_technique='smote')

# + [markdown] id="tCwFqnH6Lq17"
# ### 1.4 Random Undersampling + SMOTE on Daily Dataset

# + id="lTdi5bbLLq17"
model = run_experiment(weather_daily_train_df, weather_daily_test_df,
                       sampling_technique='hybrid')

# + [markdown] id="XVMCfgbKahtE"
# ### 1.5 Pearson Feature Selection + Hybrid Sampling to Daily Weather Dataset

# + id="ck48Q9VQahtN"
model = run_experiment(weather_daily_fs1_train_df, weather_daily_fs1_test_df,
                       sampling_technique='hybrid')

# + [markdown] id="FSPsOKznhZlT"
# ### 1.6 SelecKBest Feature Selection + Hybrid Sampling to Daily Weather Dataset

# + id="i-ZIHmixhZlV"
model = run_experiment(weather_daily_fs2_train_df, weather_daily_fs2_test_df,
                       sampling_technique='hybrid')
Notebooks/brri-dataset/experimentations/classification/selected_algorithms/decisionTree_classification_as_reference.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ChihabEddine98/DL_course/blob/main/lec1_reuters.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + pycharm={"is_executing": true} id="XxcgHRMtb2BR"
# Imports
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras.datasets import reuters
from sklearn.model_selection import train_test_split

# + pycharm={"name": "#%%\n"} colab={"base_uri": "https://localhost:8080/"} id="COccZkQkb2BT" outputId="9380cdb4-34ea-422a-dad9-fa3b08d4415e"
# data load: keep only the 10,000 most frequent words
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000)


# + pycharm={"name": "#%%\n"} id="RHD98grAb2BT"
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode word-index sequences.

    Returns a (len(sequences), dimension) float array where row i has 1.0
    at every word index that occurs in sequences[i] and 0.0 elsewhere.
    """
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        # fancy indexing sets every word position of the row in one shot,
        # replacing the original per-element inner Python loop
        results[i, sequence] = 1.
    return results


# + pycharm={"name": "#%%\n"} id="jaYsFkmAb2BU"
from tensorflow.keras.utils import to_categorical

# Data Prepro: multi-hot inputs, one-hot (46 topic classes) targets
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
y_train = to_categorical(train_labels)
y_test = to_categorical(test_labels)

# + pycharm={"name": "#%%\n"} colab={"base_uri": "https://localhost:8080/"} id="vFmhZKdlb2BU" outputId="a44ba289-23db-4b7b-d384-9bc3a3defaba"
# Validation set Split (30% of the training data held out)
print(x_train.shape)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.3)
print(x_train.shape)

# + id="XA4AKA__b6ad"
# Model Settings: two 64-unit ReLU hidden layers, 46-way softmax output.
# BUGFIX: keras.Sequential expects a *list* of layers; the original passed a
# single Dense layer positionally, which Sequential cannot iterate.
model = keras.Sequential([
    keras.layers.Dense(64, activation='relu', input_shape=(x_train.shape[1],)),
    keras.layers.Dense(64, activation='relu'),
    keras.layers.Dense(46, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])


# + id="StzaNQOhc2E6"
# Train the model
def train(model):
    """Fit `model` on the module-level train/validation split (20 epochs).

    Pinned to GPU 0; assumes a GPU is available in the Colab runtime.
    Returns the Keras History object.
    """
    with tf.device('/device:GPU:0'):
        history = model.fit(x_train, y_train,
                            epochs=20,
                            batch_size=512,
                            validation_data=(x_val, y_val))
    return history


# + colab={"base_uri": "https://localhost:8080/"} id="xLZnoqUDc_RO" outputId="376e0936-4b06-42c9-fac5-6e435f3cce97"
# Train the model
history_1 = train(model)

# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="6Tqh1ziFdBiE" outputId="5949d4e6-a670-432a-c65b-a057cebf6f92"
import matplotlib.pyplot as plt

# Learning curves: training vs validation loss per epoch
history_dict = history_1.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'r', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

# + id="XXXIVLwweboG"
lec1_MLP/lec1_reuters.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 1. Setup from unet import UNet # %matplotlib inline # %load_ext autoreload # %autoreload 2 # ## 2. U-Net # http://www.robots.ox.ac.uk/~vgg/publications/2016/Xie16/xie16.pdf # https://arxiv.org/abs/1505.04597 model = UNet() model.summary()
neural_networks/unet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib

matplotlib.style.use('ggplot')
# -

data = pd.read_csv('Final_HIV_without_Nan.csv')
data.head()

print("Types and counts of publication", data.groupby(['Publication']).size())

data.groupby(['Publication']).size().plot(kind='bar')

# BUGFIX: take an explicit copy — the original kept a view/slice of `data`,
# so the in-place fillna() below triggered SettingWithCopyWarning and could
# silently fail to modify the frame.
vietnamnews = data[data["Publication"] == "vietnamnews.vn"].copy()

# +
import nltk

# Fill any blank fields
vietnamnews.Headlines.fillna("", inplace=True)
vietnamnews.Articles.fillna("", inplace=True)

# Join the Headlines and Articles
all_Articles = vietnamnews.Headlines.str.cat(vietnamnews.Articles, sep=' ')

# Tokenize. The NLTK tokenizer isn't awesome. Spacy has a nice one, but I don't think it is installed
words = nltk.word_tokenize(" ".join(all_Articles.tolist()))
# -

vietnamnews.head()

# +
from nltk.corpus import stopwords
import string

# clearly more cleaning is needed here, but really I should get a better tokenizer.
# Use a set for O(1) membership tests (the original tested against a list,
# making the comprehension quadratic).
stop = set(stopwords.words('english'))
cleanwords = [i for i in words if i not in stop and i.isalpha() and len(i) > 2]

# +
from wordcloud import WordCloud, STOPWORDS

wordcloud2 = WordCloud(
    stopwords=STOPWORDS,
    background_color='white',
    width=1200,
    height=1000
).generate(" ".join(cleanwords))

plt.imshow(wordcloud2)
plt.axis('off')
plt.show()

# +
# Bigrams should be more interesting
bigrams = nltk.bigrams(cleanwords)

# +
# look at the most common.
from collections import Counter

counter = Counter(bigrams)
print(counter.most_common(10))

# +
num_to_show = 30

# compute the top-k once and reuse it for both labels and values
# (the original called most_common() twice)
top = counter.most_common(num_to_show)
labels = [" ".join(e[0]) for e in top]
values = [e[1] for e in top]

indexes = np.arange(len(labels))
width = 1

plt.bar(indexes, values, width)
plt.xticks(indexes + width * 0.5, labels, rotation=90)
#plt.barh(indexes, values, width)
#plt.yticks(indexes + width * 0.2, labels)
plt.show()
# -
Analysis/HIV Visuals & World Cloud.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Helper Functions, will eventually live in pascal module

# +
import numpy as np
import itertools
import random


#### name parsing helper functions

def components_to_name(components, delimiter='_'):
    """Inverse of name_to_components(): format {species: amount} as a name.

    e.g. {'MA': 0.5, 'Pb': 1} -> 'MA0.50_Pb1.00'.  Species with amount <= 0
    are omitted; amounts are written with two decimals.
    """
    return delimiter.join(
        '{0}{1:.2f}'.format(c, n) for c, n in components.items() if n > 0)


def name_to_components(name, factor=1, delimiter='_'):
    '''
    given a chemical formula, returns dictionary with individual components/amounts

    expected name format = 'MA0.5_FA0.5_Pb1_I2_Br1'. would return dictionary with
    keys ['MA', 'FA', 'Pb', 'I', 'Br'] and values [0.5, 0.5, 1, 2, 1]*factor
    '''
    components = {}
    for part in name.split(delimiter):
        species = part
        count = 1.0  # species with no trailing number (e.g. 'Pb') default to 1
        # try the longest numeric suffix first so 'Br1.5' parses as ('Br', 1.5)
        for l in range(len(part), 0, -1):
            try:
                count = float(part[-l:])
                species = part[:-l]
                break
            except ValueError:
                # suffix isn't a number yet; try a shorter one
                pass
        components[species] = count * factor
    return components


#### individual solution functions

def get_components(name, factor=1, delimiter='_'):
    """Backward-compatible alias for name_to_components().

    The original duplicated the parsing loop verbatim; delegate instead so
    the two can never drift apart.
    """
    return name_to_components(name, factor=factor, delimiter=delimiter)


def calculate_mix(target, volume, solution_sources):
    '''
    given a target composition, target volume, and stock solution sources,
    calculates the volumes needed from individual stocks to achieve target composition

    target: target composition. Will be passed to name_to_components()
            Example: 'MA0.5_FA0.5_Pb1_I2_Br1'
    volume: target volume, in L
    solution_sources: dictionary with stock solution compositions, molarities,
            solvents, well positions, and available volumes labeled.
            Example:
                solution_sources = {
                    'MA_Pb_I3': dict(well = 'A1', molarity = 2, solvent = dict(DMSO = 9, DMF = 1), volume = 15e-3),
                    'Cs_I':     dict(well = 'A2', molarity = 3, solvent = dict(DMSO = 1), volume = 15e-3),
                    'FA_Pb_I3': dict(well = 'A3', molarity = 1.5, solvent = dict(DMSO = 9, DMF = 1), volume = 15e-3)
                }

    Returns {well: volume_in_L} on success, or False if the stocks cannot
    reproduce the target composition to within 1% relative error.
    '''
    target_composition = name_to_components(target)
    wells = [solution_properties['well']
             for solution_name, solution_properties in solution_sources.items()]
    num_solutions = len(solution_sources)
    components = list(target_composition.keys())
    num_components = len(components)

    # (component x solution) matrix of molarity each stock contributes
    solution_matrix = np.zeros((num_components, num_solutions))
    for n, (solution_name, solution_properties) in enumerate(solution_sources.items()):
        solution_components = get_components(solution_name,
                                             factor=solution_properties['molarity'])
        for m, component_name in enumerate(components):
            if component_name in solution_components:
                solution_matrix[m, n] = solution_components[component_name]

    target_matrix = np.zeros((num_components,))
    for m, (component_name, component_amount) in enumerate(target_composition.items()):
        target_matrix[m] = component_amount

    # least-squares blend of stocks that best reproduces the target
    amount_matrix = np.linalg.lstsq(solution_matrix, target_matrix, rcond=None)[0]
    # clean up values that are essentially 0. If we have a significant
    # negative value here, it should get caught by the doublecheck below.
    amount_matrix[amount_matrix < 1e-6] = 0

    doublecheck = solution_matrix @ amount_matrix
    # accept only if within 1% relative error wrt the target composition
    if np.linalg.norm(doublecheck - target_matrix) / np.linalg.norm(target_matrix) < 0.01:
        results = {}
        for well, solution_volume in zip(wells, amount_matrix):
            results[well] = np.round(solution_volume * volume, 6)  # round to nearest uL
    else:
        results = False
        print('Error: Unable to generate target solution with current stock solutions.')
    return results


#### combining functions to generate experiment mesh

def compositions_spread(compositions, n):
    """All unique n-way averages of the endpoint compositions, as names.

    Amounts are formatted with three decimals; zero-amount species are
    dropped from each generated name.
    """
    composition_components = [name_to_components(s) for s in compositions]
    components = []
    for s in composition_components:
        components += list(s.keys())
    components = np.unique(components)  # sorted, deduplicated species list

    # endpoint compositions as rows of a (n_compositions x n_species) matrix
    mat = np.zeros((len(compositions), len(components)))
    for sidx, s in enumerate(composition_components):
        for cidx, c in enumerate(components):
            if c in s:
                mat[sidx, cidx] = s[c]

    compositions = []
    for mix in itertools.combinations_with_replacement(mat, n):
        composition_amounts = np.array(mix).mean(axis=0)
        composition_label = ''
        for c, a in zip(components, composition_amounts):
            if a > 0:
                composition_label += '{0}{1:.3f}_'.format(c, a)
        compositions.append(composition_label[:-1])  # exclude the last underscore
    return list(np.unique(compositions))


def spincoat_spread(spincoats, method, n=None):
    """Expand endpoint spincoat recipes into a deduplicated set of recipes.

    method='interpolate': average every n-way combination of the endpoints
                          (n required).
    method='permute':     cartesian product of the unique per-step settings.
    """
    mat = np.array(spincoats)
    if method == 'interpolate':
        if n is None:
            raise Exception('If method = "interpolate", n must be specified.')
        spincoats = []
        for mix in itertools.combinations_with_replacement(mat, n):
            spincoats.append(np.array(mix).mean(axis=0))
    elif method == 'permute':
        unique_steps = []
        for i in range(mat[0].shape[0]):
            steps = np.array([c[i] for c in mat])
            unique_steps.append(np.unique(steps, axis=0))
        spincoats = list(np.vstack(rows) for rows in itertools.product(*unique_steps))
    else:
        raise Exception('Invalid method provided - must be "interpolate" or "permute"')
    return np.unique(spincoats, axis=0)


def anneal_spread(anneals, method, n=None):
    """Expand endpoint anneal conditions; same semantics as spincoat_spread()."""
    mat = np.array(anneals)
    if method == 'interpolate':
        if n is None:
            raise Exception('If method = "interpolate", n must be specified.')
        anneals = []
        for mix in itertools.combinations_with_replacement(mat, n):
            anneals.append(np.array(mix).mean(axis=0))
    elif method == 'permute':
        anneals = np.array(list(itertools.product(*[c for c in mat.T])))
    else:
        raise Exception('Invalid method provided - must be "interpolate" or "permute"')
    return np.unique(anneals, axis=0)


# +
def well_list_generator(nrows=12, ncols=8):
    """Yield plate well labels column-major: 'A1'..'A12', 'B1'..'B12', ..."""
    num = 0
    col = -1
    while num < nrows * ncols:
        row = num % nrows
        if row == 0:
            col += 1
        yield f'{str.upper(chr(col + 97))}{row + 1}'
        num += 1


def generate_sample_list(compositions, spincoats, anneals, sample_volume=100e-6,
                         max_volume=1e-3, randomize=True, repeats=1):
    """Build the full sample list (one per composition x spincoat x anneal,
    times `repeats`) and the mixing-well plan backing it.

    Each sample draws `sample_volume` from its composition's current mixing
    well; a fresh well is started when the current one would exceed
    `max_volume`.  Returns (samples, wells).
    """
    samples = []
    wells = {}
    current_wells = {}
    well_generator = well_list_generator()

    def start_new_well(composition, volume):
        # BUGFIX: the original wrote current_wells[c] — the enclosing loop
        # variable — instead of the `composition` parameter.  The two always
        # coincided at the existing call sites, but the closure dependency
        # was a latent bug.
        this_well = well_generator.__next__()
        current_wells[composition] = this_well
        wells[this_well] = {'composition': composition, 'volume': volume}
        return this_well

    for c, sc, an in itertools.product(compositions, spincoats, anneals):
        if c not in current_wells:
            # we havent dedicated a well to mix this composition yet - lets set one
            this_well = start_new_well(c, sample_volume)
        else:
            this_well = current_wells[c]
            # NOTE(review): the mangled source made it ambiguous whether this
            # capacity check sat inside the else-branch; the nested form is
            # used here so a freshly started well is not double-counted.
            if wells[this_well]['volume'] + sample_volume > max_volume:
                this_well = start_new_well(c, sample_volume)
            else:
                wells[this_well]['volume'] += sample_volume
        for r in range(repeats):
            samples.append(dict(composition=c, spincoat=sc, anneal=an, well=this_well))

    if randomize:
        random.shuffle(samples)
    return samples, wells
# -
endpoint_compositions = [
    'Cs_Pb_I3',
    'FA_Pb_I3',
    'MA_Pb_I3'
]

# all pairwise (n=2) averages of the endpoint compositions
compositions = compositions_spread(endpoint_compositions, 2)
print(f'==={len(compositions)} compositions===')
for c in compositions:
    print(c)
# -

# ### Target Spincoating Conditions

# +
endpoint_spincoats = []
endpoint_spincoats.append([
    [2000, 15, 0],  # rpm, duration (s), ramp rate (rpm/s, 0 = max rate)
    [4000, 20, 0],
    [5000, 30, 0],
    [1000, 50, 0]
])
endpoint_spincoats.append([
    [2000, 15, 0],
    [4000, 40, 0],
    [5000, 30, 0],
    [2000, 50, 0]
])
# can keep appending arbitrary number of endpoints
# -

spincoats = spincoat_spread(endpoint_spincoats, method='interpolate', n=2)
print(f'==={len(spincoats)} spincoats===')
for c in spincoats:
    print(f'{c}\n')

# ### Target Annealing Conditions

endpoint_anneals = [
    [80, 40 * 60],  # temperature (C), duration (s)
    [100, 20 * 60],
]
anneals = anneal_spread(endpoint_anneals, method='permute')
print(f'=== {len(anneals)} anneals ===')
for c in anneals:
    print(f'{c}')

samples, mixing_wells = generate_sample_list(compositions, spincoats, anneals,
                                             max_volume=1000e-6)
print(f'{len(samples)} Samples')

# ## Stock Solution Setup

stock_solutions = {
    'MA_Pb_I3': dict(well='A1', molarity=2, solvent=dict(DMSO=9, DMF=1), volume=4e-3),
    'Cs_Pb_I3': dict(well='A2', molarity=3, solvent=dict(DMSO=1), volume=4e-3),
    'FA_Pb_I3': dict(well='A3', molarity=2, solvent=dict(DMSO=9, DMF=1), volume=4e-3)
}
# reverse lookup: stock well label -> composition name
stock_wells = {v['well']: comp for comp, v in stock_solutions.items()}

# +
# volume below which the liquid level is too low to properly aspirate.
# This sets a volume floor on every stock requirement.
min_volume_to_aspirate = 4e-4

necessary_stock = {v['well']: min_volume_to_aspirate for v in stock_solutions.values()}
for i, s in enumerate(samples):
    this_mix = calculate_mix(s['composition'], 150e-6, stock_solutions)
    for well, amount in this_mix.items():
        necessary_stock[well] += amount
    samples[i]['stock_mixture'] = this_mix

print('=== Stock Solution Volume Check === ')
for well, amt_needed in necessary_stock.items():
    amt_in_well = stock_solutions[stock_wells[well]]['volume']
    amt_ratio = amt_in_well / amt_needed
    if amt_ratio > 2:
        status = 'Not taking any chances, are you?'
    elif amt_ratio > 1.1:
        status = 'OK'
    elif amt_ratio > 1:
        status = 'Cutting it close...'
    else:
        status = '!!!!! NOT ENOUGH STOCK !!!!!'
    print(f'{well} ({stock_wells[well]})\t{amt_in_well*1000:.2f}/{amt_needed*1000:.2f} mL\t{status}')
# -

# ### Stock solution per well, assuming distributing from stock -> 96 well plate -> spincoating

# per stock well: which mixing wells it feeds, and how much into each
transfers = {well: dict(destination_wells=[], transfer_volumes=[]) for well in stock_wells}
for destination_well, destination_well_contents in mixing_wells.items():
    need = calculate_mix(destination_well_contents['composition'],
                         destination_well_contents['volume'],
                         stock_solutions)
    for stock_well, stock_volume_to_transfer in need.items():
        if stock_volume_to_transfer > 0:
            transfers[stock_well]['destination_wells'].append(destination_well)
            transfers[stock_well]['transfer_volumes'].append(stock_volume_to_transfer)

# # Initializing Hardware

# +
from opentrons import protocol_api
import opentrons.execute

# metadata()
metadata = {
    'protocolName': 'My Protocol',
    'author': 'Name <<EMAIL>>',
    'description': 'Simple protocol to get started using OT2',
    'apiLevel': '2.6'
}

# protocol run function. the part after the colon lets your editor know
# where to look for autocomplete suggestions
# def run(protocol: protocol_api.ProtocolContext):
#     # labware
#     plate = protocol.load_labware('corning_96_wellplate_360ul_flat', '2')
#     tiprack = protocol.load_labware('opentrons_96_tiprack_300ul', '1')
#     # pipettes
#     left_pipette = protocol.load_instrument(
#         'p300_single', 'left', tip_racks=[tiprack])
#     # commands
#     left_pipette.pick_up_tip()
#     left_pipette.aspirate(100, plate['A1'])
#     left_pipette.dispense(100, plate['B2'])
#     left_pipette.drop_tip()
# -

# Initialize robot/protocol control + lab hardware
protocol = opentrons.execute.get_protocol_api('2.6')
protocol.home()

# +
# labware
# NOTE: renamed to stock_plate/mixing_plate — the original reused the names
# stock_wells/mixing_wells and clobbered the dicts built above, which the
# transfer loop below still needs.
stock_plate = protocol.load_labware('FRG_4ml_v0', '1')  # Labware identifier, deck position
mixing_plate = protocol.load_labware('96wellplate', '2')
tiprack = protocol.load_labware('opentrons_96_tiprack_300ul', '3')

# pipettes
left_pipette = protocol.load_instrument('p300_single', 'left', tip_racks=[tiprack])
# -

# # Automation Begin

# Distribute stock solutions to well plate.
# BUGFIX: the original called an undefined name `pipette` (NameError) and
# dispensed into hard-coded wells B1-B3 instead of the destination wells
# computed into `transfers` above.
for source_well, transfer_info in transfers.items():
    left_pipette.transfer(
        transfer_info['transfer_volumes'],
        stock_plate[source_well],
        [mixing_plate.wells_by_name()[well_name]
         for well_name in transfer_info['destination_wells']])

# Run the experiment!
experimentaldesign/Example_Experiment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # name: python2 # --- # ## Figure 18 # # Similar to [Figure 16](https://github.com/EdwardJKim/astroclass/blob/master/paper/notebooks/figure16/perform_mag_cut.ipynb) # but using $g-r$ color cuts. # %matplotlib inline from __future__ import division, print_function, unicode_literals import numpy as np import matplotlib.pyplot as plt import seaborn as sns plt.rc('legend', fontsize=10) truth_train = np.loadtxt('../../data/truth_train.dat') truth_test = np.loadtxt('../../data/truth_test.dat') # load base classifiers clr_cut_str = ['0_' + str(i) for i in range(1, 10)] + ['1_' + str(i) for i in range(3)] tpc = [np.loadtxt('../../data/vvds_99_tpc_test.mlz', unpack=True, usecols=(2,))] + \ [np.loadtxt('../../data/vvds_g_r_{0}_tpc_test.mlz'.format(i), unpack=True, usecols=(2,)) for i in clr_cut_str] som = [np.loadtxt('../../data/vvds_99_som_test.mlz', unpack=True, usecols=(2,))] + \ [np.loadtxt('../../data/vvds_g_r_{0}_som_test.mlz'.format(i), unpack=True, usecols=(2,)) for i in clr_cut_str] hbc = [np.loadtxt('../../data/vvds_99_median.hbc',unpack=True, usecols=(0,))] + \ [np.loadtxt('../../data/vvds_g_r_{0}_median.hbc'.format(i), unpack=True, usecols=(0,)) for i in clr_cut_str] hbc = [i[-len(truth_test):] for i in hbc] bmc = [np.loadtxt('../../data/vvds_99.bmc', unpack=True, usecols=(0,))] + \ [np.loadtxt('../../data/vvds_g_r_{0}.bmc'.format(i), unpack=True, usecols=(0,)) for i in clr_cut_str] # + def calc_completeness_purity(truth, classif, mag, p_cut=0.5, bins=np.arange(16, 26, 0.5)): ''' ''' # true galaxies classified as stars gs_bin, _ = np.histogram(mag[(classif > p_cut) & (truth == 0)], bins=bins) # true galaxies classified as galaxies gg_bin, _ = np.histogram(mag[(classif < p_cut) & (truth == 0)], bins=bins) # true stars classified as galaxies sg_bin, _ = np.histogram(mag[(classif < p_cut) 
& (truth == 1)], bins=bins) # true stars classified as stars ss_bin, _ = np.histogram(mag[(classif > p_cut) & (truth == 1)], bins=bins) # galaxy completeness g_comp_bin = gg_bin / (gg_bin + gs_bin) g_comp_bin[~np.isfinite(g_comp_bin)] = 1 # galaxy purity g_pur_bin = gg_bin / (gg_bin + sg_bin) g_pur_bin[~np.isfinite(g_pur_bin)] = 1 # star completeness s_comp_bin = ss_bin / (ss_bin + sg_bin) s_comp_bin[~np.isfinite(s_comp_bin)] = 1 # star purity s_pur_bin = ss_bin / (ss_bin + gs_bin) s_pur_bin[~np.isfinite(s_pur_bin)] = 1 return g_comp_bin, g_pur_bin, s_comp_bin, s_pur_bin def find_purity_at(truth_test, clf, step=0.001, gc=None, sc=None): if bool(gc) == bool(sc): raise Exception('Specify only one of gp or sp parameter.') pbin = np.arange(0, 1, step) pure_all = np.zeros(len(pbin)) comp_all = np.zeros(len(pbin)) for i, p in enumerate(pbin): # true galaxies classified as stars gs = ((clf >= p) & (truth_test == 0)).sum() # true galaxies classified as galaxies gg = ((clf < p) & (truth_test == 0)).sum() # true stars classified as galaxies sg = ((clf < p) & (truth_test == 1)).sum() # true stars classified as stars ss = ((clf >= p) & (truth_test == 1)).sum() if gc is not None: if gg == 0 and gg + sg == 0: pure_all[i] = 1 else: pure_all[i] = gg / (gg + sg) if gg == 0 and gg + gs == 0: comp_all[i] = 1 else: comp_all[i] = gg / (gg + gs) if sc is not None: if ss == 0 and ss + sg == 0: comp_all[i] = 1 else: comp_all[i] = ss / (ss + sg) if ss == 0 and ss + gs == 0: pure_all[i] = 1 else: pure_all[i] = ss / (ss + gs) if gc is not None: ibin = np.argmin(np.abs(comp_all - gc)) return pbin[ibin], pure_all[ibin] if sc is not None: ibin = np.argmin(np.abs(comp_all - sc)) return pbin[ibin], pure_all[ibin] # - from sklearn.metrics import roc_auc_score tpc_auc = [roc_auc_score(truth_test, i) for i in tpc] som_auc = [roc_auc_score(truth_test, i) for i in som] hbc_auc = [roc_auc_score(truth_test, i) for i in hbc] bmc_auc = [roc_auc_score(truth_test, i) for i in bmc] from sklearn.metrics 
import mean_squared_error tpc_mse = [mean_squared_error(truth_test, i) for i in tpc] som_mse = [mean_squared_error(truth_test, i) for i in som] hbc_mse = [mean_squared_error(truth_test, i[-len(truth_test):]) for i in hbc] bmc_mse = [mean_squared_error(truth_test, i) for i in bmc] def find_gal_pur(truth, clf, gc=0.9964, step=0.001): result = [] for k in clf: _, j = find_purity_at(truth, k, gc=gc, step=step) result += [j] return result tpc_gpur = find_gal_pur(truth_test, tpc) bmc_gpur= find_gal_pur(truth_test, bmc) hbc_gpur= find_gal_pur(truth_test, hbc) som_gpur= find_gal_pur(truth_test, som) def find_star_pur(truth, clf, sc=0.7145, step=0.0005): result = [] for k in clf: _, j = find_purity_at(truth, k, sc=sc, step=step) result += [j] return result tpc_spur = find_star_pur(truth_test, tpc) bmc_spur = find_star_pur(truth_test, bmc) hbc_spur = find_star_pur(truth_test, hbc) som_spur = find_star_pur(truth_test, som) # + p = sns.color_palette() sns.set_style("ticks") fig = plt.figure(figsize=(6, 10)) ax1 = plt.subplot2grid((5, 3), (0, 0), colspan=3) ax2 = plt.subplot2grid((5, 3), (1, 0), colspan=3) ax3 = plt.subplot2grid((5, 3), (2, 0), colspan=3) ax4 = plt.subplot2grid((5, 3), (3, 0), colspan=3) ax5 = plt.subplot2grid((5, 3), (4, 0), colspan=3) plt.setp(ax1.get_xticklabels(), visible=False) plt.setp(ax2.get_xticklabels(), visible=False) plt.setp(ax3.get_xticklabels(), visible=False) plt.setp(ax4.get_xticklabels(), visible=False) plt.setp(ax5.get_xticklabels(), rotation=45) ax1.plot([10456, 10169, 9823, 9211, 8559, 7753, 6788, 5893, 5105, 4531, 4105, 3565, 2408], ls='-', marker='o', markersize=4) ax1.set_ylabel('training size') ax1.set_yticks([0, 2000, 4000, 6000, 8000, 10000]) ax1.set_yticklabels([r'$0$', r'$2 \times 10^3$', r'$4 \times 10^3$', r'$6 \times 10^3$', r'$8 \times 10^3$', r'$1 \times 10^4$']) for ticks in ax1.get_yaxis().majorTicks[1:]: ticks.set_pad(0) ax2.plot(bmc_auc, label='BMC', color=p[0], ls='-', c='k', marker='o', markersize=4) ax2.plot(tpc_auc, 
label='TPC', color=p[1], ls='-', marker='o', markersize=4) ax2.plot(hbc_auc, label='HB', color=p[3], ls='-', marker='o', markersize=4) #ax2.plot([0] * len(tpc_auc), label='Morphology', c='b', marker='o', markersize=4) ax2.set_ylim([0.75, 1.0]) ax2.set_yticks([0.8, 0.9, 1.0]) ax2.legend(loc='lower left') ax2.set_ylabel('AUC') ax3.plot(bmc_mse, label='BMC', color=p[0], ls='-', marker='o', markersize=4) ax3.plot(tpc_mse, label='TPC', color=p[1], ls='-', marker='o', markersize=4) ax3.plot([0.0397] * len(tpc_mse), label='Morphology', color=p[2], ls='-', marker='o', markersize=4) ax3.plot(hbc_mse, label='HB', color=p[3], ls='-', marker='o', markersize=4) ax3.set_ylim(0.02, 0.14) ax3.set_yticks([0.04, 0.08, 0.12]) ax3.set_ylabel('MSE') ax3.legend(loc='upper left', ncol=2) ax4.plot(bmc_gpur, label='BMC', color=p[0], ls='-', marker='o', markersize=4) ax4.plot(tpc_gpur, label='TPC', color=p[1], ls='-', marker='o', markersize=4) ax4.plot([0.9597] * len(tpc_gpur), color=p[2], label='Morphology', marker='o', markersize=4) ax4.plot(hbc_gpur, label='HB', color=p[3], ls='-', marker='o', markersize=4) #ax4.set_ylim(0.92, 0.98) ax4.set_yticks([0.92, 0.94, 0.96, 0.98]) ax4.set_ylabel(r'$p_g\left(c_g=0.9964\right)$', fontsize=12) ax5.plot(bmc_spur, label='BMC', color=p[0], ls='-', marker='o', markersize=4) ax5.plot(tpc_spur, label='TPC', color=p[1], ls='-', marker='o', markersize=4) ax5.plot([0.9666] * len(tpc_spur), label='Morphology', color=p[2], ls='-', marker='o', markersize=4) ax5.plot(hbc_spur, label='HB', color=p[3], ls='-', marker='o', markersize=4) ax5.set_ylim(0.65, 1.05) ax5.set_yticks([0.2, 0.4, 0.6, 0.8, 1.0]) ax5.set_ylabel(r'$p_s\left(c_s=0.7145\right)$', fontsize=12) ax5.set_xticklabels(['', '> 0.2', '> 0.4', '> 0.6', '> 0.8', '> 1.0', '> 1.2']) ax5.set_xlabel(r'$g-r$ color cut') plt.savefig('../../figures/perform_g_r_cut.pdf') plt.show() # -
paper/notebooks/figure18/perform_g_r.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Starbucks_Korea Crawler using Selenium

# +
# #!pip install selenium
# # !pip install bs4
# -

from urllib import request
from selenium import webdriver
import pandas as pd
import numpy as np
import bs4
import requests
import re
from time import sleep

print("BeautifulSoup version:%6.6s" % bs4.__version__)

# ## Crawling with chromedriver + selenium

# Path to the downloaded chromedriver and the store-locator page to crawl.
# NOTE(review): webdriver.Chrome(path) and the find_element_by_* helpers are
# selenium 3 APIs that were removed in selenium 4 (use Service/By instead);
# kept here to match the original runtime environment.
path = r"C:\Users\Kavin\Downloads\chromedriver_win32\chromedriver"
driver = webdriver.Chrome(path)
driver.get("https://www.istarbucks.co.kr/store/store_map.do")
sleep(5)

# +
# Collect every city/province name listed on the Starbucks site.
location_search = driver.find_element_by_class_name('loca_search')
location_search.click()
sleep(5)
city_name_list = [el.text for el in driver.find_elements_by_xpath(
    "//*[@id='container']/div/form/fieldset/div/section/article[1]/article/article[2]/div[1]/div[2]/ul/li")]
city_name_list
# -

# ### #1 Crawl the 16 cities/provinces, excluding Sejong

# Loop over every region except Sejong (the last entry), scraping each
# region's store names and addresses.
for i in range(len(city_name_list) - 1):
    # Open the region selector.
    location_search = driver.find_element_by_class_name('loca_search')
    location_search.click()
    sleep(5)

    # Choose the i-th city/province.
    city = driver.find_element_by_class_name('sido_arae_box')
    city_li = city.find_elements_by_tag_name('li')
    city_li[i].click()
    sleep(5)

    # Pick 'All' (the first entry) in the district list.
    gugun = driver.find_element_by_class_name('gugun_arae_box')
    gu_li = gugun.find_element_by_tag_name('li')
    gu_li.click()
    sleep(5)

    source = driver.page_source
    bs = bs4.BeautifulSoup(source, 'lxml')
    entire = bs.find('ul', class_='quickSearchResultBoxSidoGugun')
    li_list = entire.find_all('li')

    name_list = []
    address_list = []
    # Store names.
    for name in li_list:
        name_list.append(name.find('strong').text)
    # Store addresses (still containing a phone number suffix).
    for address in li_list:
        address_list.append(address.find('p').text)

    # Save to CSV - one file per region.
    path = "C:/dataitgirls/"  # output directory
    full_path = path + 'starbucks_' + city_name_list[i] + '.csv'  # directory + file name
    df = pd.DataFrame({'name': name_list, 'address': address_list})
    # Strip phone numbers from the address column.  regex=True preserves the
    # pattern semantics on pandas >= 1.4, where the default flipped to
    # literal (non-regex) replacement.
    df['address'] = df['address'].str.replace(r'\d{2,3}-\d{3,4}-\d{4}', '', regex=True)
    df.to_csv(full_path, header=True, index=True, encoding='euc-kr')

# ### #2 Crawl Sejong

# +
# Sejong has no district selector, so it is crawled separately.

# Open the region selector.
loca = driver.find_element_by_class_name('loca_search')
loca.click()
sleep(5)

# Choose Sejong (the 17th entry).
loca = driver.find_element_by_class_name('sido_arae_box')
li = loca.find_elements_by_tag_name('li')
li[16].click()
sleep(5)

source = driver.page_source
bs = bs4.BeautifulSoup(source, 'lxml')
entire = bs.find('ul', class_='quickSearchResultBoxSidoGugun')
li_list = entire.find_all('li')

name_list = []
address_list = []
# Store names.
for name in li_list:
    name_list.append(name.find('strong').text)
# Store addresses.
for address in li_list:
    address_list.append(address.find('p').text)

df_Sejong = pd.DataFrame({'name': name_list, 'address': address_list})
# Strip phone numbers from the address column (regex=True as above).
df_Sejong['address'] = df_Sejong['address'].str.replace(r'\d{2,3}-\d{3,4}-\d{4}', '', regex=True)
df_Sejong.to_csv("C:/dataitgirls/starbucks_세종.csv", header=True, index=True,
                 encoding='euc-kr')
# -

# ### #3 Combined version

# Single loop over all regions, Sejong included.
for i in range(len(city_name_list)):
    # Open the region selector.
    location_search = driver.find_element_by_class_name('loca_search')
    location_search.click()
    sleep(5)

    # Choose the i-th city/province.
    city = driver.find_element_by_class_name('sido_arae_box')
    city_li = city.find_elements_by_tag_name('li')
    city_li[i].click()
    sleep(5)

    # Detect whether this region exposes a district list (Sejong does not).
    is_Sejong = [el.text for el in driver.find_elements_by_xpath(
        "//*[@id='container']/div/form/fieldset/div/section/article[1]/article/article[2]/div[2]")]

    if len(is_Sejong[0]) == 0:
        # Sejong: no district selection needed.
        pass
    else:
        # Other regions: pick 'All' in the district list.
        gugun = driver.find_element_by_class_name('gugun_arae_box')
        gu_li = gugun.find_element_by_tag_name('li')
        gu_li.click()
        sleep(5)

    source = driver.page_source
    bs = bs4.BeautifulSoup(source, 'lxml')
    entire = bs.find('ul', class_='quickSearchResultBoxSidoGugun')
    li_list = entire.find_all('li')

    name_list = []
    address_list = []
    # Store names.
    for name in li_list:
        name_list.append(name.find('strong').text)
    # Store addresses.
    for address in li_list:
        address_list.append(address.find('p').text)

    # Save to CSV - one file per region.
    path = "C:/dataitgirls/"  # output directory
    full_path = path + 'starbucks_' + city_name_list[i] + '.csv'  # directory + file name
    df = pd.DataFrame({'name': name_list, 'address': address_list})
    # Strip phone numbers from the address column (regex=True as above).
    df['address'] = df['address'].str.replace(r'\d{2,3}-\d{3,4}-\d{4}', '', regex=True)
    df.to_csv(full_path, header=True, index=True, encoding='euc-kr')
Starbucks_selenium_crawler.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:wildfires] # language: python # name: conda-env-wildfires-py # --- # ## Setup from specific import * # ### Get shifted data ( endog_data, exog_data, master_mask, filled_datasets, masked_datasets, land_mask, ) = get_offset_data() client = get_client() client # ### Define the training and test data # + @data_split_cache def get_split_data(): X_train, X_test, y_train, y_test = train_test_split( exog_data, endog_data, random_state=1, shuffle=True, test_size=0.3 ) return X_train, X_test, y_train, y_test X_train, X_test, y_train, y_test = get_split_data() # - X_train.shape[0] / 2000 # Nr. of SHAP chunks. X_train.shape[0] / 50 # Nr. of SHAP interaction chunks. # ### Specific model training without grid seach # + n_splits = 5 param_dict = { "random_state": 1, "bootstrap": True, "ccp_alpha": 0.0, "max_depth": 18, "max_features": "auto", "min_samples_leaf": 1, "min_samples_split": 2, "n_estimators": 500, } # - # #### Cached results only cached = CachedResults( estimator_class=DaskRandomForestRegressor, n_splits=n_splits, cache_dir=CACHE_DIR ) results = cached.collate_scores(train_scores=True) model = DaskRandomForestRegressor(**param_dict) model.n_jobs = 32 model_key = tuple(sorted(model.get_params().items())) try: model = cached.get_estimator(model_key) except KeyError: with parallel_backend("dask"): model.fit(X_train, y_train) cached.store_estimator(model_key, model) # #### Place into expected cache location. # + @cross_val_cache def dummy_f(): return {}, model _, model1 = dummy_f() # - # ### Grid search # + import scipy.stats n_splits = 5 # Define the parameter space. # 1024 combinations ([100, 200] est., x 5 splits) takes ~ 20 hrs. 
parameters_RF = { "n_estimators": [300, 500], "max_depth": [14, 18], "min_samples_split": [2, 5], "min_samples_leaf": [1, 2, 4], "max_features": ["auto"], "ccp_alpha": np.linspace(0, 4e-9, 10), } default_param_dict = { "random_state": 1, "bootstrap": True, } # - # ## Hyperparameter optimisation # #### Calculate results results, rf = fit_dask_sub_est_random_search_cv( DaskRandomForestRegressor(**default_param_dict), X_train.values, y_train.values, parameters_RF, client, n_splits=n_splits, max_time="24h", n_iter=None, verbose=True, return_train_score=True, refit=True, local_n_jobs=30, random_state=0, cache_dir=CACHE_DIR, ) # ## Hyperparameter Search Visualisation # + hyperparams = defaultdict(list) for param_tuples, param_results in results.items(): for category, scores in param_results.items(): if len(scores) == n_splits: hyperparams[category].append(np.mean(scores)) hyperparams[category + "_std"].append(np.std(scores)) else: print(param_tuples, category, len(scores)) break # Do not append anything. 
else: for param, param_value in param_tuples: hyperparams[param].append(param_value) # - hyperparams = pd.DataFrame(hyperparams) score_keys = list(param_results) score_std_keys = [score_key + "_std" for score_key in score_keys] param_keys = list(set(hyperparams.columns) - set(score_keys) - set(score_std_keys)) hyperparams.fillna(-1, inplace=True) hyperparams_gap = hyperparams[hyperparams["test_score"] > 0.64].copy() hyperparams_gap["gap"] = hyperparams_gap["train_score"] - hyperparams_gap["test_score"] print(len(hyperparams_gap)) hyperparams_gap.sort_values(by="gap") hyperparams.sort_values(by="test_score", ascending=False)[:20] hyperparams.boxplot(column=score_keys, by=["min_samples_split", "n_estimators"]) melted = pd.melt( hyperparams[hyperparams["test_score"] > 0.65].drop(columns=score_std_keys), id_vars=param_keys, value_vars=score_keys, var_name="category", value_name="score", ) melted # ### Visualise the effect of individual parameters # + from alepython.ale import _sci_format for param_key in param_keys: if param_key == "ccp_alpha": fig = plt.figure(figsize=(25, 6)) else: fig = plt.figure(figsize=(9, 6)) ax = sns.boxplot(x=param_key, y="score", hue="category", data=melted) ax.set(ylabel="R2 Score") ax.grid(which="both", alpha=0.4, linestyle="--") if param_key == "ccp_alpha": ax.xaxis.set_ticklabels( _sci_format( np.array( list(map(lambda x: float(x.get_text()), ax.xaxis.get_ticklabels())) ) ) ) ax.xaxis.set_tick_params(rotation=45) figure_saver.save_figure(fig, param_key, sub_directory="hyperparameters") # - # ### Repeat for the standard deviations melted_std = pd.melt( hyperparams[hyperparams["test_score"] > 0.65].drop(columns=score_keys), id_vars=param_keys, value_vars=score_std_keys, var_name="category", value_name="score_std", ) melted_std # ### Visualise the effect of individual parameters # + from alepython.ale import _sci_format for param_key in param_keys: if param_key == "ccp_alpha": fig = plt.figure(figsize=(25, 6)) else: fig = 
plt.figure(figsize=(9, 6)) ax = sns.boxplot(x=param_key, y="score_std", hue="category", data=melted_std) ax.set(ylabel="R2 Score") ax.grid(which="both", alpha=0.4, linestyle="--") if param_key == "ccp_alpha": ax.xaxis.set_ticklabels( _sci_format( np.array( list(map(lambda x: float(x.get_text()), ax.xaxis.get_ticklabels())) ) ) ) ax.xaxis.set_tick_params(rotation=45) figure_saver.save_figure(fig, param_key, sub_directory="hyperparameters") # - # ### Dependence of R2 gap on performance # + mask = hyperparams["test_score"] > 0.66 gap = hyperparams[mask]["train_score"] - hyperparams[mask]["test_score"] # colorby = "max_depth" for colorby in param_keys: c = hyperparams[mask][colorby] try: np.asarray(c, dtype=np.float64) except ValueError: continue for key in ("train_score", "test_score")[1:]: plt.figure() plt.scatter(hyperparams[mask][key], gap, marker="o", alpha=0.3, c=c) plt.ylabel("R2 train - test") plt.xlabel(key) plt.colorbar(label=colorby) plt.grid(alpha=0.4, linestyle="--") # - # ### Scoring evaluation # + # %%time scores = {} model.n_jobs = 32 with parallel_backend("threading", n_jobs=32): y_pred = model.predict(X_test) scores["test_r2"] = r2_score(y_test, y_pred) scores["test_mse"] = mean_squared_error(y_test, y_pred) train_y_pred = model.predict(X_train) scores["train_r2"] = r2_score(y_train, train_y_pred) scores["train_mse"] = mean_squared_error(y_train, train_y_pred) # - scores plt.hexbin(y_pred, y_test, bins="log") plt.figure(figsize=(20, 10)) plt.hist(y_pred - y_test, bins=800) plt.yscale("log") y_test = y_test.values diffs = y_pred - y_test # + mask = y_test > 0.01 indices = np.argsort(diffs[mask]) plt.scatter( np.arange(len(indices)), diffs[mask][indices], marker="o", rasterized=True, alpha=0.1, c=np.log(y_test[mask][indices]), ) plt.colorbar(label="log(BA Test)") plt.ylabel("Prediction - Observation (test)") plt.yscale("symlog", linthreshy=0.01) # - plt.figure(figsize=(30, 15)) plt.scatter( np.log10(y_test), diffs, rasterized=True, marker="o", 
alpha=0.1, c=np.log10(y_pred) ) plt.colorbar(label="log10(Pred)") plt.yscale("symlog", linthreshy=0.00001) plt.ylabel("Pred - Obs") plt.xlabel("log10 Obs") plt.title("Validation Data") train_diffs = train_y_pred - y_train plt.figure(figsize=(30, 15)) plt.scatter( np.log10(y_train), train_diffs, rasterized=True, marker="o", alpha=0.1, c=np.log10(train_y_pred), ) plt.colorbar(label="log10(Pred)") plt.yscale("symlog", linthreshy=0.00001) plt.ylabel("Pred - Obs") plt.xlabel("log10 Obs") plt.title("Training Data") mask = y_train > 0.01 plt.figure(figsize=(30, 15)) plt.scatter( np.log10(y_train), np.log10(train_y_pred), rasterized=True, marker="o", alpha=0.01, c=np.log10(train_y_pred), ) plt.colorbar(label="log10(Pred)") plt.plot(np.log10(y_train), np.log10(y_train)) # plt.yscale('symlog', linthreshy=0.00001); plt.ylabel("log10 Pred") plt.xlabel("log10 Obs") plt.title("Training Data")
analyses/seasonality_paper_1/no_temporal_shifts/model-new.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: .venv # language: python # name: .venv # --- # # !pip install --upgrade google-api-python-client # !pip install google.cloud.bigquery # !pip install google.cloud.storage from google.cloud import bigquery client = bigquery.Client()
learn_sql.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

library(CausalImpact)
library(zoo)

# ## (3 points) Study the model used as the default in CausalImpact. Explain informally how the default model works, and draw conclusions about the limits of its applicability.

# ![model](model.png)

# The target $y_t$ is modelled as a latent state plus white noise.
# The latent state consists of a local level $\mu_t$, a local linear trend $\delta_t$, and a set of predictor variables $x_t$ multiplied by regression coefficients $\beta_{\rho}$. The local linear trend evolves by a random walk drawn from a normal distribution centred at the previous point with fixed variance $\sigma_{\delta}^2$. The local level evolves analogously, with the trend added in and its own variance $\sigma_{\mu}^2$. The regression coefficients $\beta_{\rho}$ also follow a random walk with fixed variance along each dimension.
# Summing the local level and the predictors multiplied by the regression coefficients gives the latent state for the distribution of $y_t$.
# In this way we can obtain the distribution of the values $y_{n+1} \dots y_m$; subtracting these from the real post-event data lets us observe the probability of a causal impact.
# The model is quite flexible and, with additional terms, can absorb the effects of trend, seasonality, and special events.

# ## (3 points + bonus points for rare, good examples) The model used in CausalImpact has many additional parameters (seasonality parameters, dynamic model parameters). Run experiments with various data and, based on them, compile a list of empirical rules for when the extra parameters improve the result and when, despite their apparent "logic", their use spoils it. Try to cover as many "tricky" situations as possible, where someone who has not done this exercise would likely reach for unsuitable parameters. Illustrate every rule with examples, preferably using "plausible" data.

# Seasonality can obviously help when the data are seasonal.
# Dynamic coefficients can help when the linear relationship between the metrics and the data changes over time.

set.seed(239)

# Simulate a simple intervention data set: an ARMA predictor x1, a target y
# that is a noisy linear function of x1, and a constant +10 effect injected
# into the post-period (points 71-100).
genCIevent <- function(order = c(1,0,1), ar = 0.999, ma = 0.999, sd = 1) {
    x1 <- 100 + arima.sim(model = list(ar = ar, ma = ma, order = order), n = 100, sd = sd)
    y <- 1.2 * x1 + rnorm(100)
    y[71:100] <- y[71:100] + 10
    data <- cbind(y, x1)
    pre.period <- c(1, 70)
    post.period <- c(71, 100)
    result = list(data=data, pre.period=pre.period, post.period=post.period)
    return(result)
}

event <- genCIevent(ma = 0.2, sd = 2)
matplot(event$data, type = "l")

# BUG FIX: the original called CausalImpact(data, pre.period, post.period),
# but `data`, `pre.period` and `post.period` exist only inside genCIevent;
# the simulated values are returned in `event` and must be read from there.
impact <- CausalImpact(event$data, event$pre.period, event$post.period,
                       model.args = list())
impact$summary
plot(impact)

# Same series, but asking the model for a (spurious) weekly seasonality.
impact <- CausalImpact(event$data, event$pre.period, event$post.period,
                       model.args = list(nseasons = 7))
impact$summary
plot(impact)

# ## (3 points) Suppose that a few months ago Yandex significantly improved the quality of its image search. You want to find out whether this change increased user activity on Yandex's main search. Which target series and predictor series would you use? Justify your answer, and also point out the possible problems of your solution and how you would work around them.

# The requirements for the predictor series are as follows: the predictor must let us forecast the target well before the event, and the event's influence on the predictor should be minimal.
# When searching for text, Yandex can suggest related images and videos. Improving the quality of image search should increase the number of transitions from text to images, as well as, directly, the number of images a user wants to examine more closely. The average number of such events per day/hour can be taken as the target time series. Such a series would show the change in user activity.
# As a predictor, something like the number of transitions to related videos or other sources would do (given that the quality of their search has not changed). The number of transitions and searches for images and for videos should correlate, considering that the user is looking for some kind of visual information.
# Possible problems:
# * We must make sure that in the vicinity of the event - the improvement in image search quality - there are no other events affecting the predictor series.

# ## (10 points) Imagine that Yandex has one more service - Yandex.Kitties. You need to evaluate the effectiveness of the advertising campaigns that ran from August through November (inclusive) of 2015 (a separate estimate for each). This problem can be solved in many ways; your goal is to reach the most accurate and correct final estimate. [In the first file](https://wiki.school.yandex.ru/shad/groups/2016/Semester3/Analytical/.files/causalimpacthomeworkdata1.csv) you can find information about the audience of this service (target), various estimates of the audience interested in kitties (market), as well as metrics for various audience slices of the service that may or may not have been affected by the advertising campaigns. [In the second file](https://wiki.school.yandex.ru/shad/groups/2016/Semester3/Analytical/.files/causalimpacthomeworkdata2.csv) you will find information about the dates of the advertising campaigns and an estimate (possibly wrong) of their sizes. The same file also contains information about the advertising campaigns of some other services that could potentially have affected Yandex.Kitties (for example, an ad with the message "all of Yandex is great" might have helped our service, or might not have influenced its popularity).

# Let's begin.
# Read the data. Parse numbers where possible, and dates as well.
data1 <- read.csv("CausalImpact_homework_data1.csv")
data2 <- read.csv("CausalImpact_homework_data2.csv")

# +
# Coerce the value columns to numeric and forward-fill the gaps with
# na.locf; warnings from the character->numeric coercion are suppressed.
options(warn=-1)
# data1
data1[,c(2:ncol(data1))] <- lapply(data1[,c(2:ncol(data1))], function(x) as.numeric(as.character(x)))
data1 <- na.locf(data1)
data1[,c(2:ncol(data1))] <- lapply(data1[,c(2:ncol(data1))], as.numeric)
# data2
data2[,c(3:ncol(data2))] <- lapply(data2[,c(3:ncol(data2))], function(x) as.numeric(as.character(x)))
data2 <- na.locf(data2)
data2[,c(3:ncol(data2))] <- lapply(data2[,c(3:ncol(data2))], as.numeric)
options(warn=0)
# -

# Rebuild proper Date columns: data1 gets a daily sequence starting at its
# first date; data2's day offsets are converted relative to that origin.
origin <- as.Date(data1["date"][1,])
last.day <- as.Date(data1["date"][nrow(data1),])
data1$date <- seq.Date(origin, by = 1, length.out = nrow(data1))
data2["start_date"] <- lapply(data2["start_date"], function(days) as.Date(days - 1, origin = origin))
data2["end_date"] <- lapply(data2["end_date"], function(days) as.Date(days - 1, origin = origin))

# Check whether any NAs remain.
c(anyNA(data1), anyNA(data2))

data1[c(1:4),]

data2[c(1:4),]

# Get the list of events inside the August'15 - November'15 window.
observe.start <- as.Date("2015-08-01")
observe.end <- as.Date("2015-11-30")
observe.main_event <- list(start_date = observe.start, end_date = observe.end)

# BUG FIX: the original filtered on `start.observe`, which is never defined
# (the variable created above is `observe.start`), so this line errored.
observe.events <- data2[which(data2$start_date >= observe.start & data2$end_date <= observe.end),]
observe.events

# +
target <- data1$target

# Pearson correlation between target and predictor, returned together with
# the p-value of the test.
cor.prs <- function(target, predictor) {
    res <- cor.test(target, predictor, method = "pearson", use = "complete.obs")
    return(list(cor = as.numeric(res$estimate), p.value = res$p.value))
}

# Correlation of a predictor with the target before, during, and after the
# given event window.
check <- function (predictor, event) {
    date.before <- data1$date < event$start_date
    date.inside <- data1$date >= event$start_date & data1$date <= event$end_date
    date.after <- data1$date > event$end_date
    before <- cor.prs(target[date.before], predictor[date.before])
    inside <- cor.prs(target[date.inside], predictor[date.inside])
    after <- cor.prs(target[date.after], predictor[date.after])
    return(list(before = before, inside = inside, after = after))
}
# -

predictors <- c("market_1", "market_2", "market_3", "market_4", "market_5")

# +
# Collect the before/inside/after correlations for every candidate predictor
# over the whole August-November window.
befores <- list()
insides <- list()
afters <- list()
for (predictor in predictors){
    test <- check(unlist(data1[predictor]), observe.main_event)
    befores <- c(befores, test$before$cor)
    insides <- c(insides, test$inside$cor)
    afters <- c(afters, test$after$cor)
}
# -

rbind(befores, insides, afters)

# We are interested in events that break the correlation between the target and a predictor. Take the big August'15 - November'15 event and see how the Pearson correlation behaves for the series before, inside, and after the event.
# As can be seen, before the advertising period all predictors correlate well with the target; during it, only the 4th does; and after it, all except the 1st. We will therefore consider predictor columns 2, 3 and 5.
# Run CI for every pair of interesting event / predictor and fill in the table.
# predictors <- c("market_2", "market_3", "market_5")
predictors <- c("market_5")

# Run CausalImpact for every (event, predictor) pair inside the window.
# NOTE(review): `impact` is overwritten on each iteration and summary() is
# commented out, so this loop only computes the last combination; the table
# below was presumably filled in by re-running with summary() enabled — verify.
for (i in 1:nrow(observe.events)) {
    event <- observe.events[i,]
    for (predictor in predictors) {
        data <- zoo(cbind(target, data1[predictor]), data1$date)
        pre.period <- as.Date(c(origin, event$start_date - 1))
        post.period <- as.Date(c(event$start_date, event$end_date))
        impact <- CausalImpact(data, pre.period, post.period)
        # summary(impact)
    }
}

# | Ad campaign (num) | size/is related | market_2 | market_3 | market_5 |
# |:------------------------:|-----------------|-----------|-----------|-----------|
# | 25 | 1.5 / 1 | 0.01 98% | 0.01 98% | 0.02 97% |
# | 26 | 0.1 / 1 | 0.001 99% | 0.001 99% | 0.001 99% |
# | 27 | 0.0 / 0 | 0.001 99% | 0.001 99% | 0.001 99% |
# | 28 | 0.7 / 0 | 0.04 95% | 0.04 96% | 0.06 94% |
# | 29 | 0.4 / 0 | 0.06 93% | 0.04 95% | 0.03 96% |
# | 31 | 0.0 / 1 | 0.04 99% | 0.01 99% | 0.01 99% |

# Thus, the best results are observed for ad campaigns number 26, 27 and 31.
task02/task.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:metis] * # language: python # name: conda-env-metis-py # --- # + import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix from keras.datasets import fashion_mnist from tensorflow import keras from tensorflow.keras import datasets, layers, models from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, GlobalAveragePooling2D, InputLayer, Dropout, ZeroPadding2D from tensorflow.keras.preprocessing import image from tensorflow.keras.utils import to_categorical from keras.utils.vis_utils import model_to_dot from keras.utils.vis_utils import plot_model from IPython.display import SVG import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline # - (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data() # Data normalization - normalize the data dimension that they are of approximately the same scale X_train = X_train.astype('float32') / 255 X_test = X_test.astype('float32') / 255 print('Number of train data: ' + str(len(X_train))) print('Number of test data: ' + str(len(X_test))) # ### Split the data into train/validation/test # * train set - training the model # * validation set - tuning the hyperparameter and evaluate the models # * test set - testing the model after the model has gone through initial vetting by the validation set # + # break training set into train and val sets. 
# Hold out 20% of the training data as a validation set (stratification not
# used in the original; kept as-is).
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
                                                  test_size=0.2,
                                                  random_state=42)
print('Fashion MNIST Train - rows: ', X_train.shape[0], ' columns: ', X_train.shape[1:4])
print('Fashion MNIST Validation - rows: ', X_val.shape[0], ' columns: ', X_val.shape[1:4])
print('Fashion MNIST Test - rows: ', X_test.shape[0], ' columns: ', X_test.shape[1:4])

# +
# Reshape the input data to (samples, height, width, channels) for Conv2D.
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_val = X_val.reshape(X_val.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)

# One-hot encode the 10 class labels.
y_train = keras.utils.to_categorical(y_train, 10)
y_val = keras.utils.to_categorical(y_val, 10)
y_test = keras.utils.to_categorical(y_test, 10)

# Print training set shape (typo "X_tain" in the original message fixed).
print('X_train shape: ', X_train.shape, 'y_train shape: ', y_train.shape)
print(X_train.shape[0], 'train set')
print(X_val.shape[0], 'validation set')
print(X_test.shape[0], 'test set')
# -

# ### Model Architecture
# * Sequential model API in Keras
# * Conv2D - create a convolutional layer
# * Pooling() - create a pooling layer
# * Dropout () - apply dropout

# +
model = Sequential()

# Three conv/pool/dropout stages; the input shape must be declared on the
# first layer of the network.
model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu',
                 input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))

model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))

model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.1))

# Dense classification head.
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(10, activation='softmax'))

model.summary()
# -

# ### Compile the model
# Configure the learning process with compile() before training the model.
#
# * optimizer
# * loss function
# * metrics

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# ### Train the model

# +
from keras.callbacks import ModelCheckpoint

# Checkpoint only the weights that achieve the best validation loss.
checkpointer = ModelCheckpoint(filepath='model.weights.best.hdf5', verbose=1,
                               save_best_only=True)
result = model.fit(X_train, y_train,
                   batch_size=64,
                   epochs=20,
                   validation_data=(X_val, y_val),
                   callbacks=[checkpointer])
# -

# ### Load model with the best validation accuracy

# Load the weights with the best validation accuracy
model.load_weights('model.weights.best.hdf5')

# ### Test accuracy

# +
# Evaluate the model on the held-out test set.
score = model.evaluate(X_test, y_test, verbose=0)

# Print test accuracy
print('\n', 'Test accuracy:', score[1])
# -

result.history['accuracy']

plt.plot(result.history['accuracy'])

result.history['val_accuracy']

plt.plot(result.history['val_accuracy'])

# +
# Side-by-side accuracy and loss curves for train vs. validation.
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(result.history['accuracy'], label='train accuracy', color='red')
plt.plot(result.history['val_accuracy'], label='val accuracy', color='blue')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(result.history['loss'], label='train loss', color='red')
plt.plot(result.history['val_loss'], label='val loss', color='blue')
plt.legend()
plt.savefig('Accuracy & Loss', dpi=150)
plt.show()
# -

model.metrics_names

# ### Visualize prediction
# Now let's visualize the prediction using the model you just trained. First we get the predictions with the model from the test data. Then we print out 15 images from the test data set, and set the titles with the prediction (and the ground truth label). If the prediction matches the true label, the title will be green; otherwise it's displayed in red.
labels = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal',
          'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# +
y_hat = model.predict(X_test)

# Plot a random sample of 15 test images with predicted and true labels.
figure = plt.figure(figsize=(20, 8))
for i, index in enumerate(np.random.choice(X_test.shape[0], size=15, replace=False)):
    ax = figure.add_subplot(3, 5, i + 1, xticks=[], yticks=[])
    # Display each image
    ax.imshow(np.squeeze(X_test[index]))
    predict_index = np.argmax(y_hat[index])
    true_index = np.argmax(y_test[index])
    # Green title for a correct prediction, red for a wrong one.
    ax.set_title("{} ({})".format(labels[predict_index], labels[true_index]),
                 color=("green" if predict_index == true_index else "red"))
# Save once, after all 15 panels are drawn (the original called savefig
# inside the loop, writing an incomplete figure on every iteration).
plt.savefig('Output Sample', dpi=150)
# -

# ### Confusion Matrix

import itertools


def plot_confusion_matrix(cm, classes, normalize=False,
                          title='Confusion matrix', cmap=plt.cm.Blues):
    """Render a confusion matrix as a heatmap with per-cell values.

    cm        -- square confusion matrix (n_classes x n_classes)
    classes   -- tick labels, one per class
    normalize -- if True, the per-cell text shows row-wise fractions
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)
    # NOTE(review): normalization happens after imshow, so the heatmap colors
    # always reflect raw counts; only the text is normalized. This matches
    # the original behavior — confirm if colors should normalize too.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    thresh = cm.max() / 2
    # White text on dark cells, black text on light cells, for readability.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment='center',
                 color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


# +
# Predict classes on the test set and compare against the true labels.
y_pred = model.predict(X_test)
# convert prediction classes to one hot vectors
y_pred_classes = np.argmax(y_pred, axis=1)
# convert validation observation to one hot vectors
y_true = np.argmax(y_test, axis=1)

confusion_mtx = confusion_matrix(y_true, y_pred_classes)
# Reuse the labels list defined above instead of repeating the literal.
plot_confusion_matrix(confusion_mtx, classes=labels)
# -

# * A large number of T-shirts/Tops are misclassified as Shirts
# * also lots of Shirts are misclassified as Coats

# #### Correctly Predicted Labels


def _show_prediction_grid(indices):
    """Show four test images in a 2x2 grid titled 'Predicted vs. Actual' label.

    Fixes two bugs in the original inline code: the third panel reused the
    actual label of the FIRST sample, and 'Actual' was misspelled 'Acutal'.
    """
    fig, ax = plt.subplots(2, 2, figsize=(12, 6))
    fig.set_size_inches(10, 10)
    for k, idx in enumerate(indices[:4]):
        axis = ax[k // 2, k % 2]
        axis.imshow(X_test[idx].reshape(28, 28), cmap='gray')
        axis.set_title('Predicted Label: ' + str(labels[y_pred_classes[idx]]) +
                       '\n' + 'Actual Label: ' + str(labels[y_true[idx]]))


# Collect the first four correctly classified test indices.
correct = []
for i in range(len(y_test)):
    if (y_pred_classes[i] == y_true[i]):
        correct.append(i)
    if (len(correct) == 4):
        break

_show_prediction_grid(correct)

# #### Incorrectly Predicted Classes

# Collect the first four misclassified test indices.
incorrect = []
for i in range(len(y_test)):
    if (not y_pred_classes[i] == y_true[i]):
        incorrect.append(i)
    if (len(incorrect) == 4):
        break

_show_prediction_grid(incorrect)

# ### Classification Report

print(classification_report(y_true, y_pred_classes, target_names=labels))
Workflow /Final Model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_amazonei_tensorflow_p36
#     language: python
#     name: conda_amazonei_tensorflow_p36
# ---

import sys
sys.path.append('/home/ec2-user/SageMaker')
import toniutils as tu

# !pip install keras

from keras.models import model_from_json

# <h2>Load Models</h2>

# +
import numpy as np
import io
import pickle
import boto3
from keras.models import model_from_json

s3_client = boto3.client('s3')
# -

# Fetch the 18 first-layer Keras architectures from S3 and rebuild them.
models_dict = {}
for model_name in ['model-' + str(i) for i in range(1, 19)]:
    model_json = s3_client.get_object(Bucket='bme-bucket',
                                      Key='models/keras-' + model_name + '.json')
    models_dict[model_name] = model_from_json(model_json['Body'].read())

# <h2>Load Data</h2>

# +
# Download the pickled arrays straight into memory (no files on disk).
my_array_data2 = io.BytesIO()
s3_client.download_fileobj('bme-bucket', 'engineered_data/Y_minmaxscaled.pkl', my_array_data2)
my_array_data2.seek(0)
Y = pickle.load(my_array_data2)

my_array_data2 = io.BytesIO()
s3_client.download_fileobj('bme-bucket', 'engineered_data/X.pkl', my_array_data2)
my_array_data2.seek(0)
X = pickle.load(my_array_data2)
# -

# <h3>Format data</h3>

# Stack all 18 first-layer predictions in ONE hstack. The original grew the
# matrix with one incremental hstack per model, re-copying the whole array
# each time (quadratic in total width); building a list first is linear.
X_2nd_layer = np.hstack(
    [models_dict['model-' + str(i)].predict(X) for i in range(1, 19)]
)

# <h2>2nd Layer Models</h2>

# +
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.multioutput import MultiOutputRegressor, RegressorChain
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as mse
# -

# Candidate second-layer (meta) regressors, keyed by display name.
ESTIMATORS = {
    "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
                                       random_state=0),
    # "K-nn": KNeighborsRegressor(),  # skipped: too slow on this data
    "Linear regression": LinearRegression(),
    "Ridge": RidgeCV(),
    "Lasso": Lasso(),
    "ElasticNet": ElasticNet(random_state=0),
    "RandomForestRegressor": RandomForestRegressor(max_depth=4, random_state=2),
    "Decision Tree Regressor": DecisionTreeRegressor(max_depth=5),
    "MultiO/P GBR": MultiOutputRegressor(GradientBoostingRegressor(n_estimators=5)),
    "MultiO/P AdaB": MultiOutputRegressor(AdaBoostRegressor(n_estimators=5)),
    "Regressor/C GBR": RegressorChain(GradientBoostingRegressor(n_estimators=5)),
    "Regressor/C AdaB": RegressorChain(AdaBoostRegressor(n_estimators=5)),
}

X_train, X_test, y_train, y_test = train_test_split(X_2nd_layer, Y,
                                                    test_size=0.2,
                                                    random_state=2)

# +
# Fit every candidate and record its test-set MSE.
FITTED_ESTIMATORS = {}
y_mse = {}
for name, estimator in ESTIMATORS.items():
    print(name)
    FITTED_ESTIMATORS[name] = estimator.fit(X_train, y_train)
    y_mse[name] = mse(y_test, estimator.predict(X_test))
# -

y_mse

# <h2>Save Models</h2>

# Pickle the fitted estimators and their MSEs back to S3, in memory.
my_array_data = io.BytesIO()
pickle.dump(FITTED_ESTIMATORS, my_array_data)
my_array_data.seek(0)
s3_client.upload_fileobj(my_array_data, 'bme-bucket',
                         'models/experiment-0/2nd-layer-models/models_dict.pkl')

my_array_data = io.BytesIO()
pickle.dump(y_mse, my_array_data)
my_array_data.seek(0)
s3_client.upload_fileobj(my_array_data, 'bme-bucket',
                         'models/experiment-0/2nd-layer-models_mse.pkl')
2nd-layer-models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# # Introduction to JumpStart - Semantic Segmentation
#
# This notebook demonstrates the SageMaker JumpStart API for semantic
# segmentation: (1) running inference with a pre-trained model and
# (2) fine-tuning a model on a custom dataset. Semantic segmentation assigns
# a class label to every pixel of an image; unlike instance segmentation, all
# objects of the same class are treated as one entity.
#
# Tested on ml.t3.medium in SageMaker Studio (Python 3 Data Science kernel)
# and on a SageMaker Notebook instance (conda_python3 kernel).

# ## 1. Set Up
# The notebook requires recent versions of sagemaker and ipywidgets.

# !pip install sagemaker ipywidgets --upgrade --quiet

# ### Permissions and environment variables
# All SageMaker/S3 access uses the notebook's execution role.

# +
import sagemaker, boto3, json
from sagemaker import get_execution_role

aws_role = get_execution_role()
aws_region = boto3.Session().region_name
sess = sagemaker.Session()
# -

# ### 2. Select a pre-trained model
# Download the JumpStart model manifest, keep the semantic-segmentation
# models, and present them in a dropdown.

# +
from ipywidgets import Dropdown

# Download the JumpStart model_manifest file from the regional cache bucket.
boto3.client("s3").download_file(
    f"jumpstart-cache-prod-{aws_region}", "models_manifest.json", "models_manifest.json"
)
with open("models_manifest.json", "rb") as json_file:
    model_list = json.load(json_file)

# Keep the unique model ids whose id marks them as segmentation models.
semseg_models = []
for manifest_entry in model_list:
    candidate_id = manifest_entry["model_id"]
    if "-semseg-" in candidate_id and candidate_id not in semseg_models:
        semseg_models.append(candidate_id)

print(f"\033[38;5;2mChose a model: \033[0;0m\n")

# Dropdown for picking the model used in the rest of the notebook.
model_dropdown = Dropdown(
    options=semseg_models,
    value="mxnet-semseg-fcn-resnet50-ade",
    description="",
    style={"description_width": "initial"},
    layout={"width": "max-content"},
)
display(model_dropdown)
# -

# model_version="*" fetches the latest version of the model
model_id, model_version = model_dropdown.value, "*"

# ## 3. Run inference on the pre-trained model
# The pre-trained model can be used as-is, with no fine-tuning.

# ### 3.1. Retrieve JumpStart Artifacts & Deploy an Endpoint
# Look up the inference container image, inference script, and model
# artifact for the chosen model, wrap them in a sagemaker.model.Model, and
# deploy it to an endpoint.

# +
from sagemaker import image_uris, model_uris, script_uris, hyperparameters
from sagemaker.model import Model
from sagemaker.predictor import Predictor
from sagemaker.utils import name_from_base

endpoint_name = name_from_base(f"jumpstart-example-infer-{model_id}")

inference_instance_type = "ml.p2.xlarge"

# Inference docker container.
deploy_image_uri = image_uris.retrieve(
    region=None,
    framework=None,  # automatically inferred from model_id
    image_scope="inference",
    model_id=model_id,
    model_version=model_version,
    instance_type=inference_instance_type,
)

# Inference handler script (model loading, request handling, ...).
deploy_source_uri = script_uris.retrieve(
    model_id=model_id, model_version=model_version, script_scope="inference"
)

# Pre-trained model tarball.
base_model_uri = model_uris.retrieve(
    model_id=model_id, model_version=model_version, model_scope="inference"
)

# Create the SageMaker model instance.
model = Model(
    image_uri=deploy_image_uri,
    source_dir=deploy_source_uri,
    model_data=base_model_uri,
    entry_point="inference.py",  # entry point file in source_dir and present in deploy_source_uri
    role=aws_role,
    predictor_cls=Predictor,
    name=endpoint_name,
)

# Passing predictor_cls at deploy time lets us run inference through the
# SageMaker API afterwards.
base_model_predictor = model.deploy(
    initial_instance_count=1,
    instance_type=inference_instance_type,
    predictor_cls=Predictor,
    endpoint_name=endpoint_name,
)
# -

# ### 3.2. Download an example image for inference
# The example image comes from the JumpStart assets S3 bucket.
#
# ---

# +
jumpstart_assets_s3_bucket = f"jumpstart-cache-prod-{aws_region}"
pedestrian_img_key_prefix = "inference-notebook-assets"
pedestrian_img = "img_pedestrian.png"

# Fetch the sample pedestrian image into the working directory.
boto3.client("s3").download_file(
    jumpstart_assets_s3_bucket, f"{pedestrian_img_key_prefix}/{pedestrian_img}", pedestrian_img
)
# -

# ### 3.3. Query endpoint and parse response
# The endpoint takes a single image as binary input and returns a predicted
# label for every pixel of the image.

# +
import json


def query(model_predictor, image_file_name):
    """Send the raw bytes of image_file_name to the endpoint and return its response."""
    with open(image_file_name, "rb") as image_file:
        payload = image_file.read()

    return model_predictor.predict(
        payload,
        {
            "ContentType": "application/x-image",
            "Accept": "application/json;verbose",
        },
    )


def parse_response(query_response):
    """Decode the JSON response.

    Returns a tuple of (per-pixel predictions, the full label set, the
    labels actually present in the image).
    """
    response_dict = json.loads(query_response)
    return response_dict["predictions"], response_dict["labels"], response_dict["image_labels"]


# +
query_response = query(base_model_predictor, pedestrian_img)

predictions, labels, image_labels = parse_response(query_response)
print("Objects present in the picture:", image_labels)
# -

# ### 3.4. Display model predictions
# Overlay the per-pixel predictions on the original image.
# To get a color palette for visualization, we borrow the VOC palette
# implementation from GluonCV.

# +
import matplotlib.image as mpimg
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image


def getvocpalette(num_cls):
    """Build a flat [r0, g0, b0, r1, g1, b1, ...] VOC-style palette.

    Each class index gets an RGB color by spreading the bits of the label
    value across the three channels (the standard Pascal VOC scheme).
    """
    palette = [0] * (num_cls * 3)
    for j in range(num_cls):
        lab = j
        palette[j * 3 + 0] = 0
        palette[j * 3 + 1] = 0
        palette[j * 3 + 2] = 0
        i = 0
        # Distribute the bits of `lab` over R, G and B, from the most
        # significant color bit downwards.
        while lab > 0:
            palette[j * 3 + 0] |= ((lab >> 0) & 1) << (7 - i)
            palette[j * 3 + 1] |= ((lab >> 1) & 1) << (7 - i)
            palette[j * 3 + 2] |= ((lab >> 2) & 1) << (7 - i)
            i += 1
            lab >>= 3
    return palette


def display_predictions(predictions):
    """Display predictions with each pixel substituted by the color of its label."""
    palette = getvocpalette(256)
    npimg = np.array(predictions)
    npimg[npimg == -1] = 255  # map "ignore" pixels (-1) to the last palette slot
    mask = Image.fromarray(npimg.astype("uint8"))
    mask.putpalette(palette)
    plt.imshow(mask)


# -

display_predictions(predictions)

# ### 3.5. Clean up the endpoint

# Delete the SageMaker endpoint
base_model_predictor.delete_model()
base_model_predictor.delete_endpoint()

# ## 4. Fine-tune the pre-trained model on a custom dataset
#
# The fine-tunable models attach a fully convolutional network (FCN) "head"
# on top of the base network; fine-tuning trains the FCNHead while the rest
# of the model stays frozen, minimizing per-pixel softmax cross-entropy
# loss. The fine-tuned model can then be deployed for inference. Below are
# the instructions for how the training data should be formatted for input
# to the model.
# * Input: a directory with sub-directories images/, masks/ and a file
#   class_label_to_prediction_index.json.
# * Output: a trained model that can be deployed for inference; a label
#   mapping file is saved along with the trained model on the S3 bucket.
#
# Example layout for two training images (the .png names are arbitrary; the
# mask files hold the class label for every pixel):
#
# * input_directory
#     * images
#         * abc.png
#         * def.png
#     * masks
#         * abc.png
#         * def.png
#     * class_label_to_prediction_index.json
#
# The default fine-tuning dataset is PennFudanPed (pedestrian images),
# downloaded from https://www.cis.upenn.edu/~jshi/ped_html/.
#
# Citation:
# <sub><sup>
# @ONLINE {pennfudanped,
# author = "<NAME>, <NAME>, <NAME>, and <NAME>",
# title = "Penn-Fudan Database for Pedestrian Detection and Segmentation",
# year = "2007",
# url = "https://www.cis.upenn.edu/~jshi/ped_html/" }
# </sup></sub>

# ### 4.1. Retrieve Training Artifacts
# Fetch the training docker image, the training script, and the pre-trained
# base model (model_version="*" selects the latest version).

# +
from sagemaker import image_uris, model_uris, script_uris

train_scope = "training"
training_instance_type = "ml.p3.2xlarge"

# Training docker image.
train_image_uri = image_uris.retrieve(
    region=None,
    framework=None,  # automatically inferred from model_id
    model_id=model_id,
    model_version=model_version,
    image_scope=train_scope,
    instance_type=training_instance_type,
)

# Training script (data processing, model training, ...).
train_source_uri = script_uris.retrieve(
    model_id=model_id, model_version=model_version, script_scope=train_scope
)

# Pre-trained model tarball to fine-tune.
train_model_uri = model_uris.retrieve(
    model_id=model_id, model_version=model_version, model_scope=train_scope
)
# -

# ### 4.2. Set Training parameters
# Job-level parameters: S3 input path, S3 output path, training instance
# type (defined above, so the matching train_image_uri was fetched).
# Algorithm-level parameters: the model's hyperparameters.

# +
# Sample training data is available in this bucket.
training_data_bucket = f"jumpstart-cache-prod-{aws_region}"
training_data_prefix = "training-datasets/PennFudanPed_SemSeg/"

training_dataset_s3_path = f"s3://{training_data_bucket}/{training_data_prefix}"

output_bucket = sess.default_bucket()
output_prefix = "jumpstart-example-semseg-training"

s3_output_location = f"s3://{output_bucket}/{output_prefix}/output"
# -

# Start from the algorithm's default hyperparameters and override as needed.

# +
from sagemaker import hyperparameters

# Retrieve the default hyper-parameters for fine-tuning the model.
hyperparameters = hyperparameters.retrieve_default(model_id=model_id, model_version=model_version)

# [Optional] Override default hyperparameters with custom values.
hyperparameters["epochs"] = "4"
print(hyperparameters)
# -

# ### 4.3. Train with Automatic Model Tuning (HPO) <a id='AMT'></a>
# SageMaker automatic model tuning runs multiple training jobs over the
# configured hyperparameter ranges and keeps the model that scores best on
# the chosen objective metric.

# + pycharm={"name": "#%%\n"}
from sagemaker.tuner import ContinuousParameter

# Use AMT for tuning and selecting the best model.
use_amt = True

# Objective metric per framework, used to select the best model.
metric_definitions_per_model = {
    "mxnet": {
        "metrics": [{"Name": "val_loss", "Regex": "validation loss=([0-9\\.]+)"}],
        "type": "Minimize",
    }
}

# Hyperparameter search space.
hyperparameter_ranges = {
    "adam-learning-rate": ContinuousParameter(0.0001, 0.1, scaling_type="Logarithmic")
}

# More jobs -> better search accuracy but longer total training time.
max_jobs = 6
# If max_jobs == max_parallel_jobs, Bayesian search degenerates to random.
max_parallel_jobs = 2
# -

# ### 4.4. Start Training
# Create the estimator and launch the tuning (or plain training) job; it
# takes under 30 minutes on the default dataset.

# +
from sagemaker.estimator import Estimator
from sagemaker.utils import name_from_base
from sagemaker.tuner import HyperparameterTuner

training_job_name = name_from_base(f"jumpstart-example-{model_id}-transfer-learning")

# Create SageMaker Estimator instance.
semseg_estimator = Estimator(
    role=aws_role,
    image_uri=train_image_uri,
    source_dir=train_source_uri,
    model_uri=train_model_uri,
    entry_point="transfer_learning.py",  # Entry-point file in source_dir and present in train_source_uri.
    instance_count=1,
    instance_type=training_instance_type,
    max_run=360000,
    hyperparameters=hyperparameters,
    output_path=s3_output_location,
    base_job_name=training_job_name,
)

if use_amt:
    # Pick the metric definition matching the model's framework prefix.
    metric_definitions = next(
        value for key, value in metric_definitions_per_model.items() if model_id.startswith(key)
    )

    hp_tuner = HyperparameterTuner(
        semseg_estimator,
        metric_definitions["metrics"][0]["Name"],
        hyperparameter_ranges,
        metric_definitions["metrics"],
        max_jobs=max_jobs,
        max_parallel_jobs=max_parallel_jobs,
        objective_type=metric_definitions["type"],
        base_tuning_job_name=training_job_name,
    )

    # Launch a SageMaker Tuning job to search for the best hyperparameters.
    hp_tuner.fit({"training": training_dataset_s3_path})
else:
    # Launch a SageMaker Training job by passing s3 path of the training data.
    semseg_estimator.fit({"training": training_dataset_s3_path}, logs=True)
# -

# ### 4.5. Deploy and run inference on the fine-tuned model
# Same steps as section 3, but deploying the fine-tuned estimator instead of
# the pre-trained base model.

# +
inference_instance_type = "ml.p2.xlarge"

# Inference docker container.
deploy_image_uri = image_uris.retrieve(
    region=None,
    framework=None,  # automatically inferred from model_id
    image_scope="inference",
    model_id=model_id,
    model_version=model_version,
    instance_type=inference_instance_type,
)

# Inference handler script.
deploy_source_uri = script_uris.retrieve(
    model_id=model_id, model_version=model_version, script_scope="inference"
)

endpoint_name = name_from_base(f"jumpstart-example-FT-{model_id}-")

# Deploy the best model found by the tuner (or the plainly trained one).
finetuned_predictor = (hp_tuner if use_amt else semseg_estimator).deploy(
    initial_instance_count=1,
    instance_type=inference_instance_type,
    entry_point="inference.py",  # entry point file in source_dir and present in deploy_source_uri
    image_uri=deploy_image_uri,
    source_dir=deploy_source_uri,
    endpoint_name=endpoint_name,
)
# -

# Download an example pedestrian image from the training dataset.

# +
jumpstart_assets_bucket = f"jumpstart-cache-prod-{aws_region}"
pedestrian_image_key = "training-datasets/PennFudanPed_SemSeg/images"
pedestrian_image_file_name = "FudanPed00001.png"

boto3.client("s3").download_file(
    jumpstart_assets_bucket,
    f"{pedestrian_image_key}/{pedestrian_image_file_name}",
    pedestrian_image_file_name,
)
# -

# Query the fine-tuned model, parse the response and display the
# predictions (query/parse_response/display_predictions are defined in
# sections 3.3 and 3.4).

# Fixed: query with the image downloaded just above (the original passed
# pedestrian_img, the section-3 image, which made the download above unused).
query_response = query(finetuned_predictor, pedestrian_image_file_name)

predictions, labels, image_labels = parse_response(query_response)

display_predictions(predictions)

# Finally, delete the endpoint corresponding to the fine-tuned model.

# Delete the SageMaker endpoint
finetuned_predictor.delete_model()
finetuned_predictor.delete_endpoint()
introduction_to_amazon_algorithms/jumpstart_semantic_segmentation/Amazon_JumpStart_Semantic_Segmentation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Alle Excel - Dateien aus Ordner visualisieren

import os

import pandas as pd

# Collect every .xlsx file in the data folder. str.endswith is clearer than
# slicing (`file[-5:] == ".xlsx"`) and behaves correctly for short names.
files = os.listdir("../data/ordner")
excel_files = [file for file in files if file.endswith(".xlsx")]

# Read each workbook into a DataFrame, keyed by its file name.
dfs = {}
for file in excel_files:
    dfs[file] = pd.read_excel("../data/ordner/" + file)

# Print the row count per file.
for filename, df in dfs.items():
    print(filename + ": " + str(len(df)))

# +
# Build parallel lists of bar positions, labels and row counts.
# enumerate(start=1) replaces the original manual `counter` variable.
filenames = []
number_of_lines = []
x = []

for counter, (filename, df) in enumerate(dfs.items(), start=1):
    x.append(counter)
    filenames.append(filename)
    number_of_lines.append(len(df))

print(x)
print(filenames)
print(number_of_lines)

# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns

# One bar per Excel file, labelled with the file name.
fig, ax = plt.subplots()
fig.dpi = 100

ax.bar(x, number_of_lines, tick_label=filenames)

plt.show()
# -
UDEMY_Datavis_Python/11 - auf ordner zugreifen/Alle Excel - Dateien aus Ordner visualisieren (Musterloesung).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Statistics using SimpleITK
# [SimpleITK](https://simpleitk.readthedocs.io/) can extract features from
# label images. For convenience we use the
# [napari-simpleitk-image-processing](https://github.com/haesleinhuepf/napari-simpleitk-image-processing)
# wrapper library.

import numpy as np
import pandas as pd
from skimage.io import imread
from pyclesperanto_prototype import imshow
from napari_simpleitk_image_processing import watershed_otsu_labeling
from napari_simpleitk_image_processing import label_statistics

# Load and show the raw example image.
blobs = imread('../../data/blobs.tif')
imshow(blobs)

# ## Starting point: a segmented label image

# Segment the image; every detected object receives its own label value.
labels = watershed_otsu_labeling(blobs)
imshow(labels, labels=True)

# ## Label statistics

# +
# Request all per-label measurement groups offered by the wrapper.
statistics = label_statistics(blobs, labels, None, True, True, True, True, True, True)

df = pd.DataFrame(statistics)
df
# -

# These are all columns that are available:

print(statistics.keys())

df.describe()
docs/22_feature_extraction/statistics_with_simpleitk.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import os
import math
from moviepy.editor import VideoFileClip
from IPython.display import HTML
from IPython.display import display
from LaneFinder import *

# Pipeline configuration ----------------------------------------------------
IMAGE_SIZE = (720, 1280)

CHESSBOARD_IMAGE_DIR = "camera_cal"
TEST_IMAGE_DIR = "test_images"
INPUT_VIDEO_DIR = "input_videos"
OUTPUT_IMAGE_DIR = "output_images"
OUTPUT_VIDEO_DIR = "output_videos"

# Sobel settings as (kernel size, low threshold, high threshold) — assumed
# from the LaneFinder signature; confirm against the LaneFinder module.
ABSOLUTE_SOBEL_X = (7, 20, 100)
ABSOLUTE_SOBEL_Y = (7, 20, 100)
MAGNITUDE_SOBEL = (3, 30, 100)
DIRECTION_SOBEL = (31, 0.5, 1.0)

S_CHANNEL_THRESHOLD = (170, 255)

# Perspective-warp source/destination quadrilaterals (pixel coordinates).
WARP_SRC = np.float32([(532, 496), (756, 496), (288, 664), (1016, 664)])
WARP_DST = np.float32([(WARP_SRC[2][0], WARP_SRC[2][1] - 200),
                       (WARP_SRC[3][0], WARP_SRC[3][1] - 200),
                       (WARP_SRC[2][0], WARP_SRC[2][1]),
                       (WARP_SRC[3][0], WARP_SRC[3][1])])

SLIDING_WINDOW_PARAMS = (9, 100, 50)

# Pixel-to-meter conversion factors.
METERS_PER_PIXEL_Y = 30 / 720
METERS_PER_PIXEL_X = 3.7 / 700

MAX_RECENT_XFITTED = 10
LANE_DETECTION_FAILURE_COUNT_BEFORE_SLIDING_WINDOW = 20

# Trapezoid limiting lane search to the road area in front of the camera.
REGION_OF_INTEREST_VERTS = np.array([[
    (0, IMAGE_SIZE[0]),
    (IMAGE_SIZE[1] / 2, IMAGE_SIZE[0] / 2 + 45),
    (IMAGE_SIZE[1] / 2, IMAGE_SIZE[0] / 2 + 45),
    (IMAGE_SIZE[1], IMAGE_SIZE[0])
]], dtype=np.int32)

# Initialize the class instance containing the advanced lane line detection
# methods. Named lane_finder so the instance no longer shadows the
# LaneFinder class imported above.
lane_finder = LaneFinder(
    image_size=IMAGE_SIZE,
    chessboard_image_dir=CHESSBOARD_IMAGE_DIR,
    absolute_sobel_x=ABSOLUTE_SOBEL_X,
    absolute_sobel_y=ABSOLUTE_SOBEL_Y,
    magnitude_sobel=MAGNITUDE_SOBEL,
    direction_sobel=DIRECTION_SOBEL,
    s_channel_thresh=S_CHANNEL_THRESHOLD,
    warp_perspective=(WARP_SRC, WARP_DST),
    sliding_window_params=SLIDING_WINDOW_PARAMS,
    meters_per_pixel=(METERS_PER_PIXEL_Y, METERS_PER_PIXEL_X),
    max_recent_xfitted=MAX_RECENT_XFITTED,
    lane_detection_failure_count_before_sliding_window=LANE_DETECTION_FAILURE_COUNT_BEFORE_SLIDING_WINDOW,
    region_of_interest_verts=REGION_OF_INTEREST_VERTS,
)


# +
def __main__():
    """Run the lane-finding pipeline on the test images, then on the videos."""
    print("Starting the Pipeline for the Advanced Lane Finding...")
    print("First, test the pipeline for the images...")
    images()
    print("Done with The Images...")
    print("Starting the Pipeline for the Videos...")
    videos()
    print("Done with Videos...")


def images():
    """Annotate every image in TEST_IMAGE_DIR and save it to OUTPUT_IMAGE_DIR."""
    test_img_paths = os.listdir(TEST_IMAGE_DIR)
    for fname in test_img_paths:
        # Reset detection state so each independent image starts with a
        # fresh sliding-window search (hack kept from the original).
        lane_finder._left_line.detected = False
        lane_finder._right_line.detected = False

        img = cv2.imread(os.path.join(TEST_IMAGE_DIR, fname))
        img_text = lane_finder.pipeline(image=img)

        # Create output directory for images if it does not exist
        # (exist_ok avoids the check-then-create race of isdir + mkdir).
        os.makedirs(OUTPUT_IMAGE_DIR, exist_ok=True)

        # Save the annotated image; splitext handles any extension length,
        # unlike the original fname[0:-4] slice.
        base_name = os.path.splitext(fname)[0]
        cv2.imwrite(os.path.join(OUTPUT_IMAGE_DIR, base_name + "_text.jpg"), img_text)


def videos():
    """Annotate every video in INPUT_VIDEO_DIR and embed the results inline.

    Renamed from `Videos` for consistent lower-case function naming.
    """
    test_video_paths = os.listdir(INPUT_VIDEO_DIR)

    # Create output directory for videos if it does not exist.
    os.makedirs(OUTPUT_VIDEO_DIR, exist_ok=True)

    for fname in test_video_paths:
        # Paths to the source and destination videos.
        vid_src = os.path.join(INPUT_VIDEO_DIR, fname)
        vid_dst = os.path.join(OUTPUT_VIDEO_DIR, fname)

        print("Starting the " + fname + " Video...")
        video = VideoFileClip(vid_src)
        video_clip = video.fl_image(lane_finder.pipeline)
        # %time video_clip.write_videofile(vid_dst, audio = False)

        display(HTML(
            """
            <video width="960" height="540" controls>
            <source src="{0}">
            </video>
            """.format(vid_dst)))
# -

__main__()

# + active=""
#
Project 2 - Advanced Lane Finding.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 21, 2019
File: Text Analysis

In 1949, Dr. Rudolf Flesch published The Art of Readable Writing, in which
he proposed a measure of text readability known as the Flesch Index.  This
index is based on the average number of syllables per word and the average
number of words per sentence in a piece of text.  Index scores usually range
from 0 to 100, and they indicate readable prose for the following grade
levels:

    Flesch Index    Grade level of readability
    0-30            College
    50-60           High school
    90-100          Fourth grade

In this case study, we develop a program that computes the Flesch Index
(and the Flesch-Kincaid Grade Level Equivalent) for a text file.

@author: Byen23
"""

# Request
# -------
# Write a program that computes the Flesch Index and grade level for text
# stored in a text file.

# Analysis
# --------
# The input to this program is the name of a text file.  The outputs are the
# number of sentences, words, and syllables in the file, as well as the
# file's Flesch Index and Grade Level Equivalent.  For the purpose of this
# program, the terms are defined as follows (Table 4-7):
#
#   Word      Any sequence of non-whitespace characters.
#   Sentence  Any sequence of words ending in a period, question mark,
#             exclamation point, colon, or semicolon.
#   Syllable  Any word of three characters or less; or any vowel
#             (a, e, i, o, u) or pair of consecutive vowels, except for a
#             final -es, -ed, or -e that is not -le.
#
# Note that the definitions of word and sentence are approximations.  Some
# words, such as "doubles" or "kettles", end in -es but will be counted as
# having one syllable, and an ellipsis (...) will be counted as three
# syllables.
#
# Flesch's formula to calculate the index F is:
#     F = 206.835 - 1.015 * (words / sentences) - 84.6 * (syllables / words)
# The Flesch-Kincaid Grade Level formula for the Equivalent Grade Level G is:
#     G = 0.39 * (words / sentences) + 11.8 * (syllables / words) - 15.59

# Design
# ------
# 1. Receive the file name from the user, open the file, and read the text.
# 2. Count the sentences in the text.
# 3. Count the words in the text.
# 4. Count the syllables in the text.
# 5. Compute the Flesch Index.
# 6. Compute the Grade Level Equivalent.
# 7. Print these two values with appropriate labels, as well as the counts
#    from tasks 2-4.
# Each counting and computing task below is a small, separately testable
# function; main() ties them together.


def count_sentences(text):
    """Return the number of sentence-ending characters (. ? : ; !) in text."""
    return (text.count('.') + text.count('?') + text.count(':') +
            text.count(';') + text.count('!'))


def count_words(text):
    """Return the number of whitespace-separated words in text."""
    return len(text.split())


def count_syllables(text):
    """Approximate the number of syllables in text.

    Each vowel in a word counts as one syllable; a trailing -es, -ed, or
    -e removes one, and a trailing -le adds one back (per the definitions
    in the Analysis section above).
    """
    syllables = 0
    vowels = "aeiouAEIOU"
    for word in text.split():  # BUG FIX: was text.spit(), which crashes
        for vowel in vowels:
            syllables += word.count(vowel)
        for ending in ('es', 'ed', 'e'):
            if word.endswith(ending):
                syllables -= 1
        if word.endswith('le'):
            syllables += 1
    return syllables


def flesch_index(sentences, words, syllables):
    """Return the Flesch Index for the given counts."""
    return 206.835 - 1.015 * (words / sentences) - \
        84.6 * (syllables / words)


def grade_level(sentences, words, syllables):
    """Return the rounded Flesch-Kincaid Grade Level Equivalent.

    BUG FIX: the coefficient on words/sentences was 0.38; the
    Flesch-Kincaid formula stated in the Analysis section uses 0.39.
    """
    return round(0.39 * (words / sentences) +
                 11.8 * (syllables / words) - 15.59)


def main():
    """Prompt for a file name, analyze the file, and print the results."""
    # Take the inputs
    fileName = input("Enter the file name: ")
    # 'with' guarantees the file is closed (the original leaked the handle)
    with open(fileName, 'r') as inputFile:
        text = inputFile.read()

    sentences = count_sentences(text)
    words = count_words(text)
    syllables = count_syllables(text)

    # Compute the Flesch Index and Grade Level
    index = flesch_index(sentences, words, syllables)
    level = grade_level(sentences, words, syllables)

    # Output the results
    print("The Flesch Index is", index)
    print("The Grade Level Equivalent is", level)
    print(sentences, "sentences")
    print(words, "words")
    print(syllables, "syllables")


if __name__ == "__main__":
    main()
22.Text-analysis.py.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: imbalanced
#     language: python
#     name: imbalanced
# ---

# # SVM-SMOTE
#
# Creates new samples by interpolation between samples of the minority-class
# support vectors and their closest neighbours.

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.datasets import make_blobs
from sklearn import svm

from imblearn.over_sampling import SVMSMOTE
# -

# ## Create data
#
# https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html
#
# We will create 2 classes, one majority one minority, clearly separated to
# facilitate the demonstration.

# +
# Configuration options
# NOTE(review): blobs_random_seed is defined but never passed to make_blobs
# below, so the blob generation is NOT reproducible -- confirm intent.
blobs_random_seed = 42
centers = [(0, 0), (5, 5)]
cluster_std = 1.5
num_features_for_samples = 2
num_samples_total = 1600

# Generate X: two Gaussian blobs, one per class
X, y = make_blobs(
    n_samples=num_samples_total,
    centers=centers,
    n_features=num_features_for_samples,
    cluster_std=cluster_std)

# transform arrays to pandas formats
X = pd.DataFrame(X, columns=['VarA', 'VarB'])
y = pd.Series(y)

# create an imbalanced dataset
# (make_blobs creates the same number of observations per class,
# so we need to downsample the second class manually)
X = pd.concat([
    X[y == 0],
    X[y == 1].sample(200, random_state=42)
], axis=0)

y = y.loc[X.index]

# display size
X.shape, y.shape

# +
sns.scatterplot(
    data=X, x="VarA", y="VarB", hue=y, alpha=0.5
)
plt.title('Toy dataset')
plt.show()
# -

# ## Find the support vectors

# +
# Initialize SVM classifier
clf = svm.SVC(kernel='linear')

# Fit data
clf = clf.fit(X, y)

# support vectors, indices
clf.support_
# -

# number of support vectors
len(clf.support_)

# the support vectors
clf.support_vectors_

# +
# mark the support vectors on top of the scatter plot
sns.scatterplot(
    data=X, x="VarA", y="VarB", hue=y, alpha=0.5
)

plt.scatter(clf.support_vectors_[:,0],
            clf.support_vectors_[:, 1],
            s=100, linewidth=1,
            facecolors='none', edgecolors='k')

# +
# plot the decision boundaries
sns.scatterplot(
    data=X, x="VarA", y="VarB", hue=y, alpha=0.5
)

ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()

# add decision boundaries: evaluate the decision function on a 50x50 grid
xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 50),
                     np.linspace(ylim[0], ylim[1], 50))

# decision_function (w . x + b, computed manually from the fitted SVM)
Z = np.dot(np.c_[xx.ravel(), yy.ravel()], clf.coef_[0]) + clf.intercept_[0]
Z = Z.reshape(xx.shape)

plt.contour(xx, yy, Z, colors='k', levels=[-1, 0, 1], alpha=0.5,
            linestyles=['--', '-', '--'])

# +
# now we plot the support vectors, which lie at and within
# the decision boundaries
sns.scatterplot(
    data=X, x="VarA", y="VarB", hue=y, alpha=0.5
)

plt.scatter(clf.support_vectors_[:,0],
            clf.support_vectors_[:, 1],
            s=100, linewidth=1,
            facecolors='none', edgecolors='k')

ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()

# add decision boundaries
xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 50),
                     np.linspace(ylim[0], ylim[1], 50))

# decision_function
Z = np.dot(np.c_[xx.ravel(), yy.ravel()], clf.coef_[0]) + clf.intercept_[0]
Z = Z.reshape(xx.shape)

plt.contour(xx, yy, Z, colors='k', levels=[-1, 0, 1], alpha=0.5,
            linestyles=['--', '-', '--'])
# -

# ## SVM SMOTE
#
# [SVMSMOTE](https://imbalanced-learn.org/stable/references/generated/imblearn.over_sampling.SVMSMOTE.html)

# +
sm = SVMSMOTE(
    sampling_strategy='auto',  # samples only the minority class
    random_state=0,  # for reproducibility
    k_neighbors=5,  # neighbours to create the synthetic examples
    m_neighbors=10,  # neighbours to determine if minority class is in "danger"
    n_jobs=4,
    svm_estimator = svm.SVC(kernel='linear')
)

# remember that the templates are those minority observations
# within the danger zone

# create the synthetic examples
X_res, y_res = sm.fit_resample(X, y)

# +
# size of original data
X.shape, y.shape

# +
# size of resampled data
X_res.shape, y_res.shape

# +
# number of observations per class, before and after
y.value_counts(), y_res.value_counts()

# +
# plot of original data
sns.scatterplot(
    data=X, x="VarA", y="VarB", hue=y,alpha=0.5
)
plt.title('Original dataset')
plt.show()

# +
# plot of the oversampled data
sns.scatterplot(
    data=X_res, x="VarA", y="VarB", hue=y_res, alpha=0.5
)
plt.title('SVM SMOTE')
plt.show()

# +
# oversampled data with the original SVM decision boundaries overlaid
sns.scatterplot(
    data=X_res, x="VarA", y="VarB", hue=y_res, alpha=0.5
)

ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()

# add decision boundaries
xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 50),
                     np.linspace(ylim[0], ylim[1], 50))

# decision_function
Z = np.dot(np.c_[xx.ravel(), yy.ravel()], clf.coef_[0]) + clf.intercept_[0]
Z = Z.reshape(xx.shape)

plt.contour(xx, yy, Z, colors='k', levels=[-1, 0, 1], alpha=0.5,
            linestyles=['--', '-', '--'])

plt.title('SVM SMOTE with support vectors')
plt.show()
# -

# We can see that most of the synthetic examples were created closer to the
# support vectors from the minority class, within the decision boundary from
# the SVM.

# **HOMEWORK**
#
# - Test SVM SMOTE using the toy datasets that we created for section 4 and
#   see how the distribution of the newly created data varies with the
#   different separateness of the classes.
Section-05-Oversampling/05-08-SVM-SMOTE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PyTorch # language: python # name: torch # --- import torch import matplotlib.pyplot as plt import cv2 import pandas as pd # Load the trained model model = torch.load('./CFExp/weights.pt') # Set the model to evaluate mode model.eval() # Read the log file using pandas into a dataframe df = pd.read_csv('./CFExp/log.csv') # ### Training and testing loss, f1_score and auroc values for the model trained on the CrackForest dataset # Plot all the values with respect to the epochs df.plot(x='epoch',figsize=(15,8)); print(df[['Train_auroc','Test_auroc']].max()) # ### Sample Prediction ino = 2 # Read a sample image and mask from the data-set img = cv2.imread(f'./CrackForest/Images/{ino:03d}.jpg').transpose(2,0,1).reshape(1,3,320,480) mask = cv2.imread(f'./CrackForest/Masks/{ino:03d}_label.PNG') with torch.no_grad(): a = model(torch.from_numpy(img).type(torch.cuda.FloatTensor)/255) # Plot histogram of the prediction to find a suitable threshold. From the histogram a 0.1 looks like a good choice. plt.hist(a['out'].data.cpu().numpy().flatten()) # Plot the input image, ground truth and the predicted output plt.figure(figsize=(10,10)); plt.subplot(131); plt.imshow(img[0,...].transpose(1,2,0)); plt.title('Image') plt.axis('off'); plt.subplot(132); plt.imshow(mask); plt.title('Ground Truth') plt.axis('off'); plt.subplot(133); plt.imshow(a['out'].cpu().detach().numpy()[0][0]>0.2); plt.title('Segmentation Output') plt.axis('off'); plt.savefig('./SegmentationOutput.png',bbox_inches='tight')
.ipynb_checkpoints/Analysis-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Setup for weakly-supervised complaint classification: loads the complaint
# CSVs, cleans the text, defines per-category keyword/regex dictionaries, and
# prepares word2vec helpers used by the generated labeling functions.

# +
import pandas as pd
import numpy as np
import _pickle as pkl

# train_df=pd.read_csv('./complaints_train_data_clean.csv',usecols=["category_name","complaint_title","complaint_description",],na_filter=False)
from gensim.parsing.preprocessing import STOPWORDS

def remove_stopwords(tokens):
    # input and output: a list of words, with gensim stopwords dropped
    return [word for word in tokens if word not in STOPWORDS]

train_df=pd.read_csv('./complaints_train_validation_data_clean.csv',usecols=["index","category_name","complaint_title","complaint_description",],na_filter=False)

# keep only ascii letters and spaces, lowercase, tokenize, drop stopwords
whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')
sentences = (train_df['complaint_title']+" "+train_df['complaint_description']).tolist()
sentences = [''.join(filter(whitelist.__contains__, x)).lower() for x in sentences]
sentences = [x.split() for x in sentences]
sentences = list(map(remove_stopwords, sentences))
train_df.insert(1,"complaint_text",sentences)

val_df=pd.read_csv('./complaints_validation_data_clean.csv',usecols=["category_name","complaint_title","complaint_description",],na_filter=False)
test_df=pd.read_csv('./complaints_test_data_clean.csv',usecols=["category_name","complaint_title","complaint_description",],na_filter=False)

## gold labels
gold_df=pd.read_csv('./clean-gold-labels.tsv',sep='\t',usecols=["category_name","complaint_description",],na_filter=False)
gold_df['complaint_title'] = gold_df['complaint_description']

# same cleaning pipeline for the gold-label descriptions
sentences = (gold_df['complaint_description']).tolist()
sentences = [''.join(filter(whitelist.__contains__, x)).lower() for x in sentences]
sentences = [x.split() for x in sentences]
sentences = list(map(remove_stopwords, sentences))
gold_df.insert(1,"complaint_text",sentences)

######
# same cleaning pipeline for the test set
sentences = (test_df['complaint_title']+" "+test_df['complaint_description']).tolist()
sentences = [''.join(filter(whitelist.__contains__, x)).lower() for x in sentences]
sentences = [x.split() for x in sentences]
sentences = list(map(remove_stopwords, sentences))
test_df.insert(1,"complaint_text",sentences)

gold_df.head()

# +
colsize = len(train_df['category_name'])
train_df['category_name'] = train_df["category_name"].astype('category')
#train_df['true_label'] = pd.Series(np.zeros(colsize), index=train_df.index)
train_df['predicted_label'] = pd.Series(np.zeros(colsize), index=train_df.index)
# train_df['true_label'] = train_df['category_name'].cat.codes
# for i in range(colsize):
#     if(train_df['true_label'][i]==5):
#         print(train_df['true_label'][i],train_df['category_name'][i])
# -

# complaint counts per category
train_df.groupby("category_name").agg({"complaint_title": np.count_nonzero})

# print category index -> name mapping
for i,x in enumerate(train_df.groupby("category_name").agg({"complaint_title": np.count_nonzero}).index._data):
    print(i,x)

# +
# persist the ordered class-name list for downstream notebooks
class_names = train_df.groupby("category_name").agg({"complaint_title": np.count_nonzero}).index.tolist()
pkl.dump(class_names,open("class_names.p","wb"))
noOfClasses = len(train_df.groupby("category_name").agg({"complaint_title": np.count_nonzero}).index)
print(class_names)
# -

train_df.iloc[3]['complaint_title']+" "+train_df.iloc[3]['complaint_description']

# Hand-curated keyword hints per category (used by the keyword-based
# labeling functions generated later in this notebook).
keywords = {'Air Pollution':['dust|smoke|burn'],
            'Autorickshaws and Taxis':['taxi','auto','autorickshaw'],
            'BMTC - Driver or Conductor':['rude','behaviour'],
            'BMTC - Need new Bus Route':['frequency'],
            'BMTC - Others':[],
            'Bad Roads':['road'],
            'Broken Storm Water Drains':['overflow','drainage'],
            'Cattle':['cows','buffaloes','goats','cow'],
            'Clearing of Blockage of Under Ground Drainage Pipelines and Replacement of Damaged or Missing Manhole Cover':[],
            'Desilting - Lakes':['lake'],
            'Diseases':['malaria','dengue','cholera','fever','disease','hospital','epidemic'],
            'Electricity':['power','current','power cut'],
            'Flooding of Roads and Footpaths':['water','flood','floods'],
            'Footpaths':['footpath'],
            'Garbage':['waste','plastic','dirt'],
            'Government Land Encroachment':['occupy','illegal'],
            'Hawkers and Vendors':[],
            'Hoardings':['advertise'],
            'Illegal posters and Hoardings':['banner', 'ads ','advertise'],
            'Lakes - Others':['lake'],
            'Maintenance of Roads and Footpaths - Others':[],
            'Manholes':['manhole','man hole'],
            'Mosquitos':['mosquito','mosquitoe','mosquitoes','dengue','malaria'],
            'Need New Streetlights':['streetlight','light','new streetlight'],
            'Need New Toilets ':['toilet','urinal','urinate'],
            'New Bus Shelters':['shelter'],
            'No Sewage Drains':['drainage'],
            'Noise Pollution':['siren','speakers','speakers','loud'],
            'Others':[],
            'Overflow of Storm Water Drains':['pipes'],
            'Parking Violations':['parked','parker'],
            'Parks and playgrounds':['park','play','playground'],
            'Potholes':['holes','pothole'],
            'Public Nuisance':[],
            'Repair of streetlights':['streetlight','light','broken','damaged'],
            'Sewage and Storm Water Drains - Others':['drainage'],
            'Stray Dogs':['dog'],
            'Traffic':['vehicles'],
            'Trees, Parks and Playgrounds - Others':['tree'],
            'Unauthorized Construction':['encroach','building','built'],
            'Water Leakage':[],
            'Water Supply ':[]}

# Hand-curated regular-expression patterns per category (used by the
# regex-based labeling functions generated later in this notebook).
# NOTE(review): 'Unauthorized Construction' holds a 3-element list while
# every other entry is a single-pattern list -- confirm that is intended.
regexPatterns = {'Air Pollution':['air.*pollution|pollution|dust'],
                 'Autorickshaws and Taxis':['autorickshaws|taxis|taxi|auto|autorickshaw'],
                 'BMTC - Driver or Conductor':['bmtc.*driver|bmtc.*conductor|bus.*driver|bus.*conductor'],
                 'BMTC - Need new Bus Route':['bus.*route'],
                 'BMTC - Others':['bmtc'],
                 'Bad Roads':['bad.*road|road.*bad'],
                 'Broken Storm Water Drains':['(broken|damage).*(drain)'],
                 'Cattle':['(cattle|cows|buffaloes|goats)'],
                 'Clearing of Blockage of Under Ground Drainage Pipelines and Replacement of Damaged or Missing Manhole Cover':['clearing|blockage|under|ground|drainage|pipelines|replacement|damaged|missing|manhole|cover'],
                 'Desilting - Lakes':['lake'],
                 'Diseases':['diseases|malaria|dengue|cholera'],
                 'Electricity':['electricity|power|current|power.*cut'],
                 'Flooding of Roads and Footpaths':['((water|flood|flow).*(roads|footpaths))|((roads|footpaths).*(water|flood|flow))'],
                 'Footpaths':['footpath'],
                 'Garbage':['garbage|waste|plastic|dirt'],
                 'Government Land Encroachment':['(government.*land).*(encroach|occupy|illegal)'],
                 'Hawkers and Vendors':['(hawkers|vendors)'],
                 'Hoardings':['(hoardings|advertisements)'],
                 'Illegal posters and Hoardings':['posters|hoardings|banner|ads|advertise'],
                 'Lakes - Others':['lake'],
                 'Maintenance of Roads and Footpaths - Others':['(maintenance).*(roads|footpaths)'],
                 'Manholes':['(manholes|manhole|man hole)'],
                 'Mosquitos':['mosquito|mosquitoe|mosquitoes|dengue|malaria'],
                 'Need New Streetlights':['(need|no|new).*(streetlight|light)'],
                 'Need New Toilets ':['toilets|toilet|urinal|urinate'],
                 'New Bus Shelters':['bus.*shelter|shelter.*bus'],
                 'No Sewage Drains':['drain'],
                 'Noise Pollution':['noise|noise.*pollution|siren|speakers|speakers|loud'],
                 'Others':['others'],
                 'Overflow of Storm Water Drains':['overflow.*(drains|pipes)'],
                 'Parking Violations':['parking|parked|parker'],
                 'Parks and playgrounds':['(parks|playgrounds|park|play|playground)'],
                 'Potholes':['(pot hole|holes|pothole)'],
                 'Public Nuisance':['(public.*nuisance|nuisance)'],
                 'Repair of streetlights':['((light).*(repair|broke|damage))|((repair|broke|damage).*(light))'],
                 'Sewage and Storm Water Drains - Others':['(sewage|storm|water|drains|drainage)'],
                 'Stray Dogs':['(stray|dogs|dog)'],
                 'Traffic':['(traffic|vehicles)'],
                 'Trees, Parks and Playgrounds - Others':['(trees|parks|playgrounds|tree)'],
                 'Unauthorized Construction':['encroach','building','built'],
                 'Water Leakage':['water.*leak|leak.*water'],
                 'Water Supply ':['water.*supply|supply.*water']}

# +
# per-category entity/phrase extracts, combined into one '|'-joined pattern
extracts_df=pd.read_csv('./p.tsv',sep='\t',usecols=["category_name","Entity","complaint words",],na_filter=False)
extracts_df = extracts_df[extracts_df['category_name'].isin(class_names)]
# extracts_df

def combine(x):
    # join all unique values of the group into a lowercase '|' pattern
    x = x.tolist()
    x = set(x)
    x = '|'.join(list(x)).lower()
    return x

extracts_df = extracts_df.groupby("category_name").agg({"Entity": combine,"complaint words":combine })
extracts_df.to_csv("extracts.csv")
extracts_df
# -

import re

# tokens from each category name itself plus its keyword hints
class_words = [ re.sub('-','',x).lower().split() + keywords[x] for x in class_names ]
print(class_words,len(class_words))

# spot-check the 'Air Pollution' complaints
for v in (train_df.loc[train_df['category_name'] =='Air Pollution']).iterrows():
    print(v[0])
    # print(v[1]['category_name'])
    print(v[1]['complaint_title'])
    print(v[1]['complaint_description'])

# +
## training word2vec (kept for reference; the pre-trained model is loaded below)
# import gensim.matutils as gm
# from gensim.models.keyedvectors import KeyedVectors
# import gensim, logging
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# model = gensim.models.Word2Vec(sentences, size=200, window=5, min_count=5)
# model.save("../word2vec_icmcData")
#to continue training
# model = Word2Vec.load("../word2vec_icmcData")

# +
# print(len(model.wv.vocab))
# print([x for x in model.wv.vocab.keys()])
# model.accuracy('../questions-words.txt')

# +
import gensim.matutils as gm
import gensim
from gensim.models.keyedvectors import KeyedVectors

# Load pretrained model (since intermediate data is not included, the model cannot be refined with additional data)
# model = KeyedVectors.load_word2vec_format('../glove_w2v.txt', binary=False)  # C binary format
model = gensim.models.Word2Vec.load("./J/word2vec_bangalore_description_train_min_count_5_clean.model")
model = model.wv
# model = KeyedVectors.load_word2vec_format('../word2vec_icmcData.txt', binary=False)  # C binary format

# words encountered that have no embedding in the loaded model
wordvec_unavailable= set()

def write_to_file(wordvec_unavailable):
    # dump the set of out-of-vocabulary words, one per line
    with open("wordvec_unavailable.txt","w") as f:
        for word in wordvec_unavailable:
            f.write(word+"\n")

def get_word_vectors(btw_words):
    # returns the list of embedding vectors for the given words;
    # out-of-vocabulary words are recorded in wordvec_unavailable instead.
    # NOTE(review): the bare `except:` silently swallows every error, not
    # just missing-key lookups -- consider narrowing to KeyError.
    word_vectors= []
    for word in btw_words:
        try:
            word_v = np.array(model[word])
            word_v = word_v.reshape(len(word_v),1)
            #print(word_v.shape)
            word_vectors.append(model[word])
        except:
            wordvec_unavailable.add(word)
    return word_vectors
def get_similarity(word_vectors,target_word): # sent(list of word vecs) to word similarity
    """Return the max cosine similarity between `target_word` and any vector in `word_vectors`.

    Returns 0 when the target word has no embedding (the word is logged to
    `wordvec_unavailable` with a " t" suffix to mark it as a target miss).
    """
    similarity = 0
    target_word_vector = 0
    try:
        target_word_vector = model[target_word]
    except:
        wordvec_unavailable.add(target_word+" t")
        return similarity
    target_word_sparse = gm.any2sparse(target_word_vector,eps=1e-09)
    for wv in word_vectors:
        wv_sparse = gm.any2sparse(wv, eps=1e-09)
        similarity = max(similarity,gm.cossim(wv_sparse,target_word_sparse))
    return similarity

# +
import os
import re

# Words/characters stripped from class names before turning them into keyword lists.
stopwords_pattern = ' of| and| no| others| or| -|,|no '

def ltp(x):
    """Join a word list into a regex alternation group: ['a','b'] -> '(a|b)'."""
    return '(' + '|'.join(x) + ')'

# Each `create_LF_*` function below is a CODE GENERATOR: it writes a Python source
# file containing one labelling function (LF) per class.  Every generated LF takes a
# complaint row `c` and returns a (label, score) pair: (class_index, 1) on a match,
# (-1, 0) otherwise (embedding LFs return a real-valued score instead of 1).

def create_LF_Based_On_Category_Name(debug=False):
    """Generate Category_Name_LFs.py: LFs that fire when any class-name word
    appears in c['complaint_text']."""
    if os.path.exists("Category_Name_LFs.py"):
        os.remove("Category_Name_LFs.py")
    f = open("Category_Name_LFs.py","a+")
    for i in range(len(class_names)):
        # Class name with separators removed is used in the generated function name.
        functionName = re.sub(r'( )+|-|,','',class_names[i])
        pattern = re.sub(stopwords_pattern , '', class_names[i].lower().strip())
        pattern= re.sub("( )+",",",pattern)
        pattern= re.sub(" $","",pattern)
        words = pattern.split(',')
        wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
        pattern = ltp(words)
        if(debug):
            print(pattern)
        f.write("\n")
        f.write(r'''def LF_Category_Name_'''+functionName+'''(c):
    words = '''+wordsStr+'''
    if(len(set(c['complaint_text']).intersection(words))>0):
        return ('''+str(i)+''',1)
    return (-1,0)''')
        f.write("\n")
    f.close()

def create_LF_Based_On_Keywords(debug=False):
    """Generate KeyWord_Based_LFs.py: like the category-name LFs but the word list
    is augmented with the curated `keywords` for each class."""
    if os.path.exists("KeyWord_Based_LFs.py"):
        os.remove("KeyWord_Based_LFs.py")
    f = open("KeyWord_Based_LFs.py","a+")
    for i in range(len(class_names)):
        functionName = re.sub(r'( )+|-|,','',class_names[i])
        pattern = re.sub(stopwords_pattern , '', class_names[i].lower().strip())
        pattern= re.sub("( )+",",",pattern)
        words = pattern.split(',')
        ##### add keywords #####
        words = words+ keywords[class_names[i]]
        ####
        wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
        pattern = ltp(words)
        if(debug):
            print(pattern)
        f.write("\n")
        f.write(r'''def LF_KeyWord_'''+functionName+'''(c):
    words = '''+wordsStr+'''
    if(len(set(c['complaint_text']).intersection(words))>0):
        return ('''+str(i)+''',1)
    return (-1,0)''')
        f.write("\n")
    f.close()

def create_LF_Extracts_Phrases_Regex(debug=False):
    """Generate Regex_Based_Extracts_Phrases_LFs.py: LFs that regex-match the mined
    'complaint words' phrases (from `extracts_df`) against the description.
    Only classes present in `extracts_df` get an LF."""
    if os.path.exists("Regex_Based_Extracts_Phrases_LFs.py"):
        os.remove("Regex_Based_Extracts_Phrases_LFs.py")
    f = open("Regex_Based_Extracts_Phrases_LFs.py","a+")
    for i in range(len(class_names)):
        if(class_names[i] in extracts_df.index.tolist()):
            functionName = re.sub(r'( )+|-|,','',class_names[i])
            pattern = re.sub(stopwords_pattern , '', class_names[i].lower().strip())
            pattern= re.sub("( )+",",",pattern)
            words = pattern.split(',')
            ##### add keywords #####
            words = words+ keywords[class_names[i]]
            ####
            wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
            if(debug):
                print(pattern)
            f.write("\n")
            f.write(r'''def LF_Extract_Phrase_Regex_'''+functionName+'''(c):
    pattern = \''''+extracts_df.loc[class_names[i]]['complaint words']+'''\'
    if(re.search(pattern,c['complaint_description'],flags=re.I)):
        return ('''+str(i)+''',1)
    return (-1,0)''')
            f.write("\n")
    f.close()

def create_LF_Description_Regex(debug=False):
    """Generate Regex_Based_Description_LFs.py: LFs that match the hand-written
    `regexPatterns` for each class against c['complaint_description']."""
    if os.path.exists("Regex_Based_Description_LFs.py"):
        os.remove("Regex_Based_Description_LFs.py")
    f = open("Regex_Based_Description_LFs.py","a+")
    for i in range(len(class_names)):
        functionName = re.sub(r'( )+|-|,','',class_names[i])
        pattern = re.sub(stopwords_pattern , '', class_names[i].lower().strip())
        pattern= re.sub("( )+",",",pattern)
        words = pattern.split(',')
        ##### add keywords #####
        words = words+ keywords[class_names[i]]
        ####
        wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
        if(debug):
            print(pattern)
        f.write("\n")
        f.write(r'''def LF_Desc_Regex_'''+functionName+'''(c):
    words = '''+wordsStr+'''
    pattern = \''''+''.join(regexPatterns[class_names[i]])+'''\'
    if(re.search(pattern,c['complaint_description'],flags=re.I)):
        return ('''+str(i)+''',1)
    return (-1,0)''')
        f.write("\n")
    f.close()

def create_LF_Title_Regex(debug=False):
    """Generate Regex_Based_Title_LFs.py: same regexes as the description LFs but
    applied to c['complaint_title']."""
    if os.path.exists("Regex_Based_Title_LFs.py"):
        os.remove("Regex_Based_Title_LFs.py")
    f = open("Regex_Based_Title_LFs.py","a+")
    for i in range(len(class_names)):
        functionName = re.sub(r'( )+|-|,','',class_names[i])
        pattern = re.sub(stopwords_pattern , '', class_names[i].lower().strip())
        pattern= re.sub("( )+",",",pattern)
        words = pattern.split(',')
        ##### add keywords #####
        words = words+ keywords[class_names[i]]
        ####
        wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
        pattern = ltp(words)
        if(debug):
            print(pattern)
        f.write("\n")
        f.write(r'''def LF_Title_Regex_'''+functionName+'''(c):
    words = '''+wordsStr+'''
    pattern = \''''+ ''.join(regexPatterns[class_names[i]]) +'''\'
    if(re.search(pattern,c['complaint_title'],flags=re.I)):
        return ('''+str(i)+''',1)
    return (-1,0)''')
        f.write("\n")
    f.close()

def create_LF_Based_On_Embeddings(debug=False):
    """Generate Embeddings_Based_LFs.py: LFs that score a complaint by the best
    word2vec similarity between any class keyword and any complaint word."""
    if os.path.exists("Embeddings_Based_LFs.py"):
        os.remove("Embeddings_Based_LFs.py")
    f = open("Embeddings_Based_LFs.py","a+")
    for i in range(len(class_names)):
        functionName = re.sub(r'( )+|-|,','',class_names[i])
        pattern = re.sub(stopwords_pattern, '', class_names[i].lower().strip())
        pattern= re.sub("( )+",",",pattern)
        words = pattern.split(',')
        words = words+ keywords[class_names[i]]
        ####
        wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
        pattern = ltp(words)
        if(debug):
            print(pattern)
        f.write("\n")
        f.write(r'''def LF_Embedding_'''+functionName+'''(c):
    words = '''+wordsStr+'''
    sc = 0
    word_vectors = get_word_vectors(c['complaint_text'])
    for ow in words:
        sc=max(sc,get_similarity(word_vectors,ow))
    return ('''+str(i)+''',sc)''')
        f.write("\n")
    f.close()

def create_LF_Based_On_Embeddings_Title(debug=False):
    """Generate Title_Embeddings_Based_LFs.py: embedding-similarity LFs over the
    lower-cased, whitespace-split complaint title."""
    if os.path.exists("Title_Embeddings_Based_LFs.py"):
        os.remove("Title_Embeddings_Based_LFs.py")
    f = open("Title_Embeddings_Based_LFs.py","a+")
    for i in range(len(class_names)):
        functionName = re.sub(r'( )+|-|,','',class_names[i])
        pattern = re.sub(stopwords_pattern, '', class_names[i].lower().strip())
        pattern= re.sub("( )+",",",pattern)
        words = pattern.split(',')
        words = words+ keywords[class_names[i]]
        ####
        wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
        pattern = ltp(words)
        if(debug):
            print(pattern)
        f.write("\n")
        f.write(r'''def LF_Title_Embedding_'''+functionName+'''(c):
    words = '''+wordsStr+'''
    sc = 0
    word_vectors = get_word_vectors(c['complaint_title'].lower().split())
    for ow in words:
        sc=max(sc,get_similarity(word_vectors,ow))
    return ('''+str(i)+''',sc)''')
        f.write("\n")
    f.close()

def create_LF_Based_On_Embeddings_Description(debug=False):
    """Generate Description_Embeddings_Based_LFs.py: embedding-similarity LFs over
    the lower-cased, whitespace-split complaint description."""
    if os.path.exists("Description_Embeddings_Based_LFs.py"):
        os.remove("Description_Embeddings_Based_LFs.py")
    f = open("Description_Embeddings_Based_LFs.py","a+")
    for i in range(len(class_names)):
        functionName = re.sub(r'( )+|-|,','',class_names[i])
        pattern = re.sub(stopwords_pattern, '', class_names[i].lower().strip())
        pattern= re.sub("( )+",",",pattern)
        words = pattern.split(',')
        words = words+ keywords[class_names[i]]
        ####
        wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
        pattern = ltp(words)
        if(debug):
            print(pattern)
        f.write("\n")
        f.write(r'''def LF_Description_Embedding_'''+functionName+'''(c):
    words = '''+wordsStr+'''
    sc = 0
    word_vectors = get_word_vectors(c['complaint_description'].lower().split())
    for ow in words:
        sc=max(sc,get_similarity(word_vectors,ow))
    return ('''+str(i)+''',sc)''')
        f.write("\n")
    f.close()

def create_LF_Based_On_TFIDF(debug=False):
    """Generate TFIDF_Based_LFs.py: LFs that score a complaint by cosine similarity
    between the class's TF-IDF vector and the complaint row's TF-IDF vector.
    NOTE(review): the generated code offsets into `tfidf_matrix` by len(class_names)
    because the first rows of the matrix are the class word-bags — confirm."""
    if os.path.exists("TFIDF_Based_LFs.py"):
        os.remove("TFIDF_Based_LFs.py")
    f = open("TFIDF_Based_LFs.py","a+")
    for i in range(len(class_names)):
        functionName = re.sub(r'( )+|-|,','',class_names[i])
        f.write("\n")
        f.write(r'''def LF_TFIDF_{}(c):
    sc = cosine_similarity(class_words_tfidf['{}'],tfidf_matrix[c.name+{}])
    return ({},sc)'''.format(functionName,class_names[i],len(class_names),i))
        f.write("\n")
    f.close()

# +
# Regenerate the active LF source files and record, for every LF name, which class
# index it votes for (LF_output_map) and the flat class list (LF_l).
LF_Names = []
LF_output_map = dict()
LF_l = []

create_LF_Title_Regex()
create_LF_Description_Regex()
# create_LF_Based_On_Keywords()
# create_LF_Extracts_Phrases_Regex()
# create_LF_Based_On_Category_Name()
# create_LF_Based_On_Embeddings()
# create_LF_Based_On_TFIDF()
# create_LF_Based_On_Embeddings_Title()
# create_LF_Based_On_Embeddings_Description()

# for i in range(len(class_names)):
#     functionName = re.sub(r'( )+|-|,','',class_names[i])
#     LF_Names.append('LF_Category_Name_'+functionName)
#     LF_output_map['LF_Category_Name_'+functionName]=i
#     LF_l.append(i)

# for i in range(len(class_names)):
#     functionName = re.sub(r'( )+|-|,','',class_names[i])
#     LF_Names.append('LF_Embedding_'+functionName)
#     LF_output_map['LF_Embedding_'+functionName]=i

# for i in range(len(class_names)):
#     functionName = re.sub(r'( )+|-|,','',class_names[i])
#     LF_Names.append('LF_TFIDF_'+functionName)
#     LF_output_map['LF_TFIDF_'+functionName]=i

for i in range(len(class_names)):
    functionName = re.sub(r'( )+|-|,','',class_names[i])
    LF_Names.append('LF_KeyWord_'+functionName)
    LF_output_map['LF_KeyWord_'+functionName]=i
    LF_l.append(i)

for i in range(len(class_names)):
    functionName = re.sub(r'( )+|-|,','',class_names[i])
    LF_Names.append('LF_Title_Regex_'+functionName)
    LF_output_map['LF_Title_Regex_'+functionName]=i
    LF_l.append(i)

for i in range(len(class_names)):
    functionName = re.sub(r'( )+|-|,','',class_names[i])
    LF_Names.append('LF_Desc_Regex_'+functionName)
    LF_output_map['LF_Desc_Regex_'+functionName]=i
    LF_l.append(i)

for i in range(len(class_names)):
    if(class_names[i] in extracts_df.index.tolist()):
        functionName = re.sub(r'( )+|-|,','',class_names[i])
        LF_Names.append('LF_Extract_Phrase_Regex_'+functionName)
        LF_output_map['LF_Extract_Phrase_Regex_'+functionName]=i
        LF_l.append(i)

# for i in range(len(class_names)):
#     functionName = re.sub(r'( )+|-|,','',class_names[i])
#     LF_Names.append('LF_Title_Embedding_'+functionName)
#     LF_output_map['LF_Title_Embedding_'+functionName]=i
#     LF_l.append(i)

# for i in range(len(class_names)):
#     functionName = re.sub(r'( )+|-|,','',class_names[i])
#     LF_Names.append('LF_Description_Embedding_'+functionName)
#     LF_output_map['LF_Description_Embedding_'+functionName]=i
#     LF_l.append(i)

print('['+','.join(LF_Names)+']')
# -

# Load the generated LF definitions into this notebook's namespace.
# %load Regex_Based_Title_LFs.py

# %load Regex_Based_Description_LFs.py

# +
# The full, ordered list of active labelling functions (42 title-regex LFs followed
# by 42 description-regex LFs), pasted from the print above after %load.
LFs = [LF_Title_Regex_AirPollution,LF_Title_Regex_AutorickshawsandTaxis,LF_Title_Regex_BMTCDriverorConductor,
       LF_Title_Regex_BMTCNeednewBusRoute,LF_Title_Regex_BMTCOthers,LF_Title_Regex_BadRoads,
       LF_Title_Regex_BrokenStormWaterDrains,LF_Title_Regex_Cattle,
       LF_Title_Regex_ClearingofBlockageofUnderGroundDrainagePipelinesandReplacementofDamagedorMissingManholeCover,
       LF_Title_Regex_DesiltingLakes,LF_Title_Regex_Diseases,LF_Title_Regex_Electricity,
       LF_Title_Regex_FloodingofRoadsandFootpaths,LF_Title_Regex_Footpaths,LF_Title_Regex_Garbage,
       LF_Title_Regex_GovernmentLandEncroachment,LF_Title_Regex_HawkersandVendors,LF_Title_Regex_Hoardings,
       LF_Title_Regex_IllegalpostersandHoardings,LF_Title_Regex_LakesOthers,
       LF_Title_Regex_MaintenanceofRoadsandFootpathsOthers,LF_Title_Regex_Manholes,LF_Title_Regex_Mosquitos,
       LF_Title_Regex_NeedNewStreetlights,LF_Title_Regex_NeedNewToilets,LF_Title_Regex_NewBusShelters,
       LF_Title_Regex_NoSewageDrains,LF_Title_Regex_NoisePollution,LF_Title_Regex_Others,
       LF_Title_Regex_OverflowofStormWaterDrains,LF_Title_Regex_ParkingViolations,LF_Title_Regex_Parksandplaygrounds,
       LF_Title_Regex_Potholes,LF_Title_Regex_PublicNuisance,LF_Title_Regex_Repairofstreetlights,
       LF_Title_Regex_SewageandStormWaterDrainsOthers,LF_Title_Regex_StrayDogs,LF_Title_Regex_Traffic,
       LF_Title_Regex_TreesParksandPlaygroundsOthers,LF_Title_Regex_UnauthorizedConstruction,
       LF_Title_Regex_WaterLeakage,LF_Title_Regex_WaterSupply,
       LF_Desc_Regex_AirPollution,LF_Desc_Regex_AutorickshawsandTaxis,LF_Desc_Regex_BMTCDriverorConductor,
       LF_Desc_Regex_BMTCNeednewBusRoute,LF_Desc_Regex_BMTCOthers,LF_Desc_Regex_BadRoads,
       LF_Desc_Regex_BrokenStormWaterDrains,LF_Desc_Regex_Cattle,
       LF_Desc_Regex_ClearingofBlockageofUnderGroundDrainagePipelinesandReplacementofDamagedorMissingManholeCover,
       LF_Desc_Regex_DesiltingLakes,LF_Desc_Regex_Diseases,LF_Desc_Regex_Electricity,
       LF_Desc_Regex_FloodingofRoadsandFootpaths,LF_Desc_Regex_Footpaths,LF_Desc_Regex_Garbage,
       LF_Desc_Regex_GovernmentLandEncroachment,LF_Desc_Regex_HawkersandVendors,LF_Desc_Regex_Hoardings,
       LF_Desc_Regex_IllegalpostersandHoardings,LF_Desc_Regex_LakesOthers,
       LF_Desc_Regex_MaintenanceofRoadsandFootpathsOthers,LF_Desc_Regex_Manholes,LF_Desc_Regex_Mosquitos,
       LF_Desc_Regex_NeedNewStreetlights,LF_Desc_Regex_NeedNewToilets,LF_Desc_Regex_NewBusShelters,
       LF_Desc_Regex_NoSewageDrains,LF_Desc_Regex_NoisePollution,LF_Desc_Regex_Others,
       LF_Desc_Regex_OverflowofStormWaterDrains,LF_Desc_Regex_ParkingViolations,LF_Desc_Regex_Parksandplaygrounds,
       LF_Desc_Regex_Potholes,LF_Desc_Regex_PublicNuisance,LF_Desc_Regex_Repairofstreetlights,
       LF_Desc_Regex_SewageandStormWaterDrainsOthers,LF_Desc_Regex_StrayDogs,LF_Desc_Regex_Traffic,
       LF_Desc_Regex_TreesParksandPlaygroundsOthers,LF_Desc_Regex_UnauthorizedConstruction,
       LF_Desc_Regex_WaterLeakage,LF_Desc_Regex_WaterSupply]

# +
# Persist LF bookkeeping; the numeric suffix encodes the number of active LFs.
# pkl.dump(LF_Names,open("LF_Names_245.p","wb"))
# pkl.dump(LF_output_map,open("LF_output_map_245.p","wb"))
# pkl.dump(LF_l,open("LF_l_245.p","wb"))
pkl.dump(LF_Names,open("LF_Names_161.p","wb"))
pkl.dump(LF_output_map,open("LF_output_map_161.p","wb"))
pkl.dump(LF_l,open("LF_l_161.p","wb"))
print(len(LF_Names),len(LF_output_map),len(LF_l))

# +
def get_L_S_Tensor(df,msg):
    """Apply every LF in `LFs` to every row of `df`.

    Returns a list with one [L, S] pair per row, where L is the list of labels
    (class index or -1) and S the list of scores rescaled from [-1,1] to [0,1].
    `msg` is only used in progress prints.
    """
    L_S = []
    print('labelling ',msg,' data')
    for i in range(len(df.index)):
        L_S_ci=[]
        L=[]
        S=[]
        P_ik = []  # NOTE(review): unused — left over from an earlier formulation?
        for LF in LFs:
            # print(i,LF.__name__)
            l,s = LF(df.iloc[i])
            L.append(l)
            S.append((s+1)/2) #to scale scores in [0,1]
        L_S_ci.append(L)
        L_S_ci.append(S)
        L_S.append(L_S_ci)
        if(i%500==0 and i!=0):
            print(str(i)+'data points labelled in',(time.time() - start_time)/60,'mins')
    return L_S

import time
import datetime
start_time = time.time()
lt = time.localtime()
print("started at: {}-{}-{}, {}:{}:{}".format(lt.tm_mday,lt.tm_mon,lt.tm_year,lt.tm_hour,lt.tm_min,lt.tm_sec))

# val_L_S = get_L_S_Tensor(val_df)
# pkl.dump(val_L_S,open("val_L_S.p","wb"))
# test_L_S = get_L_S_Tensor(test_df,'test')
# pkl.dump(test_L_S,open("test_L_S_TFIDF.p","wb"))
# train_L_S = get_L_S_Tensor(train_df,'train')
# pkl.dump(train_L_S,open("train_L_S_TFIDF.p","wb"))
# train_L_S = get_L_S_Tensor(train_df,'keywords train')
# pkl.dump(train_L_S,open("train_L_S_Keywords.p","wb"))
# test_L_S = get_L_S_Tensor(test_df,'keywords test')
# pkl.dump(test_L_S,open("test_L_S_Keywords_regex_extracts.p","wb"))
# train_L_S = get_L_S_Tensor(train_df,'keywords train')
# pkl.dump(train_L_S,open("train_L_S_Keywords_regex_extracts.p","wb"))
test_L_S = get_L_S_Tensor(test_df,'regex test')
pkl.dump(test_L_S,open("test_L_S_regex.p","wb"))
train_L_S = get_L_S_Tensor(train_df,'regex train')
pkl.dump(train_L_S,open("train_L_S_regex.p","wb"))
# test_L_S = get_L_S_Tensor(test_df,'embeddings test')
# pkl.dump(test_L_S,open("test_L_S_Embeddings.p","wb"))
# test_L_S = get_L_S_Tensor(test_df,'T and D embeddings test')
# pkl.dump(test_L_S,open("test_L_S_T-D_Embeddings.p","wb"))
# train_L_S = get_L_S_Tensor(train_df,'embeddings train')
# pkl.dump(train_L_S,open("train_L_S_Embeddings.p","wb"))
# train_L_S = get_L_S_Tensor(train_df,'T and D embeddings train')
# pkl.dump(train_L_S,open("train_L_S_T-D_Embeddings.p","wb"))
# gold_L_S = get_L_S_Tensor(gold_df,'gold-labels-clean')
# pkl.dump(gold_L_S,open("gold-labels-clean.p","wb"))
print(str(datetime.timedelta(seconds=time.time() - start_time)))
# -

# import sys
# # !{sys.executable} -m pip install tensorflow
# !type python

# +
# Re-dump with pickle protocol 2 for Python-2 compatibility downstream.
# NOTE(review): `val_L_S` is only produced by a commented-out call above — this
# cell will NameError unless it was defined in an earlier session.
pkl.dump(val_L_S,open("val_L_S.p","wb"),protocol=2)
pkl.dump(train_L_S,open("train_L_S.p","wb"),protocol=2)
pkl.dump(test_L_S,open("test_L_S.p","wb"),protocol=2)

# +
import numpy as np
from scipy.sparse.csr import csr_matrix #need this if you want to save tfidf_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def process(s):
    """TF-IDF preprocessor: drop digits and underscores, lower-case the rest."""
    return ''.join([i for i in s if not i.isdigit() and i not in '_' ]).lower()

# Fit TF-IDF over the class word-bags followed by all training descriptions, so the
# first len(class_names) rows of the matrix are the per-class vectors.
vectorizer = TfidfVectorizer(preprocessor=process)
tfidf_matrix = vectorizer.fit_transform(class_words+train_df['complaint_description'].tolist())

# +
print(len(vectorizer.vocabulary_))
idf = vectorizer.idf_
d = dict(zip(vectorizer.get_feature_names(), idf))
from collections import OrderedDict
d = OrderedDict(sorted(d.items(),key=lambda kv: kv[1], reverse=True))
# for x in d.items():
#     print(x)
# Report the IDF of every class-name word (or flag it as out of vocabulary).
for c in class_names:
    for w in re.sub('-|,','',c.lower()).split():
        if w in d.keys():
            print(w,d[w])
        else:
            print(w,'not in vocabulary')

# +
# feature_names = vectorizer.get_feature_names()
# print(len(feature_names))
# print((feature_names))
# Map each class name to its TF-IDF row (rows 0..len(class_names)-1 of the matrix).
class_words_tfidf= dict()
for i,cat in enumerate(class_names):
    class_words_tfidf[cat] = tfidf_matrix.getrow(i)

# +
# Generated keyword LFs (output of create_LF_Based_On_Keywords, pasted inline).
# Each returns (class_index, 1) if any keyword occurs in c['complaint_text'],
# else (-1, 0) meaning "abstain".
def LF_KeyWord_AirPollution(c):
    words = ["air","pollution"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (0,1)
    return (-1,0)
def LF_KeyWord_AutorickshawsandTaxis(c):
    words = ["autorickshaws","taxis","taxi","auto","autorickshaw"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (1,1)
    return (-1,0)
def LF_KeyWord_BMTCDriverorConductor(c):
    words = ["bmtc","driver","conductor"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (2,1)
    return (-1,0)
def LF_KeyWord_BMTCNeednewBusRoute(c):
    words = ["bmtc","need","new","bus","route"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (3,1)
    return (-1,0)
def LF_KeyWord_BMTCOthers(c):
    words = ["bmtc"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (4,1)
    return (-1,0)
def LF_KeyWord_BadRoads(c):
    words = ["bad","roads","road"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (5,1)
    return (-1,0)
def LF_KeyWord_BrokenStormWaterDrains(c):
    words = ["broken","storm","water","drains","overflow","drainage"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (6,1)
    return (-1,0)
def LF_KeyWord_Cattle(c):
    words = ["cattle","cows","buffaloes","goats"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (7,1)
    return (-1,0)
def LF_KeyWord_ClearingofBlockageofUnderGroundDrainagePipelinesandReplacementofDamagedorMissingManholeCover(c):
    words = ["clearing","blockage","under","ground","drainage","pipelines","replacement","damaged","missing","manhole","cover"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (8,1)
    return (-1,0)
def LF_KeyWord_DesiltingLakes(c):
    words = ["desilting","lakes"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (9,1)
    return (-1,0)
def LF_KeyWord_Diseases(c):
    words = ["diseases","malaria","dengue","cholera"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (10,1)
    return (-1,0)
def LF_KeyWord_Electricity(c):
    words = ["electricity","power","current","power cut"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (11,1)
    return (-1,0)
def LF_KeyWord_FloodingofRoadsandFootpaths(c):
    words = ["flooding","roads","footpaths","water","flood","floods"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (12,1)
    return (-1,0)
def LF_KeyWord_Footpaths(c):
    words = ["footpaths","footpath"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (13,1)
    return (-1,0)
def LF_KeyWord_Garbage(c):
    words = ["garbage","waste","plastic","dirt"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (14,1)
    return (-1,0)
def LF_KeyWord_GovernmentLandEncroachment(c):
    words = ["government","land","encroachment","occupy","illegal"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (15,1)
    return (-1,0)
def LF_KeyWord_HawkersandVendors(c):
    words = ["hawkers","vendors"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (16,1)
    return (-1,0)
def LF_KeyWord_Hoardings(c):
    words = ["hoardings","advertisements"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (17,1)
    return (-1,0)
def LF_KeyWord_IllegalpostersandHoardings(c):
    words = ["illegal","posters","hoardings"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (18,1)
    return (-1,0)
def LF_KeyWord_LakesOthers(c):
    words = ["lakes","lake"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (19,1)
    return (-1,0)
def LF_KeyWord_MaintenanceofRoadsandFootpathsOthers(c):
    words = ["maintenance","roads","footpaths"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (20,1)
    return (-1,0)
def LF_KeyWord_Manholes(c):
    words = ["manholes","manhole","man hole"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (21,1)
    return (-1,0)
def LF_KeyWord_Mosquitos(c):
    words = ["mosquitos","mosquitoes"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (22,1)
    return (-1,0)
def LF_KeyWord_NeedNewStreetlights(c):
    words = ["need","new","streetlights","streetlight","light","new streetlight"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (23,1)
    return (-1,0)
def LF_KeyWord_NeedNewToilets(c):
    words = ["need","new","toilets","toilet"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (24,1)
    return (-1,0)
def LF_KeyWord_NewBusShelters(c):
    words = ["new","bus","shelters","shelter"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (25,1)
    return (-1,0)
def LF_KeyWord_NoSewageDrains(c):
    words = ["sewage","drains","drainage"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (26,1)
    return (-1,0)
def LF_KeyWord_NoisePollution(c):
    words = ["noise","pollution","siren","speakers","speakers","loud"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (27,1)
    return (-1,0)
def LF_KeyWord_Others(c):
    words = ["others"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (28,1)
    return (-1,0)
def LF_KeyWord_OverflowofStormWaterDrains(c):
    words = ["overflow","storm","water","drains","pipes"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (29,1)
    return (-1,0)
def LF_KeyWord_ParkingViolations(c):
    words = ["parking","violations"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (30,1)
    return (-1,0)
def LF_KeyWord_Parksandplaygrounds(c):
    words = ["parks","playgrounds","park","play","playground"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (31,1)
    return (-1,0)
def LF_KeyWord_Potholes(c):
    words = ["potholes","holes","pothole"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (32,1)
    return (-1,0)
def LF_KeyWord_PublicNuisance(c):
    words = ["public","nuisance"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (33,1)
    return (-1,0)
def LF_KeyWord_Repairofstreetlights(c):
    words = ["repair","streetlights","streetlight","light","broken","damaged"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (34,1)
    return (-1,0)
def LF_KeyWord_SewageandStormWaterDrainsOthers(c):
    words = ["sewage","storm","water","drains","drainage"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (35,1)
    return (-1,0)
def LF_KeyWord_StrayDogs(c):
    words = ["stray","dogs","dog"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (36,1)
    return (-1,0)
def LF_KeyWord_Traffic(c):
    words = ["traffic","vehicles"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (37,1)
    return (-1,0)
def LF_KeyWord_TreesParksandPlaygroundsOthers(c):
    words = ["trees","parks","playgrounds","tree"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (38,1)
    return (-1,0)
def LF_KeyWord_UnauthorizedConstruction(c):
    words = ["unauthorized","construction"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (39,1)
    return (-1,0)
def LF_KeyWord_WaterLeakage(c):
    words = ["water","leakage"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (40,1)
    return (-1,0)
def LF_KeyWord_WaterSupply(c):
    words = ["water","supply"]
    if(len(set(c['complaint_text']).intersection(words))>0):
        return (41,1)
    return (-1,0)
icmc/createLFs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- name = 'Python' age = 27 # ### 快速寫法 # 簡單快速,但又臭又長,不好維護 sentence = '我是' + name + ', ' + '今年' + str(age) + '歲' print(sentence) # ### Python 2.x的格式化方法 # 雖然看起來很直觀,但用起來還要考慮變數型別,好麻煩 sentence = '我是%s, 今年%d歲' % (name, age) print(sentence) # ### Python 3.x的格式化方法 # 好用好維護,但筆畫比較多 # 很正統的寫法 sentence = '我是{name}, 今年{age:.2f}歲'.format(name=name, age=age) print(sentence) # 偷吃步 sentence = '我是{}, 今年{:.5f}歲'.format(name, age) print(sentence) # ### Python 3.6後的格式化方法(formatted string literals) # 牛B sentence = f'我是{name}, 今年{age:.2f}歲' print(sentence)
Python/[Python] String formatting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# #%load_ext wurlitzer
#^^^ the wurlitzer extension is used to capture C/C++ output to be displayed in the notebook
#^^^ this is very useful for debugging, but it doesn't work on windows

#Python 2/3 compatibility
from __future__ import print_function,division

import time
from klampt import *
from klampt import vis
from IPython.display import clear_output
from klampt.vis.ipython import Playback

# Load the TX90 scenario world and show it with a playback widget.
world = WorldModel()
if world.loadFile("../data/tx90scenario0.xml"):
    sim = Simulator(world)
    vis.add("world",world)
    playback_widget = Playback(vis.scene())
    #If you'd like to show the print output from loading the file, comment out this line
    clear_output()
    vis.show()
    display(playback_widget)
    #NOTE: if you are going to add/modify items to the world in the same cell that it is created, you will
    #need to place all of those calls in a begin_rpc/end_rpc block
    vis.addText("HUD1","0",position=(1,1))
    # Semi-transparent red "ghost" robot used to preview planner targets.
    vis.add("ghost",world.robot(0).getConfig(),color=(1,0,0,0.5))
else:
    print("There was a problem loading the world file")

#Controls:
#left mouse click to rotate the view
#right click or ctrl+click to pan the view
#mouse wheel or shift+click to zoom the view

# +
import random
from klampt.plan.cspace import *
from klampt.plan.robotcspace import RobotCSpace
from klampt.model.collide import WorldCollider
from klampt.model.trajectory import *

#first time the plan-to is called
next_move_time = 1.0
#if this is too large, Jupyter will complain...
planning_time_limit = 2.0

robot = world.robot(0)

def plan_to(qa,qb):
    """Plan a collision-free path from configuration qa to qb.

    Uses SBL with shortcutting; keeps optimizing until `planning_time_limit`
    expires. Returns the best path found, or None if no path was found.
    Planning status is displayed on the HUD2 overlay.
    """
    collider = WorldCollider(world)
    cspace = RobotCSpace(robot,collider)
    cspace.eps = 1e-2
    optimizing = True
    MotionPlan.setOptions(type="sbl",connectionThreshold=2,perturbationRadius=2,shortcut=True)
    planner = MotionPlan(cspace)
    planner.setEndpoints(qa,qb)
    t0 = time.time()
    foundTime = None
    path = None
    vis.addText("HUD2","Planning...",position=(1,5))
    while time.time() - t0 < planning_time_limit:
        planner.planMore(1)
        if foundTime == None:
            path = planner.getPath()
            if path != None and len(path) > 0:
                foundTime = time.time()
                vis.addText("HUD2","Found first path in %gs"%(foundTime-t0,),pos=(1,5))
                if not optimizing:
                    break
    if foundTime != None:
        if optimizing:
            vis.addText("HUD2","Optimized for another %gs"%(time.time()-foundTime,),pos=(1,5))
        path = planner.getPath()
    else:
        path = None
    return path

def control_loop(t,controller):
    """Per-step controller callback: every ~5s pick a random joint-limit-respecting
    target, plan to it, and stream the path milestones to the controller.

    The ghost robot is colored yellow while planning, green on success, red on
    failure. Mutates the global `next_move_time`.
    """
    global next_move_time
    vis.addText("HUD1","%.2f"%(t,),pos=(1,1))
    if t+0.02 >= next_move_time:
        print("Planning on next time step for %fs..."%(planning_time_limit,))
    if t >= next_move_time:
        qmin,qmax = robot.getJointLimits()
        q = [random.uniform(a,b) for (a,b) in zip(qmin,qmax)]
        print("Calling plan to...",q)
        vis.setColor("ghost",1,1,0,0.5)
        vis.add("ghost",q)
        try:
            path = plan_to(controller.getCommandedConfig(),q)
        except Exception as e:
            print(e)
            path = None
        if path != None:
            vis.setColor("ghost",0,1,0,0.5)
            # Skip milestone 0 (the current configuration) and send the rest.
            for i,q in enumerate(path):
                if i == 1:
                    controller.addMilestoneLinear(q)
                elif i >= 2:
                    controller.addMilestoneLinear(q)
        else:
            vis.setColor("ghost",1,0,0,0.5)
            #controller.setMilestone(q)
        next_move_time += 5.0
    pass

# +
# Simulation time step (seconds).
dt = 0.02

def do_reset():
    """Reset the simulation and re-arm the planner timer (playback widget hook)."""
    global next_move_time
    sim.reset()
    sim.updateWorld()
    next_move_time = 2.0

def do_advance():
    """Advance the simulation one step, running the control loop first
    (playback widget hook)."""
    global world,sim
    if world.numRobots() > 0:
        control_loop(sim.getTime(),sim.controller(0)) #call code in the above cell
    #print("Simulating...",dt)
    sim.simulate(dt)
    sim.updateWorld()
    #print("Done.")

#this binds the playback widget buttons
playback_widget.advance = do_advance
playback_widget.reset = do_reset
playback_widget.quiet = False
# -
Jupyter/PlanAndSim.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.display import Audio sound_file = './beep-01a.wav' Audio(sound_file, autoplay=True) # + import networkx as nx import numpy as np import pandas as pd from community import best_partition from sklearn.metrics.cluster import normalized_mutual_info_score as NMI from sklearn.metrics.cluster import adjusted_mutual_info_score as AMI # - def getCommunityLouvain(fileName): A = pd.read_csv(fileName, sep=" ", header=None) G = nx.from_numpy_matrix(np.array(A)) return best_partition(G) fileNames = ['output_3layer_1.txt', 'output_3layer_2.txt', 'output_3layer_3.txt'] labelsDict = dict() for fileName in fileNames: comm = getCommunityLouvain(fileName) labelsDict[fileName] = np.array(list(comm.values())) scores = dict() for key1 in labelsDict: for key2 in labelsDict: if key1 != key2 and key2 + "-" + key1 not in scores: score = NMI(labelsDict[key1], labelsDict[key2]) scores[key1 + "-" + key2] = score scores scores = dict() for key1 in labelsDict: for key2 in labelsDict: if key1 != key2 and key2 + "-" + key1 not in scores: score = AMI(labelsDict[key1], labelsDict[key2]) scores[key1 + "-" + key2] = score labelsDict = dict() for fileName in fileNames: comm = getCommunityLouvain(fileName) labelsDict[fileName] = np.array(list(comm.values())) scores = dict() for key1 in labelsDict: for key2 in labelsDict: if key1 != key2 and key2 + "-" + key1 not in scores: score = AMI(labelsDict[key1], labelsDict[key2]) scores[key1 + "-" + key2] = score max(scores.values()) NMI(labelsDict['output_3layer_1.txt'], labelsDict['output_3layer_2.txt'])
Multilayer tests/NMI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: tacatron # language: python # name: tacatron # --- # # Listening Procedure # # This Jupyter notebook loads both models as well as a validation dataset for testing. # # At a later stage, I will add the data preprocessing script to allow us to create textual data directly, should be pretty straight forward. # + import numpy as np import torch import torch.nn.functional as F import librosa import lws import matplotlib.pyplot as plt from IPython.display import Audio from networks import SSRN, Text2Mel from hparams import Hparams # %matplotlib inline # set up parameters for creating wav from mel spectrogram rate = 22050 eta = 1.3 gamma = 0.6 # load model and text data #device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") device = "cpu" # create models and load hp = Hparams() text2mel = Text2Mel(hp, device) ssrn = SSRN(hp, device) text2mel.load_state_dict(torch.load("save_stuff/text2mel/checkpoint/epoch_0_text2mel_model.pt")) ssrn.load_state_dict(torch.load("save_stuff/ssrn/checkpoint/epoch_270_ssrn_model.pt")) # load validation text text = np.load("../../ETTT/Pytorch-DCTTS/LJSpeech_val/LJ001-0050_txt.npy") mel_target = np.load("../../ETTT/Pytorch-DCTTS/LJSpeech_val/LJ001-0050_mel.npy") fft_target = np.load("../../ETTT/Pytorch-DCTTS/LJSpeech_val/LJ001-0050_fft.npy") # load to data to torch text = torch.from_numpy(text).long().unsqueeze(0) mel_target = torch.from_numpy(mel_target).unsqueeze(0) fft_target = torch.from_numpy(fft_target).unsqueeze(0) # create initial mel_input mel_in = torch.zeros(1,80,1) T = mel_target.shape[-1] # run our text2mel network forward with torch.no_grad(): for i in range(T): y, _, a = text2mel.forward(text, mel_in) # update mel_in mel_in = torch.cat((mel_in, y[:,:,-1].view(1,80,1)), dim=-1) # use our ssrn network to get the t output prediction our_pred,_ = 
ssrn.forward(y) # - with torch.no_grad(): # load y directly y = np.load("../../ETTT/Pytorch-DCTTS/save_stuff/my_prediction.npy") y = torch.from_numpy(y) our_pred,_ = ssrn.forward(y) y.shape # + print("attention") plt.matshow(a[0]) print("prediction plot") plt.matshow(y[0]) print("actual plot") plt.matshow(mel_target[0]) print("predict fft") plt.matshow(our_pred[0]) plt.matshow(fft_target[0]) l1_loss = F.l1_loss(y, mel_target) print(l1_loss.numpy()) # - # # Ground-Truth Sample # load our ground truth data and listen t = fft_target.numpy() # lastly convert our voice prediction to wav via lws t = t[0].astype(np.float64) t[t<0] = 0 t = t ** (eta / gamma) * 50 t = np.transpose(t, (1, 0)) lws_processor=lws.lws(1024, 256, mode='speech', fftsize=1024) t = lws_processor.run_lws(t) t = lws_processor.istft(t) Audio(t, rate=rate) our_pred # # Generated Sample # load our ground truth data and listen t = our_pred.numpy() # lastly convert our voice prediction to wav via lws t = t[0].astype(np.float64) t[t<0] = 0 t = t ** (eta / gamma) * 50 t = np.transpose(t, (1, 0)) lws_processor=lws.lws(1024, 256, mode='speech', fftsize=1024) t = lws_processor.run_lws(t) t = lws_processor.istft(t) Audio(t, rate=rate)
Listening.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # "Iris Classification"
# > "Classification of flower types using Iris Dataset"
#
# - toc: false
# - branch: master
# - badges: true
# - comments: true
# - categories: [jupyter, pytorch, pytorch-lightning]
# - hide: false
# - search_exclude: true

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import pytorch_lightning as pl
import pandas as pd
import numpy as np

# Raw string: plain '\S', '\I' are invalid escape sequences in a Windows path.
path = r'D:\Study\Iris dataset\Iris.csv'
df = pd.read_csv(path)
df.head()

# +
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader


class IrisDS(Dataset):
    """Map-style dataset over one split of the Iris dataframe.

    Parameters
    ----------
    data : pd.DataFrame
        The rows belonging to this split (train or validation).
    labels : dict
        Mapping from species name to integer class id.

    Bug fix: ``__getitem__`` previously read from the module-level ``df``
    instead of ``self.data``, so both the train and validation datasets
    silently returned rows of the *full* dataframe, selected by original
    row label rather than by position within the split.
    """

    # Feature columns used as model input, in order.
    _FEATURE_COLS = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']

    def __init__(self, data, labels):
        super().__init__()
        # reset_index(drop=True) so positional indices line up with row
        # labels after train_test_split's shuffling.
        self.data = data.reset_index(drop=True)
        self.labels = labels

    def __getitem__(self, ix):
        # Features and label come from *this split*, not the global dataframe.
        features = torch.tensor(self.data[self._FEATURE_COLS].values[ix])
        label = self.labels[self.data['Species'][ix]]
        return features, label

    def __len__(self):
        return len(self.data)


class IrisDM(pl.LightningDataModule):
    """LightningDataModule: splits the dataframe and serves train/val loaders."""

    def __init__(self, df, labels):
        super().__init__()
        self.trn, self.val = train_test_split(df)
        self.trn_dataset = IrisDS(self.trn, labels)
        self.val_dataset = IrisDS(self.val, labels)

    def train_dataloader(self):
        return DataLoader(self.trn_dataset, batch_size=32, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.val_dataset, batch_size=32)


labels = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
dm = IrisDM(df, labels)
a = iter(dm.train_dataloader())
b = next(a)
b
# -


class Net(pl.LightningModule):
    """Small MLP classifier: 4 features -> 16 -> 32 -> 3 classes."""

    def linear_layer(self, ni, no):
        # Linear + ReLU building block.
        return nn.Sequential(
            nn.Linear(ni, no),
            nn.ReLU(inplace=True)
        )

    def __init__(self):
        super().__init__()
        self.fc = nn.Sequential(
            self.linear_layer(4, 16),
            self.linear_layer(16, 32)
        )
        self.lin = nn.Linear(32, 3)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, x):
        x = self.fc(x)
        x = self.lin(x)
        return x

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x.float())
        loss = self.loss_fn(y_hat, y)
        # Per-batch accuracy from argmax predictions.
        matches = [torch.argmax(i) == j for i, j in zip(y_hat, y)]
        acc = sum(matches) / len(matches)
        self.log('acc', acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        return {'loss': loss, 'acc': acc}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x.float())
        loss = self.loss_fn(y_hat, y)
        matches = [torch.argmax(i) == j for i, j in zip(y_hat, y)]
        val_acc = sum(matches) / len(matches)
        self.log('val_acc', val_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        return {'loss': loss, 'val_acc': val_acc}

    def configure_optimizers(self):
        return optim.Adam(self.parameters(), lr=1e-3)

    def predict(self, test):
        """Predict class id(s): an int for a single 4-feature sample, a tensor for a batch."""
        if len(test) == 4:
            pred = self(test.float())
            return torch.argmax(pred).item()
        else:
            pred = self(test.float())
            return torch.argmax(pred, dim=1)

    def evaluate(self, testx, labels):
        """Accuracy against ground truth (bool for one sample, float for a batch)."""
        preds = self.predict(testx)
        if isinstance(preds, int):
            return preds == labels
        else:
            matches = (preds == labels)
            acc = sum(matches) / len(matches)
            return acc.item()

    def get_progress_bar_dict(self):
        # Hide the version number from the Lightning progress bar.
        tqdm_dict = super().get_progress_bar_dict()
        if 'v_num' in tqdm_dict:
            del tqdm_dict['v_num']
        return tqdm_dict


if __name__ == '__main__':
    net = Net()
    trainer = pl.Trainer(max_epochs=100, gpus=[0])
    trainer.fit(net, dm)

# +
testset = dm.val_dataset
rnd = np.random.randint(0, len(testset))
testx = testset[rnd][0]
testy = testset[rnd][1]

batchset = iter(dm.val_dataloader())
batchx, batchy = next(batchset)

pred = net.predict(batchx)
acc = net.evaluate(batchx, batchy)
acc
# -
_notebooks/2021-01-26-IrisClassification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/alfianhid/Klasifikasi-Penyakit-Pada-Tanaman-Apel-Berdasarkan-Citra-Daun-Menggunakan-Algoritma-CNN/blob/main/Klasifikasi_Penyakit_Pada_Tanaman_Apel_Berdasarkan_Citra_Daun_Menggunakan_Algoritma_CNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="dfFR4cS_8kUR"
# **Connect Google Colab to Google Drive**

# + colab={"base_uri": "https://localhost:8080/"} id="bceRnJN18b66" outputId="f24b84e3-9755-4a21-bd55-855f39c17015"
from google.colab import drive
drive.mount('/content/drive')

# + [markdown] id="142TlINU9Jt5"
# **Extract the zipped dataset from Google Drive**

# + id="68N75tOG9QZX"
import os
from os import listdir
import zipfile

local_zip = "/content/drive/My Drive/dataset.zip"
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall("/content/drive/My Drive/workspace")
zip_ref.close()

# + colab={"base_uri": "https://localhost:8080/"} id="sxeXYc5a-DpD" outputId="4ac3ce17-2311-41af-bacc-b3b229babebf"
# Check whether the dataset was extracted correctly
# !ls "/content/drive/My Drive/workspace/dataset"

# + [markdown] id="h7_pIQUD_tEf"
# **Import the required libraries**

# + id="-9vwvceh_xYb"
import numpy as np
import pickle
import cv2
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
from keras.models import Sequential, Model
from tensorflow.keras.layers import Dense
from tensorflow.python.keras.layers import deserialize, serialize
from tensorflow.python.keras.saving import saving_utils
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation, Flatten, Dropout, Dense
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.preprocessing import image
from keras.preprocessing.image import img_to_array
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split

# + [markdown] id="0TC7yVuL_6Ke"
# **Process the extracted dataset**

# + id="hghmeh8V_00v"
# Target (resized) image dimensions
DEFAULT_IMAGE_SIZE = tuple((256, 256))

# Maximum number of images to load per class folder
N_IMAGES = 1000

# Dataset folder paths
root_dir = '/content/drive/My Drive/workspace/dataset'
train_dir = os.path.join(root_dir, 'train')
val_dir = os.path.join(root_dir, 'val')

# + [markdown] id="R8TFKHXzA7pU"
# **Helper to resize an image and convert it to an array**

# + id="7iy7FUpCBFb3"
def convert_image_to_array(image_dir):
    """Read an image file, resize it to DEFAULT_IMAGE_SIZE, and return it as an array.

    Returns an empty array when cv2 cannot read the file, and None when an
    exception is raised (the error is printed, not re-raised).
    """
    try:
        image = cv2.imread(image_dir)
        if image is not None:
            image = cv2.resize(image, DEFAULT_IMAGE_SIZE)
            return img_to_array(image)
        else:
            return np.array([])
    except Exception as e:
        print(f"Error : {e}")
        return None

# + [markdown] id="82zYvFSHBJIM"
# **Process the image folders**

# + colab={"base_uri": "https://localhost:8080/"} id="zbGZgE2bBTQT" outputId="e5990aa8-6821-442c-8c8f-0e81c08a78d9"
image_list, label_list = [], []

try:
    print("[INFO] Sedang memproses gambar...")
    plant_disease_folder_list = listdir(train_dir)

    # Each subfolder of train_dir is one disease class; its name is the label.
    for plant_disease_folder in plant_disease_folder_list:
        print(f"[INFO] Memproses folder {plant_disease_folder}...")
        plant_disease_image_list = listdir(f"{train_dir}/{plant_disease_folder}/")

        # Only the first N_IMAGES .jpg/.JPG files per class are kept.
        for image in plant_disease_image_list[:N_IMAGES]:
            image_directory = f"{train_dir}/{plant_disease_folder}/{image}"
            if image_directory.endswith(".jpg")==True or image_directory.endswith(".JPG")==True:
                image_list.append(convert_image_to_array(image_directory))
                label_list.append(plant_disease_folder)

    print("[INFO] Berhasil memproses seluruh gambar.")
except Exception as e:
    print(f"Error : {e}")

# Transform the loaded training image data into numpy array
# NOTE(review): 225.0 looks like a typo for 255.0 (8-bit pixel maximum) --
# confirm. The same constant is reused in predict_disease below, so training
# and prediction are at least normalised consistently.
np_image_list = np.array(image_list, dtype=np.float16) / 225.0
print()

# Check the number of images loaded for training
image_len = len(image_list)
print(f"Jumlah seluruh gambar: {image_len}")

# + [markdown] id="iq_jpLOERUiv"
# **Show the number of image classes**

# + colab={"base_uri": "https://localhost:8080/"} id="DTQmgskmRXxP" outputId="81345da8-74f5-41ee-bca9-99a65e3b62ea"
label_binarizer = LabelBinarizer()
image_labels = label_binarizer.fit_transform(label_list)

# Persist the fitted binarizer so prediction can map ids back to class names.
pickle.dump(label_binarizer,open('plant_disease_label_transform.pkl', 'wb'))
n_classes = len(label_binarizer.classes_)

print("Jumlah kelas gambar: ", n_classes)

# + [markdown] id="pDR9fxPhSB1u"
# **Augment the data for better performance**

# + id="2tYYy7bASHP5"
augment = ImageDataGenerator(rotation_range=25, width_shift_range=0.1,
                             height_shift_range=0.1, shear_range=0.2,
                             zoom_range=0.2, horizontal_flip=True,
                             fill_mode="nearest")

# + [markdown] id="TekyHNs4SL8s"
# **Split the dataset with an 80:20 ratio**

# + colab={"base_uri": "https://localhost:8080/"} id="BejLNh-kSP7x" outputId="3dd0b2e2-8f59-4a9c-c98f-9c409ffa74b5"
print("[INFO] Membagi dataset menjadi data latih dan data uji...")
x_train, x_test, y_train, y_test = train_test_split(np_image_list, image_labels, test_size=0.2, random_state=45)

# + [markdown] id="9dRwT2HuSebI"
# **Build the classifier model**

# + id="cM2yNfYwSh4l"
# Model hyperparameters
EPOCHS = 10
STEPS = 100
LR = 1e-3
BATCH_SIZE = 32
WIDTH = 256
HEIGHT = 256
DEPTH = 3

# + [markdown] id="3NH1uZNKSwsc"
# **Create the model from Convolutional, Normalization, Pooling, Dropout, and Activation layers**

# + colab={"base_uri": "https://localhost:8080/"} id="OTbvUOrpSray" outputId="a144abb3-8c79-4b2f-b645-ace7f7bc1740"
model = Sequential()
inputShape = (HEIGHT, WIDTH, DEPTH)
chanDim = -1
# Support channels-first backends by reordering the input shape.
if K.image_data_format() == "channels_first":
    inputShape = (DEPTH, HEIGHT, WIDTH)
    chanDim = 1

model.add(Conv2D(32, (3, 3), padding="same",input_shape=inputShape))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(n_classes))
model.add(Activation("softmax"))

model.summary()

# + [markdown] id="GvgGCq88TXRH"
# **Train the model**

# + colab={"base_uri": "https://localhost:8080/"} id="TxK9wEHsTeIO" outputId="e2ae4f03-57a5-4b4f-a225-5f8b3f421450"
# Set up the optimizer
opt = Adam(learning_rate=LR, decay=LR/EPOCHS)

# Compile the model
# NOTE(review): binary_crossentropy with a softmax over n_classes outputs is
# unusual for multi-class data -- categorical_crossentropy is the conventional
# choice, and the reported accuracy metric differs between the two; confirm.
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# Train the model
print("[INFO] Sedang melatih model...")
history = model.fit(augment.flow(x_train, y_train, batch_size=BATCH_SIZE),
                    validation_data=(x_test, y_test),
                    steps_per_epoch=len(x_train) // BATCH_SIZE,
                    epochs=EPOCHS, verbose=1)

# + [markdown] id="UpSg5e_8i5dY"
# **Plot the accuracy and loss curves from training**

# + colab={"base_uri": "https://localhost:8080/", "height": 545} id="pJyby-jujBFZ" outputId="a47e7d3e-01c6-4f71-9cb2-7aec3564f70f"
# Collect the training curves
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)

# Train and validation accuracy
plt.plot(epochs, acc, 'b', label='Training accurarcy')
plt.plot(epochs, val_acc, 'r', label='Validation accurarcy')
plt.title('Training and Validation accurarcy')
plt.legend()
plt.figure()

# Train and validation loss
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and Validation loss')
plt.legend()
plt.show()

# + [markdown] id="YDNKe_yotELL"
# **Measure model performance**

# + colab={"base_uri": "https://localhost:8080/"} id="4OOwivHptEpB" outputId="8d32e1cf-2814-45d0-d074-ad865466299f"
print("[INFO] Menghitung akurasi model...")
scores = model.evaluate(x_test, y_test)
print(f"Test Accuracy: {scores[1]*100}")

# + [markdown] id="csux2qjhyVvc"
# **Save the model for use in the prediction stage**

# + id="j97dd1yZycHU"
# Hotfix function
def make_keras_picklable():
    """Monkey-patch keras Model so that instances can be pickled.

    NOTE(review): the generated __reduce__ references `unpack`, which is not
    defined anywhere in this notebook -- unpickling model.pkl will raise a
    NameError unless `unpack` is supplied elsewhere; confirm before relying
    on the saved file.
    """
    def __reduce__(self):
        model_metadata = saving_utils.model_metadata(self)
        training_config = model_metadata.get("training_config", None)
        model = serialize(self)
        weights = self.get_weights()
        return (unpack, (model, training_config, weights))
    cls = Model
    cls.__reduce__ = __reduce__

# + id="QEgM0RIG05Ri"
# Apply the patch defined above, then pickle the trained model.
make_keras_picklable()
with open('model.pkl', 'wb') as f:
    pickle.dump(model, f)

# + colab={"base_uri": "https://localhost:8080/"} id="Pat9Jd5F0_cj" outputId="6cb61979-9af4-4ea7-ac2c-f932952be351"
print("[INFO] Saving label transform...")
filename = 'plant_disease_label_transform.pkl'
image_labels = pickle.load(open(filename, 'rb'))

# + [markdown] id="6EDVa41UzL2W"
# **Test the classifier**

# + id="gjh7uWeV1UPu"
# Helper to predict and display the disease class of one image
def predict_disease(image_path):
    """Display the image at image_path and print its predicted class name."""
    image_array = convert_image_to_array(image_path)
    # Same normalisation constant as training (see NOTE(review) above re 225.0).
    np_image = np.array(image_array, dtype=np.float16) / 225.0
    np_image = np.expand_dims(np_image,0)
    plt.imshow(plt.imread(image_path))
    # NOTE(review): Sequential.predict_classes was removed in TF 2.6+ --
    # np.argmax(model.predict(...), axis=-1) is the modern equivalent.
    result = model.predict_classes(np_image)
    print((image_labels.classes_[result][0]))

# + colab={"base_uri": "https://localhost:8080/", "height": 340} id="rUtamUJP1g_h" outputId="b04e6077-905a-4880-f25b-c71803e9ef6c"
predict_disease('/content/drive/My Drive/workspace/dataset/val/daun_apel_black_rot/e4d473a3-83ad-496d-a97f-5aee9d713d69___JR_FrgE.S 2782.JPG')

# + colab={"base_uri": "https://localhost:8080/", "height": 340} id="eWTsruhq29ih" outputId="ef0e6590-4120-46e8-d871-9fc3129c06b4"
predict_disease('/content/drive/My Drive/workspace/dataset/val/daun_apel_cedar_rust/1e099d8e-b40c-4cf3-bb52-4e77b5a2dc69___FREC_C.Rust 0155.JPG')

# + colab={"base_uri": "https://localhost:8080/", "height": 340} id="sa-lVpV43YJ9" outputId="e65dedc2-1d69-4374-f30b-a0164f4a9050"
predict_disease('/content/drive/My Drive/workspace/dataset/val/daun_apel_scab/d8b8a834-08df-43d9-8a1a-4bc8e508a1e3___FREC_Scab 3280.JPG')

# + colab={"base_uri": "https://localhost:8080/", "height": 340} id="LWUm8r6r3szl" outputId="9798e6d6-7f8e-4b79-e282-de94a2e40fac"
predict_disease('/content/drive/My Drive/workspace/dataset/val/daun_apel_healthy/f591e5cc-e9fc-4d82-b2cd-09ab19cb4581___RS_HL 7829.JPG')
Klasifikasi_Penyakit_Pada_Tanaman_Apel_Berdasarkan_Citra_Daun_Menggunakan_Algoritma_CNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Introduction to data analytics with pandas # - # ### <NAME> # # #### PyData Seattle, July 2017 # + [markdown] slideshow={"slide_type": "subslide"} # ## Systems check # # Do you have a working Python installation, with the `pandas` package ? # - import pandas as pd # + [markdown] slideshow={"slide_type": "skip"} # **Note :** This cell should run without raising a traceback. Assuming it runs, you can also try printing the value of `pd.__version__` to see what version of `pandas` you have installed. # + [markdown] slideshow={"slide_type": "slide"} # ## A little about me # # - Lapsed computational physicist # - PhD computational neuroscience, postdoc statistical epidemiology # - Data Scientist at CBRE - `www.cbredev.com` # - ATOM at Seattle # + [markdown] slideshow={"slide_type": "subslide"} # ## A little about the hero of this story # # <center><img src="images/coffee_machine.jpg" width="400px" /></center> # + [markdown] slideshow={"slide_type": "skip"} # We'll be analysing a real-world dataset together. It's about my favourite thing in the world : **coffee**. This dataset was collected at the Mathematics Institute at the University of Warwick. It's a time-series dataset, describing the **total number of coffees made by our espresso machine** by a certain date. # + [markdown] slideshow={"slide_type": "subslide"} # ## A little about this workshop # # We'll be running through an analysis of this dataset as a way to expose you to the `pandas` API. The aim is to develop a little familiarity with how to work with `pandas`. # + [markdown] slideshow={"slide_type": "fragment"} # Slides are available at https://github.com/QCaudron/pydata_pandas. One notebook contains solutions; **beware of spoilers**. 
# # The notebooks contain **notes** about what we're doing that I'll skip during this workshop, but try to explain on the way. # + [markdown] slideshow={"slide_type": "fragment"} # The `pandas` API is **enormous**. The [documentation](http://pandas.pydata.org/pandas-docs/stable/) is excellent, don't hesitate to look things up. # + [markdown] slideshow={"slide_type": "subslide"} # ## Key questions # # The dataset : **total number of coffees made vs time**. # # 1. Who are the main contributors to this dataset, and when are contributions generally made ? # 2. What are the department's weekday coffee habits ? # 3. How much coffee are people drinking ? # + [markdown] slideshow={"slide_type": "slide"} # ## Let's begin # - import pandas as pd # %matplotlib inline # + [markdown] slideshow={"slide_type": "skip"} # **Note :** The second line here tells `matplotlib` to plot directly under the cell where any plotting code is called. `pandas` uses `matplotlib` to generate graphs, and without this, the graphs would appear outside the Jupyter notebook when you called `plt.show()` - but we just want them to appear without having to do this. # # http://ipython.readthedocs.io/en/stable/interactive/plotting.html#id1 # + [markdown] slideshow={"slide_type": "subslide"} # ## Importing the data # - # Let's import the coffee data from CSV. # Read data from data/coffees.csv data = pd.read_csv("data/coffees.csv") # + [markdown] slideshow={"slide_type": "skip"} # **Note :** `pandas` can read from many data formats : CSV, JSON, Excel, HDF5, SQL, and more. # # http://pandas.pydata.org/pandas-docs/version/0.20/io.html # + [markdown] slideshow={"slide_type": "subslide"} # #### What does this data look like ? # - data # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's just look at the first few rows. # - # .head() data.head() # + [markdown] slideshow={"slide_type": "fragment"} # We have an index, and three columns : `timestamp`, `coffees`, and `contributor`. # # Uh-oh. 
Why is there a string of text, `testing`, in our coffee numbers ? What's going on in the `coffees` column in the row after that ? # + [markdown] slideshow={"slide_type": "skip"} # **Note :** `df.head(n=10)` would show the first ten rows. The default is `n=5`. # # https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's look at that string in the third row. # - # .loc or .iloc data.loc[2] # + [markdown] slideshow={"slide_type": "fragment"} # Definitely a string. We'll note this as something to fix after we finish looking around. # + [markdown] slideshow={"slide_type": "skip"} # **Note :** `.loc` uses a label-based lookup, which means that the value you pass into the square brackets must be in the index. Another method, `.iloc`, is integer-location-based, so `.iloc[2]` would return the third row. In this case, they're the same, but had we changed our index, as we'll see later, things would work differently. # # Indexing a dataframe with `[]` directly returns a `pd.Series` or `pd.DataFrame` by searching over *columns*, not rows. Indexing a `pd.Series` with `[]` is like indexing a dataframe with `.iloc`. # # https://pandas.pydata.org/pandas-docs/stable/indexing.html # + [markdown] slideshow={"slide_type": "subslide"} # #### We should also take a look at that NaN. In fact, let's look at the first five values in `coffees`. # - # [] indexing on a series data.coffees[:5] # + [markdown] slideshow={"slide_type": "skip"} # **Note :** here, we're indexing a *series* ( a `pd.Series` object ). From a `pd.DataFrame` ( here, `data` ), when you access a single column ( `data.coffees` or `data["coffees"]` ), the object returned is a `pd.Series`. From that, indexing directly with `[]` works in an integer-location-based manner, and like with numpy arrays, you can take slices ( `[:5]` ). 
# # https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html # + [markdown] slideshow={"slide_type": "subslide"} # #### How long is the dataset ? # + slideshow={"slide_type": "-"} print("Dataset length :") # len() print(len(data)) # + [markdown] slideshow={"slide_type": "subslide"} # #### What else can we find out ? # - # .describe() data.describe() # + [markdown] slideshow={"slide_type": "fragment"} # Looks like we also have some missing data - we have 671 rows, but the `coffees` column only has 658 entries. # + [markdown] slideshow={"slide_type": "skip"} # **Note :** `.describe()` returns different things based on what's in the dataframe, as we'll see later. For numerical columns, it will return things like the mean, standard deviation, and percentiles. For object columns ( strings or datetimes ), it will return the most frequent entry and the first and last items. For all columns, `.describe()` will return the count of objects in that column ( not counting NaNs ) and the unique number of entries. You can determine what's returned using `.describe()`'s keyword arguments. # # https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.describe.html # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's look at the dataframe where `coffees` is null. # - # .isnull() and boolean indexing with [] data[data.coffees.isnull()] # + [markdown] slideshow={"slide_type": "skip"} # **Note :** `.isnull()` returns a boolean array ( an array of `True`s and `False`s ), that you can then use to index the dataframe directly. Here, our boolean array tells us which entries in the `coffees` column are null, and we use that to index against the full dataframe - so we get back every column in the dataframe, but only those rows where `coffees` is null. # # https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.isnull.html # + [markdown] slideshow={"slide_type": "subslide"} # #### What type of Python objects are the columns ? 
# - # .dtypes data.dtypes # + [markdown] slideshow={"slide_type": "fragment"} # The `contributor` column makes sense as `object`, because we expect strings there; but surely the `timestamp` should be a timestamp-type, and `coffees` should be numerical ? # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's inspect what's in the `timestamp` column. # + # print the first element of the series with [] indexing print(data.timestamp[0]) # print its type() print(type(data.timestamp[0])) # + [markdown] slideshow={"slide_type": "fragment"} # It looks like the `timestamp` field was read from CSV as a string. That makes sense - CSV files are very basic. We'll have `pandas` interpret these strings as datetimes for us automatically. # + [markdown] slideshow={"slide_type": "skip"} # **Note :** here's an example of using direct `[]` indexing on a `pd.Series`. We're accessing the first entry, just to see what type of object we have there. # + [markdown] slideshow={"slide_type": "subslide"} # #### On our first pass, what problems did we find ? # # - The `timestamp` column contains strings; these need to be datetimes # - The `coffees` column contains some null values and at least one string # + [markdown] slideshow={"slide_type": "slide"} # ## Cleaning the data # + [markdown] slideshow={"slide_type": "subslide"} # #### The `coffees` column should only contain numerical data. # + # cast the coffees column using pd.to_numeric, and coerce errors data.coffees = pd.to_numeric(data.coffees, errors="coerce") data.head() # + [markdown] slideshow={"slide_type": "subslide"} # #### The `coffees` column contains NaNs. # + # Use .dropna() using a subset, and pass inplace data.dropna(subset=["coffees"], inplace=True) data.head() # + [markdown] slideshow={"slide_type": "subslide"} # #### The `coffees` column is of type `float`. 
# + # Cast to int using .astype() data.coffees = data.coffees.astype(int) data.head() # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's have pandas parse the `timestamp` strings to datetime objects. # + # pd.to_datetime() data.timestamp = pd.to_datetime(data.timestamp) # Confirm dtypes data.dtypes # + [markdown] slideshow={"slide_type": "subslide"} # #### So where do we stand ? # - # .describe(), passing the include kwarg to see all information data.describe(include="all") # + slideshow={"slide_type": "subslide"} # What do the first few rows look like ? data.head() # + [markdown] slideshow={"slide_type": "skip"} # **Note :** `.describe(include="all")` is describing all attributes of all columns, but some don't make sense based on the column's `dtype`. For example, the contributor column has no `first` and `last` attributes, because those describe the first and last entries in an ordered series. That makes sense for the timestamp - those have an intuitive definition of sorting - but not so much for strings ( alphabetical order doesn't really matter when they're arbitrary strings ). Similary, the timestamp column has no mean or other numerical traits. What does it mean to calculate the mean timestamp ? # + [markdown] slideshow={"slide_type": "slide"} # ## The time-series at a glance # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's begin by visualising the coffee counts. # - # .plot() on the coffees series data.coffees.plot() # + [markdown] slideshow={"slide_type": "fragment"} # `pandas` is plotting the coffees against the index, which is just a series of integers. # + [markdown] slideshow={"slide_type": "skip"} # **Note :** `.plot()` on a `pd.Series` will plot the data against the index. On a `pd.DataFrame`, the `.plot()` method allows plotting of one column against another. # # By default, `.plot()` renders a line graph, but you can specify which type of plot you'd like - bar, line, histogram, area, scatter, etc.. 
# # https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.plot.html # # https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's use the dataframe's `plot()` method rather than that of the series. # - # .plot() on the dataframe, setting x to the timestamp, with dot-dash style data.plot(x=data.timestamp, style=".-") # + [markdown] slideshow={"slide_type": "fragment"} # We have some very uneven spacing in places. We might start by cutting off the last few points of this time-series, which is missing a lot of data. # + [markdown] slideshow={"slide_type": "subslide"} # #### We'll inspect the last few points of this time-series. # - # .tail() with ten rows data.tail(n=10) # + [markdown] slideshow={"slide_type": "fragment"} # After mid-March, things start getting spaced rather erratically. # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's cut off the tail of the time-series, anything after 2013-03-01. # + # Use conditional indexing against the timestamp data = data[data.timestamp < "2013-03-01"] data.tail() # + [markdown] slideshow={"slide_type": "skip"} # **Note :** this is another example of boolean indexing. `data.timestamp < "2013-03-01"` is a boolean array, and can be passed into the dataframe immediately in `[]`, much like with a `np.ndarray`. # + [markdown] slideshow={"slide_type": "subslide"} # #### One final look. # - # Once again, plot the data against the timestamp data.plot(x=data.timestamp, style=".-") # + [markdown] slideshow={"slide_type": "slide"} # ## 1. Contributions to the time-series # + [markdown] slideshow={"slide_type": "subslide"} # #### Who are our main contributors ? # - # .value_counts() data.contributor.value_counts() # + [markdown] slideshow={"slide_type": "skip"} # **Note :** `.value_counts()` counts the unique values in a series. It's similar to doing a `.groupby()` followed by a `.count()`, as we'll see soon. 
# # https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.value_counts.html # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's plot this. # - # .plot() a bar chart from the value counts data.contributor.value_counts().plot(kind="bar") # + [markdown] slideshow={"slide_type": "subslide"} # #### On which weekdays were contributions made ? # + # Create a series of the weekdays for each entry using .dt.weekday weekdays = data.timestamp.dt.weekday # assign() it to our dataframe data = data.assign(weekdays=weekdays) data.head() # + [markdown] slideshow={"slide_type": "subslide"} # #### Can we replace these integers with actual weekdays ? # + weekday_names = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"] weekday_dict = {key: weekday_names[key] for key in range(7)} # Use .apply() to apply a custom function to the weekdays column data.weekdays = data.weekdays.apply(lambda x: weekday_dict[x]) data.head() # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's group by these weekdays. # + # .groupby() the weekdays and then .count() rows in each group weekday_counts = data.groupby("weekdays").count() # We can reorder this dataframe by our weekday_names list weekday_counts = weekday_counts.loc[weekday_names] weekday_counts # + [markdown] slideshow={"slide_type": "skip"} # **Note :** this first line could be replaced by `weekday_counts = data.weekdays.value_counts()`, with the only difference being that that would return a series to us, and here, we got back a dataframe. # + [markdown] slideshow={"slide_type": "subslide"} # #### We can now visualise these weekday counts. # - # .plot() a bar chart of data in weekday_counts weekday_counts.timestamp.plot(kind="bar", title="Datapoints added on each weekday") # + [markdown] slideshow={"slide_type": "slide"} # ## 2. 
Weekday trends # + [markdown] slideshow={"slide_type": "subslide"} # #### First, we'll set our timestamps to the dataframe's index # + # Set the dataframe's .index property data.index = data.timestamp # Let's drop the timestamp column, as we no longer need it data.drop(["timestamp"], axis=1, inplace=True) data.head() # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's add some rows at midnight on every day. # + # pd.date_range, with daily frequency, and normalisation midnights = pd.date_range(data.index[0], data.index[-1], freq="D", normalize=True) midnights # + [markdown] slideshow={"slide_type": "skip"} # **Note :** `pd.date_range` creates a fixed-frequency DatetimeIndex. `normalize=True` ensures these datetimes are at midnight, and not at whatever time the starting point is. # # https://pandas.pydata.org/pandas-docs/stable/generated/pandas.date_range.html # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's take the union of this index and our dataset's index. # + # Take the union of the existing and new indices new_index = midnights.union(data.index) new_index # + [markdown] slideshow={"slide_type": "skip"} # **Note :** the union of these indices is just a new index where entries from both indices are present. It's sorted by time. # + [markdown] slideshow={"slide_type": "subslide"} # #### Now we can reindex our dataframe with this new index. # + # .reindex() the dataframe upsampled_data = data.reindex(new_index) upsampled_data.head(10) # + [markdown] slideshow={"slide_type": "skip"} # **Note :** `.reindex()` keeps any values that conform to the new index, and inserts `NaN`s where we have no values. # # https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.reindex.html # + [markdown] slideshow={"slide_type": "subslide"} # #### We can fill in these `NaN`s using interpolation. 
# +
# .interpolate the upsampled_data using the time method
upsampled_data = upsampled_data.interpolate(method="time")

upsampled_data.head(10)

# + [markdown] slideshow={"slide_type": "subslide"}
# #### We're now ready to resample the time-series at a daily frequency.

# +
# .resample() followed by .asfreq()
daily_data = upsampled_data.resample("D").asfreq()

# Drop the contributor column, we no longer need it
daily_data = daily_data.drop(["contributor"], axis=1)

# Generate a column of weekday names.
# FIX: DatetimeIndex.weekday_name was deprecated in pandas 0.23 and removed in
# pandas 1.0; .day_name() returns the same weekday strings and is available on
# every pandas version from 0.23 onwards.
daily_data["weekdays"] = daily_data.index.day_name()  # We did it the slow way before...

daily_data.head()

# + slideshow={"slide_type": "subslide"}
# Let's plot the data once more, to see how we're doing
daily_data.plot(figsize=(15, 4), style=".")

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Let's begin by figuring out how many coffees are made on any given day.

# + slideshow={"slide_type": "subslide"}
# Use .diff() on the coffees column; follow up with .shift()
coffees_made = daily_data.coffees.diff().shift(-1)

# Add this as a column to the dataframe
daily_data["coffees_made_today"] = coffees_made

daily_data.head()

# + [markdown] slideshow={"slide_type": "skip"}
# **Note :** we use `.shift()` here because if we look at the `.diff()` between a Monday and a Tuesday, those coffees are attributed to the Tuesday. However, what we want to say is "this many coffees were made at some point on the Monday", so we shift the entire series up one.

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Now we can group this by weekday.

# +
# .groupby weekdays, take the mean, and grab the coffees_made_today column
coffees_by_day = daily_data.groupby("weekdays").mean().coffees_made_today

coffees_by_day

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Let's order this series and then plot it.
# + # Sort coffees_by_day by our list of weekday names coffees_by_day = coffees_by_day[weekday_names] # Plot a bar chart coffees_by_day.plot(kind="bar") # + [markdown] slideshow={"slide_type": "fragment"} # Wednesdays was seminar day... # + [markdown] slideshow={"slide_type": "slide"} # ## 3. Coffee per person # + [markdown] slideshow={"slide_type": "subslide"} # #### We can now pull in data on how many people were in the department. # + # Bring in data/department_members.csv; # have the first column be the index, and parse the dates people = pd.read_csv("data/department_members.csv", index_col=[0], parse_dates=True) people.head() # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's join the datasets. # + # Use an outer join, then interpolate over missing values using nearest values daily_data = daily_data.join(people, how="outer").interpolate(method="nearest") daily_data.head() # + [markdown] slideshow={"slide_type": "skip"} # **Note :** by default, inner joins are performed. That is, if a row from one of the datasets has an index that isn't in the other dataset, that row is dropped. You can specify whether you want outer, left, or right joins, as well plenty of other useful options. The `pandas` API for joining or merging datasets is very developed. # # https://pandas.pydata.org/pandas-docs/stable/merging.html # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's create a column for the number of coffees consumed per person. # + # New column is the ratio of coffees made on a given day to number of members in the department daily_data["coffees_per_person"] = daily_data.coffees_made_today / daily_data.members # Let's drop those remaining NaNs while we're at it daily_data.dropna(inplace=True) daily_data.head() # + [markdown] slideshow={"slide_type": "subslide"} # #### We can now plot this column. 
# - # Plot the coffees_per_person column daily_data.coffees_per_person.plot() # + [markdown] slideshow={"slide_type": "subslide"} # #### Those are strange plateaus. We'll pull in another dataset, telling us when the machine was broken. # + # pd.read_csv(); try using data/coffee_status.csv # parse_dates as kwarg; also pass index_col machine_status = pd.read_csv("data/coffee_status.csv", parse_dates=["date"], index_col="date") machine_status.head() # + [markdown] slideshow={"slide_type": "skip"} # **Note :** the `parse_dates` keyword argument takes several values. By passing in a list of strings, we're telling `pandas` to attempt to parse the dates in columns with those names. # # https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html # + [markdown] slideshow={"slide_type": "subslide"} # #### What values are in the `status` column ? # - # .value_counts() machine_status.status.value_counts() # + [markdown] slideshow={"slide_type": "subslide"} # #### A quick trick to plot this as a time-series... # + # Make a pd.Series from the status series where things are OK numerical_status = machine_status.status == "OK" numerical_status.plot() # + [markdown] slideshow={"slide_type": "skip"} # **Note :** the first line here creates a boolean `pd.Series`, holding the value `True` when `machine_status.status` is `"OK"`, and `False` otherwise. Because it's a `pd.Series`, its index stays the same as that of `machine_status`, which was a `DatetimeIndex`. Then, we can plot the boolean series ( `True` appearing as `1`, and `False` appearing as `0` ), and just quickly scan to see that there are long areas where the coffee machine was operations, with short bouts ( thankfully ! ) of the machine being broken. # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's join the datasets on the date field ! 
# + slideshow={"slide_type": "-"} # .join() daily_data = daily_data.join(machine_status) daily_data.head() # + [markdown] slideshow={"slide_type": "subslide"} # #### We'll bring in this numerical representation of status column into our dataframe too. # + # Column depicting when the status was "OK" # Cast the series to ints before as you create a new column in the dataframe daily_data["numerical_status"] = (daily_data.status == "OK").astype(int) daily_data.head() # + [markdown] slideshow={"slide_type": "subslide"} # #### Let's plot both the coffees per person and the numerical status. # - # Plot both columns on the same graph, using default args daily_data[["coffees_per_person", "numerical_status"]].plot() # + [markdown] slideshow={"slide_type": "subslide"} # #### We see a strong weekday-weekend effect. Resampling weekly will fix that. # + # Resample weekly, taking the mean of each week to get a weekly value weekly_data = daily_data.resample("W").mean() weekly_data[["coffees_per_person", "numerical_status"]].plot() # + [markdown] slideshow={"slide_type": "slide"} # ## What have we achieved ? 
# + [markdown] slideshow={"slide_type": "subslide"} # #### Cleaning # # - Cast columns to the correct dtypes # - Dropped rows with no data # - Truncated the time-series when things got sparse # + [markdown] slideshow={"slide_type": "subslide"} # #### Exploring the contributions # # - Discovered who contributed to the dataset and how much # - Established how contributions varied by day of the week # + [markdown] slideshow={"slide_type": "subslide"} # #### Exploring the time-series # # - Resampled the uneven time-series to regular daily intervals # - Interpolated over missing data using our time index # - Discovered on what days of the week coffee was especially popular # - Joined the dataset with others to enrich the information available # - Derived new columns from existing ones # - Smoothed the time-series by weekly downsampling # + [markdown] slideshow={"slide_type": "subslide"} # #### Insights # # - A small number of people contributed most of the data # - Contributions were lacking at the weekends, and Mondays had the most contributions # - Seminar Wednesdays is a strong driver of coffee consumption # - Periods of low coffee drinking correlated strongly with periods where the machine was broken # - A significant dip in consumption occurred in the summer months
coffee_analysis_solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # First Order Initial Value Problem # # # The more general form of a first order Ordinary Differential Equation is: # \begin{equation} # \label{general ODE} # y^{'}=f(t,y). # \end{equation} # This can be solved analytically by integrating both sides but this is not straight forward for most problems. # Numerical methods can be used to approximate the solution at discrete points. # # ## Euler method # # The simplest one step numerical method is the Euler Method named after the most prolific of mathematicians [Leonhard Euler](https://en.wikipedia.org/wiki/Leonhard_Euler) (15 April 1707 – 18 September 1783) . # # The general Euler formula to the first order equation # $$ y^{'} = f(t,y) $$ # approximates the derivative at time point $t_i$ # $$y^{'}(t_i) \approx \frac{w_{i+1}-w_i}{t_{i+1}-t_{i}} $$ # where $w_i$ is the approximate solution of $y$ at time $t_i$. # This substitution changes the differential equation into a __difference__ equation of the form # $$ # \frac{w_{i+1}-w_i}{t_{i+1}-t_{i}}=f(t_i,w_i) $$ # Assuming uniform stepsize $t_{i+1}-t_{i}$ is replaced by $h$, re-arranging the equation gives # $$ w_{i+1}=w_i+hf(t_i,w_i),$$ # This can be read as the future $w_{i+1}$ can be approximated by the present $w_i$ and the addition of the input to the system $f(t,y)$ times the time step. # # + ## Library import numpy as np import math # %matplotlib inline import matplotlib.pyplot as plt # side-stepping mpl backend import matplotlib.gridspec as gridspec # subplots import warnings import pandas as pd warnings.filterwarnings("ignore") # - # ## Population growth # # The general form of the non-linear population growth differential equation is: # $$ y^{'}=\alpha y-\beta y^2 $$ # where $\alpha$ is the growth rate and $\beta$ is the death rate. 
The initial population at time $ a $ is
# $$ y(a)=A $$
# $$ a\leq t \leq b. $$
#
#
# ## Discrete Interval
# The continuous time $a\leq t \leq b $ is discretised into $N$ points separated by a constant stepsize
# $$ h=\frac{b-a}{N}.$$
# Here the interval is $2000\leq t \leq 2020$
# $$ h=\frac{2020-2000}{200}=0.1.$$
# This gives the 201 discrete points:
# $$ t_0=2000, \ t_1=2000.1, \ ... t_{200}=2020. $$
# This is generalised to
# $$ t_i=2000+i0.1, \ \ \ i=0,1,...,200.$$
# The plot below shows the discrete time steps.

### Setting up time
t_end=2020.0
t_start=2000.0
N=200
h=(t_end-t_start)/(N)
# N+1 sample points from 2000 to 2020 inclusive (the +0.01 guards against
# floating-point round-off excluding the right endpoint from np.arange).
time=np.arange(t_start,t_end+0.01,h)
fig = plt.figure(figsize=(10,4))
plt.plot(time,0*time,'o:',color='red')
plt.title('Illustration of discrete time points for h=%s'%(h))
# NOTE(review): the x-axis is deliberately zoomed to the first two years so the
# individual step points are visible; the full discretised interval runs to 2020.
plt.xlim((2000,2002))
plt.plot();

# ## Initial Condition
# To get a specific solution to a first order initial value problem, an __initial condition__ is required.
# For our population problem the initial condition is:
# $$y(2000)=6.$$
# ### Growth rate
# Given the growth rate $$\alpha=0.2,$$ and death rate $$\beta=0.01,$$ giving the specific differential equation,
# $$ y^{'}=0.2 y-0.01 y^2 $$
# The initial population at time $2000$ is
# $$ y(2000)=6, $$
# $$ 2000\leq t \leq 2020. $$
#
# ## Numerical approximation of Population growth
# The differential equation is transformed using the Euler method into a difference equation of the form
# $$ w_{i+1}=w_{i}+h (\alpha w_i-\beta w_i\times w_i). $$
# This approximates a series of values $w_0, \ w_1, \ ..., w_{N}$.
# For the specific example of the population equation (with $h=0.1$) the difference equation is,
# $$ w_{i+1}=w_{i}+0.1 [0.2 w_i-0.01 w_i\times w_i], $$
# where $i=0,1,2,...,199$, and $w_0=6$. From this initial condition the series is approximated.
# The plot below shows the Euler approximation $w$ in blue squares.
# +
w=np.zeros(N+1)
w[0]=6
# Euler step: w_{i+1} = w_i + h * f(t_i, w_i) with f(t,y) = 0.2*y - 0.01*y^2
for i in range (0,N):
    w[i+1]=w[i]+h*(0.2*w[i]-0.01*w[i]*w[i])

fig = plt.figure(figsize=(10,4))
plt.plot(time,w,'s:',color='blue',label='Euler')
plt.xlim((min(time),max(time)))
plt.xlabel('time')
plt.legend(loc='best')
plt.title('Euler solution')
plt.plot();
# -

# ### Table
# The table below shows the iteration $i$, the discrete time point t[i], and the Euler approximation w[i] of the solution $y$ at time point t[i] for the non-linear population equation.

d = {'time t_i': time[0:10], 'Euler (w_i) ':w[0:10]}
df = pd.DataFrame(data=d)
df

# ## Numerical Error
# With a numerical solution there are two types of error:
# * local truncation error at one time step;
# * global error which is the propagation of local error.
#
# ### Derivation of Euler Local truncation error
# The left hand side of a initial value problem $\frac{dy}{dt}$ is approximated by __Taylors theorem__ expanded about a point $t_0$ giving:
# \begin{equation}y(t_1) = y(t_0)+(t_1-t_0)y^{'}(t_0) + \frac{(t_1-t_0)^2}{2!}y^{''}(\xi), \ \ \ \ \ \ \xi \in [t_0,t_1]. \end{equation}
# Rearranging and letting $h=t_1-t_0$ the equation becomes
# $$y^{'}(t_0)=\frac{y(t_1)-y(t_0)}{h}-\frac{h}{2}y^{''}(\xi). $$
# From this the local truncation error is
# $$\tau \leq \frac{h}{2}M, $$
# where $y^{''}(t) \leq M $.
# #### Derivation of Euler Local truncation error for the Population Growth
# As the exact solution $y$ is unknown we cannot get an exact estimate of the second derivative
# $$y'(t)=0.2 y-0.01 y^2,$$
# differentiate with respect to $t$,
# $$y''(t)=0.2 y'-0.01 (2yy'),$$
# subbing the original equation gives
# $$y''(t)=0.2 (0.2 y-0.01 y^2)-0.01 \big(2y(0.2 y-0.01 y^2)\big),$$
# which expresses the second derivative as a function of the exact solution $y$, this is still a problem as the value of $y$ is unknown, assuming,
# $$max|y''|=M\leq 40,$$
# this gives a local truncation error for $h=0.1$ for our non-linear equation of
# $$\tau=\frac{h}{2}40=2. $$

M=40
fig = plt.figure(figsize=(10,4))
# Plot the constant upper bound h*M/2 = 2 over the first time step.
plt.plot(time[0:2],0.1*M/2*np.ones(2),'v:' ,color='black',label='Upper Local Truncation')
plt.xlabel('time')
plt.legend(loc='best')
plt.title('Local Truncation Error')
plt.plot();

# ### Global truncation error for the population equation
# For the population equation specific values $L$ and $M$ can be calculated.
#
# In this case $f(t,y)=0.2 y-0.01 y^2$ is continuous and satisfies a Lipschitz Condition with constant $L$:
# $$ \left|\frac{\partial f(t,y)}{\partial y}\right|\leq L $$
# $$ \left|\frac{\partial (0.2 y-0.01 y^2)}{\partial y}\right|\leq |0.2-0.01(2\times y)| \leq |0.2-0.01(2\times 50)|\leq 0.8 $$
#
# on $D=\{(t,y)|2000\leq t \leq 2020, 0 < y < 50 \}$ and that a constant $M$
# exists with the property that
# $$ |y^{''}(t)|\leq M\leq 6. $$
# NOTE(review): the bound $M\leq 6$ here conflicts with the bound $M\leq 40$
# used for the local truncation error above — confirm which bound is intended.
#
# __Specific Theorem Global Error__
#
# Let $y(t)$ denote the unique solution of the Initial Value Problem
# $$ y^{'}=0.2 y-0.01 y^2, \ \ \ 2000\leq t \leq 2020, \ \ \ y(2000)=6, $$
# and $w_0,w_1,...,w_N$ be the approximations generated by the Euler method for some
# positive integer N. Then for $i=0,1,...,N$ the error is:
# $$ |y(t_i)-w_i| \leq \frac{6 h}{2\times 0.8}|e^{0.8(t_i-2000)}-1|. $$
#
# ## Non-linear population equation with a temporal oscillation
# Given the specific population differential equation with a wiggle,
# $$ y^{'}=0.2 y-0.01 y^2+\sin(2\pi t), $$
# with the initial population at time $2000$ is
# $$ y(2000)=6, $$
# $$ 2000\leq t \leq 2020. $$
#
# For the specific example of the population equation the difference equation is
# $$ w_{i+1}=w_{i}+h (0.2 w_i-0.01 w_i\times w_i+\sin(2 \pi t_i)), $$
# for $i=0,1,...,199$,
# where $w_0=6$. From this initial condition the series is approximated.
# The figure below shows the discrete solution.
# +
w=np.zeros(N+1)
w[0]=6
# Euler step for the oscillating model:
# w_{i+1} = w_i + h * (0.2*w_i - 0.01*w_i^2 + sin(2*pi*t_i))
for i in range (0,N):
    w[i+1]=w[i]+h*(0.2*w[i]-0.01*w[i]*w[i]+np.sin(2*np.pi*time[i]))

fig = plt.figure(figsize=(10,4))
plt.plot(time,w,'s:',color='blue',label='Euler')
plt.xlim((min(time),max(time)))
plt.xlabel('time')
plt.legend(loc='best')
plt.title('Euler solution')
plt.plot();
# -

# ### Table
# The table below shows the iteration $i$, the discrete time point t[i], and the Euler approximation w[i] of the solution $y$ at time point t[i] for the non-linear population equation with a temporal oscillation.

# NOTE(review): a leftover cell here built this table with `table = ListTable()`,
# but `ListTable` is not defined anywhere in this notebook, so it raised a
# NameError at runtime.  The pandas DataFrame below presents the same
# information, so the broken cell has been removed.
d = {'time t_i': time[0:10], 'Euler (w_i) ':w[0:10]}
df = pd.DataFrame(data=d)
df
Chapter 01 - Euler Methods/.ipynb_checkpoints/102_Euler_method_with_Theorems_nonlinear_Growth_function-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pymongo
from pymongo import MongoClient
import pprint
from IPython.display import clear_output

# SECURITY NOTE(review): a real username/password is hard-coded in this
# connection URI.  Rotate these credentials and load the URI from an
# environment variable or secrets store instead of committing it to source.
# Replace XXXX with your connection URI from the Atlas UI
client = MongoClient('mongodb+srv://dbAdmin:pa55word@mflix.phy3v.mongodb.net/mflix_db?retryWrites=true&w=majority')

# Filter documents based on a specific condition.
# Exact-match alternative: the array must be exactly ['Korean', 'English'],
# in that order, with no other elements:
# query_filter = {
#     'languages': ['Korean', 'English']
# }

'''$all - The $all operator selects the documents where the value of a field is an array that contains all the specified elements. To specify an $all expression, use the following prototype'''

# Dot-notation alternative ('languages.0' addresses the first array element):
# English must be the first listed language:
# query_filter = {
#     'languages.0': 'English'
# }

# Active filter: movies whose languages array contains BOTH Korean and English,
# in any order, matching the $all description above.
# FIX(review): the original cell re-assigned `filter` several times and ended
# on an empty `{}`, which silently matched every document; it also shadowed
# the Python builtin `filter`, so the variable is renamed.
query_filter = {
    'languages': {'$all': ['Korean', 'English']}
}

# Explicitly include and exclude fields in the query output
# (_id is suppressed; only title and languages are returned).
projection = {
    '_id': 0,
    'title': 1,
    'languages': 1
}

clear_output()
pprint.pprint(list(client.mflix_db.movies_scratch.find(query_filter, projection)))
# -
Database_Development/Intro_To_MongoDB/korean_and_english_only.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: nama # language: python # name: nama # --- # %load_ext autoreload # %autoreload 2 # + import numpy as np import random import torch from collections import defaultdict from scipy.sparse import csr_matrix from sklearn.cluster import AgglomerativeClustering from tqdm.auto import tqdm from src.data.filesystem import fopen from src.data.ancestry import load_train_test from src.data.prepare import normalize from src.models.utils import add_padding, remove_padding, build_token_idx_maps, convert_names_to_model_inputs, get_best_matches # - # ### Configure # + sample_size = 0 max_closure_size = 10000 max_distance = 0.22 cluster_distance_threshold = 0.155 super_cluster_distance_threshold = 0.205 num_candidates = 1000 eps = 0.000001 model_filename = '../data/models/anc-triplet-bilstm-100-512-40-05.pth' # process_nicknames = True # werelate_names_filename = 'givenname_similar_names.werelate.20210414.tsv' # nicknames_filename = '../data/models/givenname_nicknames.txt' # name_freqs_filename = 'given-final.normal.txt' # clusters_filename = 'givenname_clusters.tsv' # super_clusters_filename = 'givenname_super_clusters.tsv' werelate_names_filename = '../data/external/surname_similar_names.werelate.20210414.tsv' nicknames_filename = '' name_freqs_filename = '../data/external/surname-final.normal.txt' clusters_filename = '../data/models/ancestry_surname_clusters-20211028.tsv' super_clusters_filename = '../data/models/ancestry_surname_super_clusters-20211028.tsv' is_surname = True # - # ### Read WeRelate names into all_names # Later, we'll want to read frequent FS names into all_names # TODO rewrite this in just a few lines using pandas def load_werelate_names(path, is_surname): name_variants = defaultdict(set) with fopen(path, mode="r", encoding="utf-8") as f: is_header = True for line in f: if is_header: is_header 
= False continue fields = line.rstrip().split("\t") # normalize should only return a single name piece, but loop just in case for name_piece in normalize(fields[0], is_surname): confirmed_variants = fields[1].strip().split(" ") if len(fields) >= 2 else [] computer_variants = fields[2].strip().split(" ") if len(fields) == 3 else [] variants = confirmed_variants + computer_variants for variant in variants: for variant_piece in normalize(variant, is_surname): name_variants[name_piece].add(variant_piece) return name_variants # + all_names = set() name_variants = load_werelate_names(werelate_names_filename, is_surname) print(len(name_variants)) for k, v in name_variants.items(): all_names.add(add_padding(k)) all_names.update(add_padding(variant) for variant in v) print(len(all_names), next(iter(all_names))) name_variants = None # - # ### Read nicknames and remove from names def load_nicknames(path): nicknames = defaultdict(set) with fopen(path, mode="r", encoding="utf-8") as f: for line in f: names = line.rstrip().split(" ") # normalize should only return a single name piece, but loop just in case for name_piece in normalize(names[0], False): orig_name = add_padding(name_piece) for nickname in names[1:]: for nickname_piece in normalize(nickname, False): nicknames[add_padding(nickname_piece)].add(orig_name) return nicknames name_nicks = defaultdict(set) if not is_surname: nick_names = load_nicknames(nicknames_filename) for nick, names in nick_names.items(): for name in names: name_nicks[name].add(nick) print(next(iter(nick_names.items())), "nick_names", len(nick_names.keys()), "name_nicks", len(name_nicks.keys())) all_names -= set(nickname for nickname in nick_names.keys()) print(len(all_names)) # ### Map names to ids def map_names_to_ids(names): ids = range(len(names)) return dict(zip(names, ids)), dict(zip(ids, names)) name_ids, id_names = map_names_to_ids(all_names) print(next(iter(name_ids.items())), next(iter(id_names.items()))) # ### Read name frequencies # TODO 
rewrite this using pandas too def load_name_freqs(path, is_surname): name_freqs = defaultdict(int) with fopen(path, mode="r", encoding="utf-8") as f: for line in f: fields = line.rstrip().split("\t") for name_piece in normalize(fields[0], is_surname): name_freqs[name_piece] = int(fields[1]) return name_freqs name_freqs = load_name_freqs(name_freqs_filename, is_surname) # keep only entries in all_names name_freqs = dict((add_padding(k),v) for k,v in name_freqs.items() if add_padding(k) in all_names) print(len(name_freqs), next(iter(name_freqs.items()))) # ### Load model model = torch.load(model_filename) # ### Encode names MAX_NAME_LENGTH=30 char_to_idx_map, idx_to_char_map = build_token_idx_maps() # #### Take a sample because encoded names require a lot of memory if sample_size <= 0 or sample_size >= len(all_names): names_sample = np.array(list(all_names)) else: names_sample = np.array(random.sample(all_names, sample_size)) print(names_sample.shape) # + [markdown] pycharm={"name": "#%% md\n"} # #### Compute encodings # - # Get embeddings names_tensor, _ = convert_names_to_model_inputs(names_sample, char_to_idx_map, MAX_NAME_LENGTH) # + pycharm={"name": "#%%\n"} # Get encodings for the names from the encoder # TODO why do I need to encode in chunks? chunk_size = 10000 nps = [] for begin in tqdm(range(0, len(names_tensor), chunk_size)): nps.append(model(names_tensor[begin:begin+chunk_size], just_encoder=True).detach().numpy()) # + pycharm={"name": "#%%\n"} names_encoded = np.concatenate(nps, axis=0) nps = None names_encoded.shape # - # ### Compute distances # + pycharm={"is_executing": true, "name": "#%%\n"} name_candidates = get_best_matches(names_encoded, names_encoded, names_sample, num_candidates=num_candidates, metric='euclidean') # - # what's going on here? 
distances = np.hstack((np.repeat(names_sample, num_candidates)[:, np.newaxis], name_candidates.reshape(-1,2))) # remove distances > max_distance distances = distances[distances[:, -1].astype('float') <= max_distance] # sort distances = distances[distances[:, -1].astype('float').argsort()] print(distances.shape) name_candidates = None # ### Compute closures # + # iterate over all distances, create closures and save scores next_closure = 0 closure_ids = {} id_closure = {} row_ixs = [] col_ixs = [] dists = [] max_size = 0 for row in tqdm(distances): name1 = row[0] name2 = row[1] id1 = name_ids[name1] id2 = name_ids[name2] # each distance is in distances twice if id1 > id2: continue distance = max(eps, float(row[2])) closure1 = id_closure.get(id1) closure2 = id_closure.get(id2) if closure1 is None and closure2 is not None: id1, id2 = id2, id1 name1, name2 = name2, name1 closure1, closure2 = closure2, closure1 # add to distance matrix row_ixs.append(id1) col_ixs.append(id2) dists.append(distance) # skip if names are the same if id1 == id2: continue row_ixs.append(id2) col_ixs.append(id1) dists.append(distance) # create closures if closure1 is None: # if closure1 is None, then closure2 must be none also due to the above # so create a new closure with id1 and id2 closure1 = next_closure next_closure += 1 id_closure[id1] = closure1 id_closure[id2] = closure1 closure_ids[closure1] = [id1, id2] next_closure += 1 elif closure2 is None: # put id2 into id1's closure id_closure[id2] = closure1 closure_ids[closure1].append(id2) elif closure1 != closure2 and len(closure_ids[closure1]) + len(closure_ids[closure2]) <= max_closure_size: # move all ids in closure2 into closure1 for id in closure_ids[closure2]: id_closure[id] = closure1 closure_ids[closure1].append(id) del closure_ids[closure2] if len(closure_ids[closure1]) > max_size: max_size = len(closure_ids[closure1]) # create distances matrix dist_matrix = csr_matrix((dists, (row_ixs, col_ixs))) print("max closure_size", 
max_size) print("number of closures", len(closure_ids), "number of names enclosed", len(id_closure)) # - # ### Compute clusters def compute_clusters(closure_ids, id_names, dist_matrix, linkage, distance_threshold, eps, max_dist): cluster_names = defaultdict(set) name_cluster = {} for closure, ids in tqdm(closure_ids.items()): clusterer = AgglomerativeClustering(n_clusters=None, affinity='precomputed', linkage=linkage, distance_threshold=distance_threshold) X = dist_matrix[ids][:, ids].todense() X[X < eps] = max_dist labels = clusterer.fit_predict(X) for id, label in zip(ids, labels): name = id_names[id] cluster = f'{closure}_{label}' cluster_names[cluster].add(name) name_cluster[name] = cluster return cluster_names, name_cluster # + # try ward, average, single cluster_linkage = 'average' max_dist = 10.0 cluster_names, name_cluster = compute_clusters(closure_ids, id_names, dist_matrix, cluster_linkage, cluster_distance_threshold, eps, max_dist) print(len(cluster_names)) # - # #### Add unclustered names as singleton clusters def add_singleton_names(cluster_names, name_cluster, names_sample): for ix, name in enumerate(names_sample): if name not in name_cluster: cluster = f'{ix}' cluster_names[cluster].add(name) name_cluster[name] = cluster return cluster_names, name_cluster cluster_names, name_cluster = add_singleton_names(cluster_names, name_cluster, names_sample) print(len(cluster_names)) # ### Eval cluster P/R over Ancestry test data # + train, test = load_train_test("../data/raw/records25k_data_train.csv", "../data/raw/records25k_data_test.csv") _, _, candidates_train = train input_names_test, weighted_relevant_names_test, candidates_test = test all_candidates = np.concatenate((candidates_train, candidates_test)) # - def get_precision_recall(names_sample, all_candidates, input_names_test, weighted_relevant_names_test, cluster_names, name_cluster): names_sample_set = set(names_sample.tolist()) all_candidates_set = set(all_candidates.tolist()) precisions = [] 
recalls = [] for input_name, weighted_relevant_names in zip(input_names_test, weighted_relevant_names_test): if input_name not in names_sample_set: continue cluster_id = name_cluster[input_name] names_in_cluster = cluster_names[cluster_id] & all_candidates_set found_recall = 0.0 total_recall = 0.0 found_count = 0 for name, weight, _ in weighted_relevant_names: if name in names_sample_set: total_recall += weight if name in names_in_cluster: found_recall += weight found_count += 1 if total_recall == 0.0: continue precision = found_count / len(names_in_cluster) if len(names_in_cluster) > 0 else 1.0 recall = found_recall / total_recall precisions.append(precision) recalls.append(recall) avg_precision = sum(precisions) / len(precisions) avg_recall = sum(recalls) / len(recalls) return avg_precision, avg_recall, len(precisions) precision, recall, total = get_precision_recall(names_sample, all_candidates, input_names_test, weighted_relevant_names_test, cluster_names, name_cluster) print("Total=", total, " Precision=", precision, " Recall=", recall) # ### Write clusters def write_clusters(path, cluster_names, name_freqs, name_nicks): cluster_id_name_map = {} with fopen(path, mode="w", encoding="utf-8") as f: for cluster_id, names in cluster_names.items(): # get most-frequent name cluster_name = max(names, key=(lambda name: name_freqs.get(name, 0))) # map cluster id to cluster name cluster_id_name_map[cluster_id] = cluster_name # add nicknames nicknames = set() if name_nicks: for name in names: if name in name_nicks: nicknames.update(name_nicks[name]) # remove padding cluster_name = remove_padding(cluster_name) names = [remove_padding(name) for name in names | nicknames] # write cluster f.write(f'{cluster_name}\t{" ".join(names)}\n') return cluster_id_name_map cluster_id_name_map = write_clusters(clusters_filename, cluster_names, name_freqs, name_nicks) # ### Create super-clusters super_cluster_names, name_super_cluster = compute_clusters(closure_ids, id_names, dist_matrix, 
cluster_linkage, super_cluster_distance_threshold, eps, max_dist) print(len(super_cluster_names)) super_cluster_names, name_super_cluster = add_singleton_names(super_cluster_names, name_super_cluster, names_sample) print(len(super_cluster_names)) precision, recall, total = get_precision_recall(names_sample, all_candidates, input_names_test, weighted_relevant_names_test, super_cluster_names, name_super_cluster) print("Total=", total, " Precision=", precision, " Recall=", recall) # get cluster names for each name in super cluster super_cluster_clusters = {id: set([cluster_id_name_map[name_cluster[name]] for name in names]) for id, names in super_cluster_names.items()} # ### Write super-clusters _ = write_clusters(super_clusters_filename, super_cluster_clusters, name_freqs, None)
reports/80_cluster_anc_triplet-initial.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # formats: ipynb,py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] # # Trax : Ungraded Lecture Notebook # # In this notebook you'll get to know about the Trax framework and learn about some of its basic building blocks. # # # %% [markdown] # ## Background # # ### Why Trax and not TensorFlow or PyTorch? # # TensorFlow and PyTorch are both extensive frameworks that can do almost anything in deep learning. They offer a lot of flexibility, but that often means verbosity of syntax and extra time to code. # # Trax is much more concise. It runs on a TensorFlow backend but allows you to train models with 1 line commands. Trax also runs end to end, allowing you to get data, model and train all with a single terse statements. This means you can focus on learning, instead of spending hours on the idiosyncrasies of big framework implementation. # # ### Why not Keras then? # # Keras is now part of Tensorflow itself from 2.0 onwards. Also, trax is good for implementing new state of the art algorithms like Transformers, Reformers, BERT because it is actively maintained by Google Brain Team for advanced deep learning tasks. It runs smoothly on CPUs,GPUs and TPUs as well with comparatively lesser modifications in code. # # ### How to Code in Trax # Building models in Trax relies on 2 key concepts:- **layers** and **combinators**. # Trax layers are simple objects that process data and perform computations. They can be chained together into composite layers using Trax combinators, allowing you to build layers and models of any complexity. # # ### Trax, JAX, TensorFlow and Tensor2Tensor # # You already know that Trax uses Tensorflow as a backend, but it also uses the JAX library to speed up computation too. 
You can view JAX as an enhanced and optimized version of numpy. # # **Watch out for assignments which import `import trax.fastmath.numpy as np`. If you see this line, remember that when calling `np` you are really calling Trax’s version of numpy that is compatible with JAX.** # # As a result of this, where you used to encounter the type `numpy.ndarray` now you will find the type `jax.interpreters.xla.DeviceArray`. # # Tensor2Tensor is another name you might have heard. It started as an end to end solution much like how Trax is designed, but it grew unwieldy and complicated. So you can view Trax as the new improved version that operates much faster and simpler. # # ### Resources # # - Trax source code can be found on Github: [Trax](https://github.com/google/trax) # - JAX library: [JAX](https://jax.readthedocs.io/en/latest/index.html) # # %% [markdown] # ## Installing Trax # # Trax has dependencies on JAX and some libraries like JAX which are yet to be supported in [Windows](https://github.com/google/jax/blob/1bc5896ee4eab5d7bb4ec6f161d8b2abb30557be/README.md#installation) but work well in Ubuntu and MacOS. We would suggest that if you are working on Windows, try to install Trax on WSL2. # # Official maintained documentation - [trax-ml](https://trax-ml.readthedocs.io/en/latest/) not to be confused with this [TraX](https://trax.readthedocs.io/en/latest/index.html) # %% # #!pip install trax==1.3.1 Use this version for this notebook # %% [markdown] # # ## Imports # %% tags=[] import numpy as np # regular ol' numpy from trax import layers as tl # core building block from trax import shapes # data signatures: dimensionality and type from trax import fastmath # uses jax, offers numpy on steroids # %% tags=[] # Trax version 1.3.1 or better # !pip list | grep trax # %% [markdown] # ## Layers # Layers are the core building blocks in Trax or as mentioned in the lectures, they are the base classes. # # They take inputs, compute functions/custom calculations and return outputs. 
# # You can also inspect layer properties. Let me show you some examples. # # %% [markdown] # ### Relu Layer # First I'll show you how to build a relu activation function as a layer. A layer like this is one of the simplest types. Notice there is no object initialization so it works just like a math function. # # **Note: Activation functions are also layers in Trax, which might look odd if you have been using other frameworks for a longer time.** # %% tags=[] # Layers # Create a relu trax layer relu = tl.Relu() # Inspect properties print("-- Properties --") print("name :", relu.name) print("expected inputs :", relu.n_in) print("promised outputs :", relu.n_out, "\n") # Inputs x = np.array([-2, -1, 0, 1, 2]) print("-- Inputs --") print("x :", x, "\n") # Outputs y = relu(x) print("-- Outputs --") print("y :", y) # %% [markdown] # ### Concatenate Layer # Now I'll show you how to build a layer that takes 2 inputs. Notice the change in the expected inputs property from 1 to 2. # %% tags=[] # Create a concatenate trax layer concat = tl.Concatenate() print("-- Properties --") print("name :", concat.name) print("expected inputs :", concat.n_in) print("promised outputs :", concat.n_out, "\n") # Inputs x1 = np.array([-10, -20, -30]) x2 = x1 / -10 print("-- Inputs --") print("x1 :", x1) print("x2 :", x2, "\n") # Outputs y = concat([x1, x2]) print("-- Outputs --") print("y :", y) # %% [markdown] # ## Layers are Configurable # You can change the default settings of layers. For example, you can change the expected inputs for a concatenate layer from 2 to 3 using the optional parameter `n_items`. 
# %% tags=[] # Configure a concatenate layer concat_3 = tl.Concatenate(n_items=3) # configure the layer's expected inputs print("-- Properties --") print("name :", concat_3.name) print("expected inputs :", concat_3.n_in) print("promised outputs :", concat_3.n_out, "\n") # Inputs x1 = np.array([-10, -20, -30]) x2 = x1 / -10 x3 = x2 * 0.99 print("-- Inputs --") print("x1 :", x1) print("x2 :", x2) print("x3 :", x3, "\n") # Outputs y = concat_3([x1, x2, x3]) print("-- Outputs --") print("y :", y) # %% [markdown] # **Note: At any point,if you want to refer the function help/ look up the [documentation](https://trax-ml.readthedocs.io/en/latest/) or use help function.** # %% #help(tl.Concatenate) #Uncomment this to see the function docstring with explaination # %% [markdown] # ## Layers can have Weights # Some layer types include mutable weights and biases that are used in computation and training. Layers of this type require initialization before use. # # For example the `LayerNorm` layer calculates normalized data, that is also scaled by weights and biases. During initialization you pass the data shape and data type of the inputs, so the layer can initialize compatible arrays of weights and biases. 
# %% # Uncomment any of them to see information regarding the function # help(tl.LayerNorm) # help(shapes.signature) # %% tags=[] # Layer initialization norm = tl.LayerNorm() # You first must know what the input data will look like x = np.array([0, 1, 2, 3], dtype="float") # Use the input data signature to get shape and type for initializing weights and biases norm.init(shapes.signature(x)) # We need to convert the input datatype from usual tuple to trax ShapeDtype print("Normal shape:",x.shape, "Data Type:",type(x.shape)) print("Shapes Trax:",shapes.signature(x),"Data Type:",type(shapes.signature(x))) # Inspect properties print("-- Properties --") print("name :", norm.name) print("expected inputs :", norm.n_in) print("promised outputs :", norm.n_out) # Weights and biases print("weights :", norm.weights[0]) print("biases :", norm.weights[1], "\n") # Inputs print("-- Inputs --") print("x :", x) # Outputs y = norm(x) print("-- Outputs --") print("y :", y) # %% [markdown] # ## Custom Layers # This is where things start getting more interesting! # You can create your own custom layers too and define custom functions for computations by using `tl.Fn`. Let me show you how. # %% help(tl.Fn) # %% tags=[] # Define a custom layer # In this example you will create a layer to calculate the input times 2 def TimesTwo(): layer_name = "TimesTwo" #don't forget to give your custom layer a name to identify # Custom function for the custom layer def func(x): return x * 2 return tl.Fn(layer_name, func) # Test it times_two = TimesTwo() # Inspect properties print("-- Properties --") print("name :", times_two.name) print("expected inputs :", times_two.n_in) print("promised outputs :", times_two.n_out, "\n") # Inputs x = np.array([1, 2, 3]) print("-- Inputs --") print("x :", x, "\n") # Outputs y = times_two(x) print("-- Outputs --") print("y :", y) # %% [markdown] # ## Combinators # You can combine layers to build more complex layers. 
Trax provides a set of objects named combinator layers to make this happen. Combinators are themselves layers, so behavior commutes. # # # %% [markdown] # ### Serial Combinator # This is the most common and easiest to use. For example could build a simple neural network by combining layers into a single layer using the `Serial` combinator. This new layer then acts just like a single layer, so you can inspect intputs, outputs and weights. Or even combine it into another layer! Combinators can then be used as trainable models. _Try adding more layers_ # # **Note:As you must have guessed, if there is serial combinator, there must be a parallel combinator as well. Do try to explore about combinators and other layers from the trax documentation and look at the repo to understand how these layers are written.** # # %% # help(tl.Serial) # help(tl.Parallel) # %% tags=[] # Serial combinator serial = tl.Serial( tl.LayerNorm(), # normalize input tl.Relu(), # convert negative values to zero times_two, # the custom layer you created above, multiplies the input recieved from above by 2 ### START CODE HERE # tl.Dense(n_units=2), # try adding more layers. eg uncomment these lines # tl.Dense(n_units=1), # Binary classification, maybe? uncomment at your own peril # tl.LogSoftmax() # Yes, LogSoftmax is also a layer ### END CODE HERE ) # Initialization x = np.array([-2, -1, 0, 1, 2]) #input serial.init(shapes.signature(x)) #initialising serial instance print("-- Serial Model --") print(serial,"\n") print("-- Properties --") print("name :", serial.name) print("sublayers :", serial.sublayers) print("expected inputs :", serial.n_in) print("promised outputs :", serial.n_out) print("weights & biases:", serial.weights, "\n") # Inputs print("-- Inputs --") print("x :", x, "\n") # Outputs y = serial(x) print("-- Outputs --") print("y :", y) # %% [markdown] # ## JAX # Just remember to lookout for which numpy you are using, the regular ol' numpy or Trax's JAX compatible numpy. 
Both tend to use the alias np so watch those import blocks. # # **Note:There are certain things which are still not possible in fastmath.numpy which can be done in numpy so you will see in assignments we will switch between them to get our work done.** # %% tags=[] # Numpy vs fastmath.numpy have different data types # Regular ol' numpy x_numpy = np.array([1, 2, 3]) print("good old numpy : ", type(x_numpy), "\n") # Fastmath and jax numpy x_jax = fastmath.numpy.array([1, 2, 3]) print("jax trax numpy : ", type(x_jax)) # %% [markdown] # ## Summary # Trax is a concise framework, built on TensorFlow, for end to end machine learning. The key building blocks are layers and combinators. This notebook is just a taste, but sets you up with some key inuitions to take forward into the rest of the course and assignments where you will build end to end models.
3 - Natural Language Processing with Sequence Models/Week 1/C3W1_L1_Introduction to Trax.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # Company XYZ is an Online Travel Agent, such as Expedia, Booking.com, etc. # # They store their data in JSON files. Each row in the json shows all different cities which have been searched for by a user within the same session (as well as some other info about the user). That is, if I go to company XYZ site and look for hotels in NY and SF within the same session, the corresponding JSON row will show my user id, some basic info about me and the two cities. # # Index # * [Load and clean data](#Load-and-clean-data) # * [Answer question 1](#Answer-question-1) # * [Answer question 2](#Answer-question-2) # * [Answer question 3](#Answer-question-3) # + import itertools from collections import Counter import datetime import json import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.style.use('ggplot') from sklearn.preprocessing import normalize from sklearn.decomposition import PCA from sklearn.neighbors import NearestNeighbors # %matplotlib inline # - # ## Load and clean data def clean_json(d): """ clean the json data, make the data easier to be processed """ assert len(d['cities']) == 1 d['cities'] = d['cities'][0] assert len(d['session_id']) == 1 d['session_id'] = d['session_id'][0] assert len(d['unix_timestamp']) == 1 d['timestamp'] = datetime.datetime.utcfromtimestamp(d['unix_timestamp'][0]) del d['unix_timestamp'] # -------- retrieve users user_dict = d['user'] assert len(user_dict) == 1 user_dict = user_dict[0] assert len(user_dict) == 1 user_dict = user_dict[0] d['user_id'] = user_dict['user_id'] d['user_country'] = user_dict['country'] del d['user'] return d # + with open("city_search.json",'rt') as inf: sessions = json.load(inf) for d in sessions: clean_json(d) sessions = pd.DataFrame(sessions) sessions = 
sessions.set_index('session_id') # - sessions.tail()# get a feeling about the data # empty string will be treated as NA when read back # and some function will dropna=True by default # so give those missing countries another tag sessions.loc[sessions.user_country == '','user_country'] = 'Missing' sessions.tail() # ## Answer question 1 # There was a bug in the code and one country didn't get logged. It just shows up as an empty field (""). Can you guess which country was that? How? # # my basic idea to solve this problem is <span style='color:orange;font-size:1.5em'>checking the 'searching time' habit of each country. the time difference can give us some hint about the geographic location of each country.</span> # count the statistics about each hour and its ratio in a certain country hours_by_country = sessions.groupby("user_country").apply(lambda df: df.timestamp.dt.hour.value_counts(normalize=True)).unstack(fill_value=0) hours_by_country # plot the bar plot about search time ~ ratio countries = ['Missing','US','UK','DE', 'ES', 'FR', 'IT'] fig,axes = plt.subplots(len(countries),1,sharex=True, sharey=True) fig.set_size_inches(20,10) for ax,country in itertools.izip(axes,countries): hours_by_country.loc[country,:].plot(kind='bar',ax=ax) ax.set_title(country) # from above plot, we can see that, "the searching time" habit of the "Missing" and US are very different from 'UK','DE', 'ES', 'FR', 'IT'. ** so 'Missing' country, like US, cannot be in Europe **. # # also from the plot above, ** the Missing country has a time difference about 11~12 hours with US **. # # based on above two facts, I guess the Missing country is in Asia, and it must have good economic to allow people to travel aboard. <span style='color:orange;font-size:1.5em'>Hence, I guess the Missing country may be China, Japan or South Korean</span>. # ## Answer question 2 # For each city, find the most likely city to be also searched for within the same session. 
# # my basic idea to solve this problem is: # 1. model each city as a vector. i-th value in the vector represents #search by i-th user # 2. then calculate cosine similarity between any two city vectors, get a similarity matrix # 3. similarity[i,j] represents the similarity between i-th and j-th city # 4. sort i-th row or column to get the top similar cities with i-th city sessions['cities'] = sessions.cities.str.split(', ') def count_cities(df): c = Counter(city for cities in df.cities for city in cities) return pd.Series(c) searchcity_by_user = sessions.groupby("user_id").apply(count_cities).unstack(fill_value=0) searchcity_by_user.shape searchcity_by_user = searchcity_by_user.transpose() searchcity_by_user.head() # make each row unit-norm, then dot-product is equivalent to cosine searchcity_by_user_normed = normalize(searchcity_by_user,axis=1) city_similarity = searchcity_by_user_normed.dot(searchcity_by_user_normed.T) city_similarity = pd.DataFrame(city_similarity, index = searchcity_by_user.index, columns = searchcity_by_user.index) city_similarity.head() # + ### find top K most similar of each city def most_similar(s,topk): # [0] must be itself similar_ones = s.sort_values(ascending=False)[1:topk+1].index.values return pd.Series(similar_ones,index = ["similar#{}".format(i) for i in xrange(1,topk+1)]) most_similar_cities = city_similarity.apply(most_similar,topk=1,axis=1) # - most_similar_cities.sample(20)# check the result most_similar_cities.to_csv("most_similar_cities.csv") # ## Answer question 3 # Travel sites are browsed by two kinds of users. Users who are actually planning a trip and users who just dream about a vacation. The first ones have obviously a much higher purchasing intent. Users planning a trip often search for cities close to each other, while users who search for cities far away from each other are often just dreaming about a vacation. 
That is, a user searching for LA, SF and Las Vegas in the same session is much more likely to book a hotel than a user searching for NY, Paris, Kuala Lumpur (makes sense, right?). Based on this idea, come up with an algorithm that clusters sessions into two groups: high intent and low intent. Explain all assumptions you make along the way. sessions["num_searched"] = sessions.cities.map(len) sessions.tail()# glance the data def search_distance(cities,similar2dist): """ input: cities: a list of cities included in the search similar2dist: a function which maps similarity to distance return: distance: distance among cities in the search """ sumdist = 0 total = len(cities) # if total=1, then distance =0 for i1 in xrange(total-1): city1 = cities[i1] for i2 in xrange(i1+1,total): city2 = cities[i2] similarity = city_similarity.loc[city1,city2] dist = similar2dist(similarity) sumdist += dist # if there are n cities in the search, sum will be calculated 0.5*n*(n-1) times # then the distance biased too much to search which contains many cities # so I decide to divide 'n', then the distance is O(n) instead of O(n2) # which is more fair, but also takes account of "more city in the search, more likely to be irrelevant" return sumdist / total distances = sessions.cities.map(lambda cities: search_distance(cities,lambda s: np.sqrt(1-s*s))) # distances = sessions.cities.map(lambda cities: search_distance(cities,lambda s: 1-s*s)) plt.xticks(np.arange(0,5,0.5)) # filter out distance=0, because a lot of search with one city will have distance=0 distances[distances>0].hist(bins=100,normed=True,figsize=(12,6)) # according to above plot, I decide to choose <span style='color:orange;font-size:1.5em'>distance=0.7 as the cutoff between 'high intent search' and 'low intent search'</span>. if the distance is higher than 0.7, then I classify such search as 'low intent'. 
dist_cutoff = 0.9 print 'percentage of low intent search: {:.2f}%'.format((distances>dist_cutoff).mean() * 100) low_intent_search = sessions.loc[distances>dist_cutoff,['num_searched','cities']] low_intent_search
13.CitySimilarity/city_search.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Bayesian # language: python # name: bayesian # --- import pandas as pd import seaborn as sns import numpy as np import dowhy from dowhy.do_why import CausalModel import dowhy.datasets data = dowhy.datasets.linear_dataset( beta=10, num_common_causes=3, num_instruments=2, num_samples=10000, treatment_is_binary=True) data.keys() data['treatment_name'] data['outcome_name'] data['common_causes_names'] data['df'].head() data["treatment_name"] data["outcome_name"] model = CausalModel( data=data["df"], treatment=data["treatment_name"], outcome=data["outcome_name"], graph=data["gml_graph"]) identified_estimand = model.identify_effect() estimate = model.estimate_effect(identified_estimand, method_name="backdoor.propensity_score_matching") estimate.params.keys() estimate.params['estimand_type'] estimate.params['estimator_class'] # Refute the obtained estimate using multiple robustness checks. 
# Refutation check: add a random common cause and verify the estimate is stable.
refute_results = model.refute_estimate(identified_estimand, estimate,
                                       method_name="random_common_cause")

# Visualize the causal graph of the model.
model.view_model()

# ## Load Datasets

# !ls -lah ../../../datasets/doleta

dataset_path = '../../../datasets/doleta/'

# Mapping of PERM disclosure columns of interest to their spreadsheet column
# letters (as they appear in the DOL Excel file).
dict_cols = {
    "EMPLOYER_NAME": "I",
    "EMPLOYER_ADDRESS_1": "J",
    "EMPLOYER_CITY": "L",
    "EMPLOYER_STATE": "M",
    "EMPLOYER_NUM_EMPLOYEES": "R",
    "EMPLOYER_YR_ESTAB": "S",
    "PW_SOC_CODE": "Y",
    "PW_SOC_TITLE": "Z",
    "PW_LEVEL_9089": "AA",
    "PW_AMOUNT_9089": "AB",
    "PW_UNIT_OF_PAY_9089": "AC",
    "WAGE_OFFER_FROM_9089": "AH",
    "WAGE_OFFER_TO_9089": "AI",
    "JOB_INFO_WORK_CITY": "AK",
    "JOB_INFO_WORK_STATE": "AL",
    "JOB_INFO_JOB_TITLE": "AN",
    "JOB_INFO_EDUCATION": "AO",
    "JOB_INFO_MAJOR": "AQ",
    "COUNTRY_OF_CITIZENSHIP": "DD",
    "FOREIGN_WORKER_INFO_EDUCATION": "DG",
    "FOREIGN_WORKER_INFO_MAJOR": "DI",
    "FW_INFO_YR_REL_EDU_COMPLETED": "DJ",
    "EMPLOYER_DECL_INFO_TITLE": "DR",
    "NAICS_US_CODE": "DS",
    "PW_JOB_TITLE_9089": "DU"
}

selected_cols = list(dict_cols.keys())
selected_cols

# BUG FIX: the original read `perm_2019 = perm_2019 = pd.read_excel(...)`,
# a duplicated assignment; a single assignment is sufficient.
perm_2019 = pd.read_excel(dataset_path + 'PERM_Disclosure_Data_FY2019.xlsx')

# Print a sample of 10 rows starting at row 50.
# NOTE(review): assumes WAGE_OFFER_FROM_9089 holds strings like "85,000" and
# the numeric columns are non-null for these rows -- confirm with the data.
x = 50
for i in range(x, x + 10):
    print(float(perm_2019['WAGE_OFFER_FROM_9089'].loc[i].replace(",", "")))
    print(perm_2019['JOB_INFO_JOB_TITLE'].loc[i])
    print(int(perm_2019['EMPLOYER_NUM_EMPLOYEES'].loc[i]))
    print(perm_2019['EMPLOYER_NAME'].loc[i])
    print(perm_2019['EMPLOYER_CITY'].loc[i])
    print(perm_2019['JOB_INFO_WORK_CITY'].loc[i])
    print(perm_2019['JOB_INFO_WORK_STATE'].loc[i])
    print(perm_2019['PW_JOB_TITLE_9089'].loc[i])
    print(int(perm_2019['FW_INFO_YR_REL_EDU_COMPLETED'].loc[i]))
    print(perm_2019['FOREIGN_WORKER_INFO_EDUCATION'].loc[i])
    print("---" * 10)

# Keep the wage-offer column around for further analysis.
dataframe = perm_2019['WAGE_OFFER_FROM_9089']
dataframe
notebooks/5.dowhy_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Plot breach data # # Using this script you can plot data regarding breaches in a 3Di model. Plots include breach discharge, velocity, width, depth, waterlevel on both sides of the breach. # # - Author: <NAME>, 2019 # # - email: <EMAIL> #import libraries, make sure you have them installed on your Python environment from threedigrid.admin.gridresultadmin import GridH5ResultAdmin from threedigrid.admin.gridadmin import GridH5Admin import os import matplotlib.pyplot as plt import numpy as np import math from pyproj import Proj, transform from google_drive_downloader import GoogleDriveDownloader as gdd #Needs installing using pip, unlike the others # + #download testdata and unzip in testdata directory (https://drive.google.com/open?id=1xNhMZnWKxFP7on1reXxecq7XdTuJCSa2) gdd.download_file_from_google_drive(file_id='1xNhMZnWKxFP7on1reXxecq7XdTuJCSa2', dest_path='./testdata/bergermeer.zip', unzip=True) # + #Define location of raw results and gridadministration result_path = 'testdata/' result_file = os.path.join(result_path,'results_3di.nc') gridadmin_file = os.path.join(result_path,'gridadmin.h5') #Load files into gr object gr = GridH5ResultAdmin(gridadmin_file,result_file) # + #subset only 1D nodes breaches = gr.breaches #Filter on lines with possible breach. 
#kcu=55:'1d2d connected line possible breach', for more flowline classifiers see https://threedigrid.readthedocs.io/en/latest/miscellaneous.html#threedigrid.admin.utils.KCUDescriptor breach_lines = gr.lines.filter(kcu=55) # + breach_idx = breaches.levl[1:] #Loop over all breaches in the model (in the testdata there is only 1 breach) for breach_id in breach_idx: print("Breach flowline id: {}".format(breach_id)) breach = gr.breaches.filter(levl__eq=breach_id) flowline = gr.lines.filter(id__eq=breach_id) #extract the breach depth and width from the results depth = breach.timeseries(start_time=1,end_time=gr.nodes.timestamps[-1]).breach_depth width = breach.timeseries(start_time=1,end_time=gr.nodes.timestamps[-1]).breach_width #extract the flowline discharge and velocity from the results discharge = flowline.timeseries(start_time=1,end_time=gr.nodes.timestamps[-1]).q velocity = flowline.timeseries(start_time=1,end_time=gr.nodes.timestamps[-1]).u1 #select the waterlevel nodes at the beginning and end of the breach-flowline start_node_id = flowline.line[0][0] end_node_id = flowline.line[1][0] start_node = gr.nodes.filter(id__eq=start_node_id) end_node = gr.nodes.filter(id__eq=end_node_id) #extract the waterlevel at both sides of the breach wl_startnode = start_node.timeseries(start_time=1,end_time=gr.nodes.timestamps[-1]).s1 wl_endnode = end_node.timeseries(start_time=1,end_time=gr.nodes.timestamps[-1]).s1 #extract the timestamps of the simulation ts = gr.nodes.timestamps[1:] #Replace -9999 values as NaN nodatavalue = -9999 depth[depth==nodatavalue]=np.nan width[width==nodatavalue]=np.nan discharge[discharge==nodatavalue]=np.nan velocity[velocity==nodatavalue]=np.nan wl_startnode[wl_startnode==nodatavalue]=np.nan wl_endnode[wl_endnode==nodatavalue]=np.nan fig = plt.figure(figsize=(20,10)) ax1 = plt.subplot2grid((2, 2), (0, 0)) ax2 = plt.subplot2grid((2, 2), (0, 1)) ax3 = plt.subplot2grid((2, 2), (1, 0)) ax4 = plt.subplot2grid((2, 2), (1, 1)) fig.suptitle('Breach with id 
{}' .format(breach_id,)) ax1.plot(ts, discharge) ax1.set_title("Discharge [m3/s]") ax2.plot(ts, velocity) ax2.set_title("Velocity [m/s]") ax3.plot(ts,wl_startnode) ax3.plot(ts,wl_endnode) ax3.set_title("Water levels [m+MSL]") ax4.plot(ts,width) ax4.set_title("Breach width [m]") # -
Plot breach data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Name # # Data preparation using Spark on YARN with Cloud Dataproc # # # # Label # # Cloud Dataproc, GCP, Cloud Storage, Spark, Kubeflow, pipelines, components, YARN # # # # Summary # # A Kubeflow Pipeline component to prepare data by submitting a Spark job on YARN to Cloud Dataproc. # # # Details # # ## Intended use # # Use the component to run an Apache Spark job as one preprocessing step in a Kubeflow Pipeline. # # ## Runtime arguments # Argument | Description | Optional | Data type | Accepted values | Default | # :--- | :---------- | :--- | :------- | :------| :------| # project_id | The ID of the Google Cloud Platform (GCP) project that the cluster belongs to.|No | GCPProjectID | | | # region | The Cloud Dataproc region to handle the request. | No | GCPRegion | | | # cluster_name | The name of the cluster to run the job. | No | String | | | # main_jar_file_uri | The Hadoop Compatible Filesystem (HCFS) URI of the JAR file that contains the main class. | No | GCSPath | | | # main_class | The name of the driver's main class. The JAR file that contains the class must be either in the default CLASSPATH or specified in `spark_job.jarFileUris`.| No | | | | # args | The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.| Yes | | | | # spark_job | The payload of a [SparkJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob).| Yes | | | | # job | The payload of a [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs). | Yes | | | | # wait_interval | The number of seconds to wait between polling the operation. 
| Yes | | | 30 | # # ## Output # Name | Description | Type # :--- | :---------- | :--- # job_id | The ID of the created job. | String # # ## Cautions & requirements # # To use the component, you must: # # # # * Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project). # * [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster). # * The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details. # * Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project. # # # ## Detailed description # # This component creates a Spark job from [Dataproc submit job REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/submit). # # Follow these steps to use the component in a pipeline: # # # # 1. Install the Kubeflow Pipeline SDK: # + # %%capture --no-stderr KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz' # !pip3 install $KFP_PACKAGE --upgrade # - # 2. Load the component using KFP SDK # + import kfp.components as comp dataproc_submit_spark_job_op = comp.load_component_from_url( 'https://raw.githubusercontent.com/kubeflow/pipelines/2df775a28045bda15372d6dd4644f71dcfe41bfe/components/gcp/dataproc/submit_spark_job/component.yaml') help(dataproc_submit_spark_job_op) # - # ### Sample # Note: The following sample code works in an IPython notebook or directly in Python code. # # # #### Set up a Dataproc cluster # [Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) (or reuse an existing one) before running the sample code. # # # #### Prepare a Spark job # Upload your Spark JAR file to a Cloud Storage bucket. In the sample, we use a JAR file that is preinstalled in the main cluster: `file:///usr/lib/spark/examples/jars/spark-examples.jar`. 
# # Here is the [source code of the sample](https://github.com/apache/spark/blob/master/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java). # # To package a self-contained Spark application, follow these [instructions](https://spark.apache.org/docs/latest/quick-start.html#self-contained-applications). # # # #### Set sample parameters # + tags=["parameters"] PROJECT_ID = '<Please put your project ID here>' CLUSTER_NAME = '<Please put your existing cluster name here>' REGION = 'us-central1' SPARK_FILE_URI = 'file:///usr/lib/spark/examples/jars/spark-examples.jar' MAIN_CLASS = 'org.apache.spark.examples.SparkPi' ARGS = ['1000'] EXPERIMENT_NAME = 'Dataproc - Submit Spark Job' # - # #### Example pipeline that uses the component import kfp.dsl as dsl import json @dsl.pipeline( name='Dataproc submit Spark job pipeline', description='Dataproc submit Spark job pipeline' ) def dataproc_submit_spark_job_pipeline( project_id = PROJECT_ID, region = REGION, cluster_name = CLUSTER_NAME, main_jar_file_uri = '', main_class = MAIN_CLASS, args = json.dumps(ARGS), spark_job=json.dumps({ 'jarFileUris': [ SPARK_FILE_URI ] }), job='{}', wait_interval='30' ): dataproc_submit_spark_job_op( project_id=project_id, region=region, cluster_name=cluster_name, main_jar_file_uri=main_jar_file_uri, main_class=main_class, args=args, spark_job=spark_job, job=job, wait_interval=wait_interval) # #### Compile the pipeline pipeline_func = dataproc_submit_spark_job_pipeline pipeline_filename = pipeline_func.__name__ + '.zip' import kfp.compiler as compiler compiler.Compiler().compile(pipeline_func, pipeline_filename) # #### Submit the pipeline for execution # + #Specify pipeline argument values arguments = {} #Get or create an experiment and submit a pipeline run import kfp client = kfp.Client() experiment = client.create_experiment(EXPERIMENT_NAME) #Submit a pipeline run run_name = pipeline_func.__name__ + ' run' run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, 
arguments) # - # ## References # # * [Component Python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_submit_spark_job.py) # * [Component Docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile) # * [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/submit_spark_job/sample.ipynb) # * [Dataproc SparkJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) # # ## License # By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
components/gcp/dataproc/submit_spark_job/sample.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Overview
#
# In this notebook, we will explore how to create Azure Purview entity, classification, and lineage using Atlas APIs.
#
# ## Pre-requisites
#
# - [Python 3](https://www.python.org/downloads/)
# - [Az CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)

import json
# BUG FIX: `os` is used below (os.environ.get) but was never imported.
import os

SUBSCRIPTION_ID = "TODO"  # fill in
RESOURCE_GROUP = "TODO"  # fill in
PURVIEW_NAME = "TODO"  # fill in
SERVICE_PRINCIPAL_NAME = "TODO"  # fill in

# !az login

# !az account set --subscription {SUBSCRIPTION_ID}

# Create service principal to access Purview endpoint
# sp = !az ad sp create-for-rbac \
#     --name "http://{SERVICE_PRINCIPAL_NAME}" \
#     --role "Purview Data Curator" \
#     --scopes /subscriptions/{SUBSCRIPTION_ID}/resourceGroups/{RESOURCE_GROUP}/providers/Microsoft.Purview/accounts/{PURVIEW_NAME}

# Parse the service principal JSON printed by the `az` command above.
# NOTE(review): `sp` only exists when the cell magic above is executed in
# Jupyter -- running this file as plain Python will raise NameError here.
sp_json_string = ''.join(sp[-7:])
sp = json.loads(sp_json_string)

# Install Atlas Python client (https://github.com/wjohnson/pyapacheatlas)
# !pip install pyapacheatlas

# +
from pyapacheatlas.auth import ServicePrincipalAuthentication
from pyapacheatlas.core import PurviewClient

# Environment variables take precedence; otherwise fall back to the service
# principal created above.
oauth = ServicePrincipalAuthentication(
    tenant_id=os.environ.get("TENANT_ID", sp['tenant']),
    client_id=os.environ.get("CLIENT_ID", sp['appId']),
    client_secret=os.environ.get("CLIENT_SECRET", sp['password'])
)
# -

# Instantiate PurviewClient
client = PurviewClient(
    account_name = os.environ.get("PURVIEW_NAME", PURVIEW_NAME),
    authentication=oauth
)

# +
from pyapacheatlas.core import AtlasEntity

# Create an entity
# You must provide a name, typeName, qualified_name, and guid
# the guid must be a negative number and unique in your batch
# being uploaded.
# Qualified names uniquely identify each asset inside Purview.
input01_qn = "pyapacheatlas://demoinputclassification01"
input02_qn = "pyapacheatlas://demoinputclassification02"
output01_qn = "pyapacheatlas://demooutput01"
dataset_type_name = "DataSet"

# The guids are temporary negative placeholders; Purview assigns real guids
# on upload and reports the mapping in `results["guidAssignments"]`.
input01 = AtlasEntity(
    name="input01",
    typeName=dataset_type_name,
    qualified_name=input01_qn,
    guid="-100"
)
input02 = AtlasEntity(
    name="input02",
    typeName=dataset_type_name,
    qualified_name=input02_qn,
    guid="-101"
)
output01 = AtlasEntity(
    name="output01",
    typeName=dataset_type_name,
    qualified_name=output01_qn,
    guid="-102"
)

# Upload all three entities in a single batch request.
results = client.upload_entities(
    batch=[input01, input02, output01]
)
# -

# After the AtlasEntities are created, you will be able to see these assets within the Purview portal.
#
# ![Azure Purview Browse Asset Page](./img/purview_browse_asset.png)
#
# ![Azure Purview Custom Asset Page](./img/purview_custom_assets.png)

# +
# Get the Guids for us to work with
guids = [v for v in results["guidAssignments"].values()]
guids

# +
from pyapacheatlas.core import AtlasClassification

# Classify one entity with multiple classifications
print(f"Adding multiple classifications to guid: {guids[0]}")
one_entity_multi_class = client.classify_entity(
    guid=guids[0],
    classifications=[
        AtlasClassification("MICROSOFT.PERSONAL.DATE_OF_BIRTH"),
        AtlasClassification("MICROSOFT.PERSONAL.NAME")
    ],
    force_update=True
)
print(json.dumps(one_entity_multi_class, indent=2))

# +
from pyapacheatlas.core import AtlasClassification
from pyapacheatlas.core.util import AtlasException

# Classify Multiple Entities with one classification.
# classify_bulk_entities raises if any entity already carries the
# classification, so that case is treated as non-fatal.
try:
    multi_entity_single_class = client.classify_bulk_entities(
        entityGuids=guids,
        classification=AtlasClassification("MICROSOFT.PERSONAL.IPADDRESS")
    )
    print(json.dumps(multi_entity_single_class, indent=2))
except AtlasException as e:
    print("One or more entities had the existing classification, so skipping it.")
    print(e)
# -

# After entities are classified, you can navigate to individual asset and explore its classifications within the Purview portal.
#
# ![Azure Purview Classification](./img/purview_classification.png)

# +
from pyapacheatlas.core import AtlasProcess

# The Atlas Process is the lineage component that links the two
# entities together. The inputs and outputs need to be the "header"
# version of the atlas entities, so specify minimum = True to
# return just guid, qualifiedName, and typeName.
process_qn = "pyapacheatlas://democustomprocess"
process_type_name = "Process"

process = AtlasProcess(
    name="sample process",
    typeName=process_type_name,
    qualified_name=process_qn,
    inputs=[input01, input02],
    outputs=[output01],
    # CONSISTENCY FIX: use a string guid like the entities above
    # ("-100".."-102"); the original passed the bare int -103.
    guid="-103"
)

# Convert the individual entities into json before uploading.
results = client.upload_entities(
    batch=[input01, input02, output01, process]
)

print(json.dumps(results, indent=2))
# -

# After the AtlasProcess is created, you can navigate to the `sample process` asset and explore its lineage.
#
# ![Azure Purview Lineage](./img/purview_lineage.png)

# # Clean Up

# +
# Delete every entity created in this notebook using the
# server-assigned guids returned by the last upload.
guid_assignment = results['guidAssignments']

for local_guid in guid_assignment:
    guid = guid_assignment[local_guid]
    _ = client.delete_entity(guid)
samples/notebooks/Azure_Purview_Entity_Classification_Lineage.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="gGP85eLPSNFw" from sklearn.datasets import fetch_california_housing from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import tensorflow as tf import numpy as np from tensorflow import keras # + [markdown] id="tBXEanZUSRQW" # Imports needed # + id="O_ERi2j_Shy4" housing = fetch_california_housing() X_train_full, X_test, y_train_full, y_test = train_test_split(housing.data, housing.target, random_state=42) X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, random_state=42) scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_valid = scaler.transform(X_valid) X_test = scaler.transform(X_test) np.random.seed(42) tf.random.set_seed(42) X_train_A, X_train_B = X_train[:, :5], X_train[:, 2:] X_valid_A, X_valid_B = X_valid[:, :5], X_valid[:, 2:] X_test_A, X_test_B = X_test[:, :5], X_test[:, 2:] X_new_A, X_new_B = X_test_A[:3], X_test_B[:3] # + [markdown] id="LM_LU6F9S87S" # Splitting housing data and importing housing data # + id="0EWhAU8CSCRV" class WideAndDeepModel(keras.models.Model): def __init__(self, units=30, activation="relu", **kwargs): super().__init__(**kwargs) self.hidden1 = keras.layers.Dense(units, activation=activation) self.hidden2 = keras.layers.Dense(units, activation=activation) self.main_output = keras.layers.Dense(1) self.aux_output = keras.layers.Dense(1) def call(self, inputs): input_A, input_B = inputs hidden1 = self.hidden1(input_B) hidden2 = self.hidden2(hidden1) concat = keras.layers.concatenate([input_A, hidden2]) main_output = self.main_output(concat) aux_output = self.aux_output(hidden2) return main_output, aux_output model = WideAndDeepModel(30, activation="relu") # + [markdown] id="9-SKs5aESH0a" # Creating the class # + id="YwtSxM6TSFhH" 
# executionInfo={"status": "ok", "timestamp": 1602715614490, "user_tz": -660, "elapsed": 7153, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="bb871499-d6bf-4a00-cb58-1aec11049380" colab={"base_uri": "https://localhost:8080/", "height": 394}

# Compile with a weighted two-output loss: the main output dominates (0.9)
# and the auxiliary output acts as a regularizer (0.1).
# BUG FIX: the `lr` keyword was deprecated and later removed from Keras
# optimizers; `learning_rate` is the supported spelling.
model.compile(loss="mse", loss_weights=[0.9, 0.1],
              optimizer=keras.optimizers.SGD(learning_rate=1e-3))

# Both outputs are supervised with the same target during training.
history = model.fit((X_train_A, X_train_B), (y_train, y_train), epochs=10,
                    validation_data=((X_valid_A, X_valid_B), (y_valid, y_valid)))

# evaluate() returns the weighted total loss plus each output's own loss.
total_loss, main_loss, aux_loss = model.evaluate((X_test_A, X_test_B), (y_test, y_test))

# predict() returns the (main, auxiliary) predictions for the new samples.
y_pred_main, y_pred_aux = model.predict((X_new_A, X_new_B))

# + [markdown] id="7uApGJTgSJoL"
# Training and compiling the model
Hands-on-ML/Code/Chapter 10/subclassing_API.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Quantum circuit for an exponential of pauli strings
#
# For SUSY QM, the Hamiltonian, $H$ can be qubitized, which results in the Hamiltonian being written as a sum of terms, with each term containing a product of pauli matrices acting on the qubits. Given some initial state, we can apply the time evolution operator,
# \begin{equation}
# e^{iHt}.
# \end{equation}
# To realize this on a quantum computer, we use the Suzuki-Trotter formula
# \begin{equation}
# e^{i\sum_j H_j t}=\prod_j e^{i H_j \delta t} + \mathcal{O}(\delta t^2)
# \end{equation}
# Since qubitizing the Hamiltonian results in an expression for $H$ in terms of pauli operators, we need to be able to write down the quantum circuit for an exponential of pauli matrices. This is accomplished with the so-called "ladder" circuit, which we now detail.

# First we go through some example cases showing that the exponential of the Hamiltonian is a quantum circuit.

# +
import numpy as np
# We use numpy's kron function for tensor products, and matmul for matrix multiplication.

# Definition of the identity, pauli X and Z matrices, and the two-qubit CNOT matrix.
ID = np.array([[1, 0], [0, 1]])
X = np.array([[0, 1], [1, 0]])
Z = np.array([[1, 0], [0, -1]])
CNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
# -

# A quick check that we are doing kronecker products correctly.
# The CNOT gate is the identity if the control qubit is zero, and a NOT (X) gate otherwise.
# BUG FIX: the original `assert(a.all() == b.all())` only compares two scalar
# booleans (both True here), so it would pass even for unequal matrices.
# np.array_equal performs the intended element-wise comparison.
assert np.array_equal(
    CNOT,
    np.kron([[1, 0], [0, 0]], ID) + np.kron([[0, 0], [0, 1]], X)
)

# To avoid using an algebraic library, like sci-py, I pick specific values for cos(t) and i sin(t)...
#cos(t) = A #i sin(t) = B A=0.2 B=0.3 RZ=A*ID + B*Z #a rotation around the z-axis with given values for t (that don't make sense) # Now we can check that the circuit for # \begin{equation} # e^{-i(Z \otimes Z)t} = \text{CNOT}\times(\mathcal{1}\otimes R_z)\times \text{CNOT} # \end{equation} # + LHS=A*np.kron(ID,ID) + B*np.kron(Z,Z) RHS=np.matmul(CNOT,np.matmul(np.kron(ID,RZ),CNOT)) #print(LHS) #print(RHS) assert(LHS.all()==RHS.all()) # + #We now repeat this for a pauli Z applied to 3 qubits. LHS = A*np.kron(ID,np.kron(ID,ID)) + B*np.kron(Z,np.kron(Z,Z)) CNOT1=np.kron(CNOT,ID) CNOT2=np.kron(ID,CNOT) RZ3=np.kron(ID,np.kron(ID,RZ)) RHS=np.matmul(CNOT1,np.matmul(CNOT2,np.matmul(RZ3,np.matmul(CNOT2,CNOT1)))) assert(LHS.all()==RHS.all()) # - # QISKIT already contains a method for implementing Trotterization to a exponential written as a sum of pauli matrices. from qiskit.aqua.operators import I,X,Y,Z, PauliTrotterEvolution from qiskit import QuantumCircuit, transpile operator = ((Z^Z).exp_i()) trotter_op = PauliTrotterEvolution(trotter_mode='suzuki').convert(operator) print(operator) print(trotter_op) qc = QuantumCircuit(2,2) qc.append(trotter_op, [0,1]) transpile(qc, basis_gates = ['cx', 'u1', 'u2', 'u3', 'H', 'X', 'Y', 'Z', 'id']).draw('mpl') transpile(qc, basis_gates = ['cx', 'u1', 'u2', 'u3', 'H', 'X', 'Y', 'Z', 'id'],optimization_level=3).draw('mpl') operator = ((X^Z).exp_i()) trotter_op = PauliTrotterEvolution(trotter_mode='suzuki').convert(operator) print(operator) print(trotter_op) qc = QuantumCircuit(2,2) qc.append(trotter_op, [0,1]) transpile(qc, basis_gates = ['cx', 'u1', 'u2', 'u3', 'H', 'X', 'Y', 'Z', 'id']).draw('mpl') operator = ((X^Y^Z).exp_i()) trotter_op = PauliTrotterEvolution(trotter_mode='suzuki').convert(operator) print(operator) print(trotter_op) qc = QuantumCircuit(3,3) qc.append(trotter_op, [0,1,2]) transpile(qc, basis_gates = ['cx', 'u1', 'u2', 'u3', 'H', 'X', 'Y', 'Z', 'id']).draw('mpl')
tutorials/LadderCircuits.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Mask Cover # Percentage of masked pixels import ee ee.Initialize() from geetools import algorithms, ui, cloud_mask, tools from ipygee import * # Area of Interest p = ee.Geometry.Point([-71.33972167968751, -42.737619925503054]) aoi = p.buffer(8000).bounds() # Masked image i = ee.Image('COPERNICUS/S2/20181122T142749_20181122T143353_T18GYT') masked = cloud_mask.sentinel2()(i) masked = masked.clip(aoi) # Compute Mask Cover cover = algorithms.maskCover(masked, aoi, 10) eprint(cover.get('MASK_COVER')) # Show Image on Map Map = Map() Map.show() vis = {'bands': ['B8', 'B11','B4'], 'min':0, 'max':5000} Map.addLayer(masked, vis, 'Masked image') Map.centerObject(aoi) # Make it an image band metadata = cover.metadata('MASK_COVER') tools.image.getValue(metadata, aoi.centroid(1), 10, side='client')
notebooks/algorithms/mask_cover.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from xdgmm import XDGMM
from sklearn.model_selection import validation_curve
from sklearn.model_selection import ShuffleSplit
from test_plots import *

'''
Due to AstroML still using the deprecated GMM class from scikit-learn
(instead of GaussianMixture), this demo will throw numerous errors whenever
the XDGMM object calls an AstroML method, such as fit. The lines below will
suppress these warnings; comment them out to see everything.

This XDGMM class has been updated to use GaussianMixture instead of GMM when
necessary, but since it uses an AstroML XDGMM object to store and manipulate
the model, it is dependent on AstroML. These warnings will continue to occur
until the XDGMM class from AstroML has been updated.
'''
import warnings
warnings.filterwarnings('ignore')
# -

def plot_sphere(w=0, center=[0,0,0], r=[1, 1, 1], rotation=[1,1,1], ax=None):
    '''
    Plot an ellipsoid surface (a unit sphere scaled, rotated and translated).

    Input:
        w: component weight -- currently unused, kept for call-site compatibility
        center: 3-element list, ellipsoid center
        r: 3-element list, semi-axis lengths along each principal axis
           (allows drawing ellipsoids, e.g. sqrt of covariance eigenvalues)
        rotation: 3x3 rotation matrix mapping principal axes to data axes
                  (e.g. covariance eigenvectors) -- TODO confirm orientation
        ax: optional pyplot 3-D axis object to plot the surface in

    Output:
        ax: pyplot axis object
    '''
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

    # Parametrize the unit sphere on a 30x30 (u, v) grid.
    u = np.linspace(0, 2 * np.pi, 30)  # np.linspace returns evenly spaced samples
    v = np.linspace(0, np.pi, 30)
    x = r[0] * np.outer(np.cos(u), np.sin(v))
    y = r[1] * np.outer(np.sin(u), np.sin(v))
    z = r[2] * np.outer(np.ones(np.size(u)), np.cos(v))

    # Rotate each surface point into the data frame and shift to `center`.
    for i in range(len(x)):
        for j in range(len(x)):
            #[x[i, j], y[i, j], z[i, j]] = [x[i, j], y[i, j], z[i, j]] + center  # sphere-only variant
            [x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]], rotation.T) + center
    ax.plot_surface(x, y, z, alpha=0.3, linewidth=0, antialiased=False)
    return ax

# +
# Load SDSS DR7 galaxy data; columns 4:7 hold the 3-D coordinates
# (presumably Cartesian positions -- verify against the data pipeline).
A=np.load("/Users/mahaixia/DATA/SDSS_DR7/galaxy_DR7/SDSS_box_bins100.npz")
data = A["a7"]
points=data[:,4:7]

fig = plt.figure(figsize=(8, 8))
axes = fig.add_subplot(111, projection='3d')
# axes.set_zlim3d(200, 250)
# axes.set_xlim3d(-150, -100)
# axes.set_ylim3d(50, 100)
axes.scatter(points[:,0], points[:,1], points[:,2], s = 2.0, alpha = 0.5)

# Zero measurement errors: one 3x3 covariance per point, all zeros.
Xerr = np.zeros(points.shape + points.shape[-1:])
# -

# Instantiate an XDGMM model:
xdgmm = XDGMM(method='Bovy')

# +
# Define the range of component numbers, and get ready to compute the BIC for each one:
param_range = np.arange(25, 35, 1)

# Loop over component numbers, fitting XDGMM model and computing the BIC:
bic, optimal_n_comp, lowest_bic = xdgmm.bic_test(points, Xerr, param_range, no_err=True)
# -

plot_bic(param_range, bic, optimal_n_comp)

# ## Model Fitting
#
# Now that we know the best number of components to use, we can fit the data. First set the number of components, then perform the fit.
# + # xdgmm.n_components = optimal_n_comp # xdgmm.n_components = 31 # a0 # xdgmm.n_components = 36 # a1 # xdgmm.n_components = 54 # a2 # xdgmm.n_components = 54 # a3 # xdgmm.n_components = 78 # a4 # xdgmm.n_components = 60 # a5 # xdgmm.n_components = 66 # a6 xdgmm.n_components = 35 # a7 xdgmm = xdgmm.fit(points, Xerr) print("xdgmm fitting: done!") index = xdgmm.predict(points, Xerr) # - def find_fraction(points, center=[0,0,0], r=[1, 1, 1], rotation=[1,1,1]): inner = 0.0 outer = 0.0 midx = [] x = points[:,4] - center[0] y = points[:,5] - center[1] z = points[:,6] - center[2] r = r # 1 sigma球 for j in range(len(x)): [x[j], y[j], z[j]] = np.dot([x[j], y[j], z[j]], np.linalg.inv(rotation)) for i in range(points.shape[0]): distance = np.square(x[i]/r[0]) + np.square(y[i]/r[1]) + np.square(z[i]/r[2]) if distance > 1.0: outer +=1.0 elif distance < 1.0: inner +=1.0 midx.append(points[i]) return inner, outer, midx # + figxd = plt.figure(figsize=(8, 8)) axesxd = figxd.add_subplot(111, projection='3d') index = xdgmm.predict(points, Xerr) member = [] N_member = [] for i in range(xdgmm.n_components): covariances = xdgmm.V[i][:3, :3] v, u = np.linalg.eigh(covariances) r = np.sqrt(v) plot_sphere(xdgmm.weights[i], xdgmm.mu[i], r, u, ax=axesxd) group = data[np.where(index == i)] inner, outer, mlist = find_fraction(group, center=xdgmm.mu.T[:3,i], r=r, rotation=u) member.append(mlist) N_member.append(inner) # - plt.yscale("log") plt.hist(N_member) with open("Groups_bins100.txt", "ab") as f: np.savetxt(f,np.c_[np.array(N_member).T, xdgmm.mu[:, :3]], fmt=' '.join(['%i'] + ['%1.5f']*3)) # ## Gaussian 置信区间
SDSS/SDSS_bins100.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from sklearn.datasets import make_moons, make_circles, make_blobs
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from numpy import arange, meshgrid, savetxt, c_
from matplotlib import pyplot
from pandas import DataFrame, read_csv
# -

# Base names of the three synthetic dataset shapes, used in saved filenames.
names = ['moon','circle','blob']


def plot(datasets):
    """Scatter-plot each (X, y) dataset in its own subplot row.

    datasets: list of (X, y) pairs, X an (n, 2) array, y binary labels.
    """
    figure = pyplot.figure(figsize=(17, 9))
    h = .02  # mesh step used to pad the axis limits
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    # FIX: removed an unused train_test_split call (its outputs were never
    # read and it burned global RNG state) and an unused RdBu colormap.
    for i, (X, y) in enumerate(datasets, start=1):
        # Pad the data bounds by half a unit so points are not on the edge.
        x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
        y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
        xx, yy = meshgrid(arange(x_min, x_max, h), arange(y_min, y_max, h))
        ax = pyplot.subplot(len(datasets), 1, i)
        ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
    figure.subplots_adjust(left=.02, right=.98)
    pyplot.show()


# Four dataset families: {large (2000), small (200)} x {low, high noise}.
datasets_ll = [make_moons(n_samples=2000,random_state=32,noise=0.05),
               make_circles(n_samples=2000,factor=0.7,noise=0.05,random_state=55),
               make_blobs(n_samples=2000,centers=[(2,2),(1.5,1.5)],cluster_std=[0.05, 0.05], random_state=8)]

datasets_lh = [make_moons(n_samples=2000,random_state=32,noise=0.3),
               make_circles(n_samples=2000,factor=0.7,noise=0.3,random_state=55),
               make_blobs(n_samples=2000,centers=[(2,2),(1.5,1.5)],cluster_std=[0.3, 0.3], random_state=8)]

datasets_sl = [make_moons(n_samples=200,random_state=32,noise=0.05),
               make_circles(n_samples=200,factor=0.7,noise=0.05,random_state=55),
               make_blobs(n_samples=200,centers=[(2,2),(1.5,1.5)],cluster_std=[0.05, 0.05], random_state=8)]

datasets_sh = [make_moons(n_samples=200,random_state=32,noise=0.3),
               make_circles(n_samples=200,factor=0.7,noise=0.3,random_state=55),
               make_blobs(n_samples=200,centers=[(2,2),(1.5,1.5)],cluster_std=[0.3, 0.3], random_state=8)]

plot(datasets_ll)

plot(datasets_sl)

plot(datasets_lh)

plot(datasets_sh)


def saveDataset(data, size, noise):
    """Write each dataset to ../extracts/<shape>_<size>_<noise>.txt as 'x y label' rows."""
    for name, dt in zip(names, data):
        print(dt)
        X, y = dt
        savetxt('../extracts/%s_%s_%s.txt' % (name, size, noise),
                c_[X, y], fmt='%f', delimiter=" ", newline="\n")


saveDataset(datasets_ll,'large','low')
saveDataset(datasets_lh,'large','high')
saveDataset(datasets_sl,'small','low')
saveDataset(datasets_sh,'small','high')
notebooks/generate_dummy_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: neuro # language: python # name: neuro # --- # + import os from nipype.interfaces import fsl from nipype.interfaces.io import SelectFiles, DataSink import nipype.pipeline.engine as pe import nipype.interfaces.utility as util import nipype.algorithms.modelgen as model from config import root from IPython.display import Image fsl.FSLCommand.set_default_output_type("NIFTI_GZ") # + def make_contrasts(contrasts, names, con_type="T", show_output=True): """Make contrasts as read into FSL""" contrasts_fmt = [] for contrast in contrasts: # create contrast title title_left, title_right = [], [] for k, v in contrast.items(): title_left += [k] if v > 0 else [] title_right += [k] if v < 0 else [] title = ", ".join(title_left) + " > " + ", ".join(title_right) weights_per_regr = [] for name in names: weight = contrast[name] if name in contrast.keys() else 0 weights_per_regr.append(weight) contrasts_fmt.append((title, con_type, names, weights_per_regr)) if show_output: for con in contrasts_fmt: print("="*20) print(con[0]) print("-"*20) print(con[1]) for reg, weight in zip(con[2], con[3]): print(reg, "\t", weight) print("="*20) print("\n") return contrasts_fmt def pop_decay(in_file="", duration=.2, **kwargs): from nipype.interfaces.base import Bunch import pandas as pd # the data regs = pd.read_csv(in_file, header=0) conditions = regs.columns.tolist() conditions.remove("onset") onsets = [regs.onset] * len(conditions) durations = [ [duration] * len(regs) ] * len(conditions) amplitudes = regs[conditions].values.T.tolist() output = Bunch(conditions=conditions, onsets=onsets, durations=durations, amplitudes=amplitudes) return output # - # # Prepare # + # pathnames base_dir = os.path.join(root, "data") output_dir = os.path.join(root, "data", "output") working_dir = os.path.join(root, "data", "working_dir") # MNI 
standard file standard = os.path.join(root, "data", "in_analysis", "nii", "standard") mni_standard_path = os.path.join(standard, "MNI152_T1_2mm_brain.nii.gz") mni_brain_standard_path = os.path.join(standard, "MNI152_T1_2mm_brain.nii.gz") mni_brain_mask_standard_path = os.path.join(standard, "MNI152_T1_2mm_brain_mask.nii.gz") # + # Params glm_prefix = "pop_decay_submean_" TR = 2.1 filter_cutoff = 60 # Lists sub_list = ["sub_%03d" % i for i in range(1, 7)] # - # ## Input and output nodes # + # iterate over subjects infosource = pe.Node( util.IdentityInterface( fields=["sub_id"]), name="infosource") # Define iterable attributes infosource.iterables = [("sub_id", sub_list)] # File templates for different subjects and sessions templates = { "runs" : "output/highpass/ses_*{sub_id}/_hp_filter*/run_*_st_mcf_warp_dtype_bet_intnorm_smooth_hpf.nii.gz", # "runs" : "output/subtractmean/ses_*{sub_id}/_subtractmean*/run_*_st_mcf_warp_dtype_bet_intnorm_smooth_hpf_sub.nii.gz", "behavior" : "search/regs/{sub_id}_ses_*_scn_*.txt", "func1" : "output/highpass/ses_000{sub_id}/_hp_filter0/run_000_st_mcf_warp_dtype_bet_intnorm_smooth_hpf.nii.gz", "warp_field" : "output/register_to_standard/{sub_id}/orig_field.nii.gz", "premat" : "output/register_to_standard/{sub_id}/inplane_brain_bbreg_{sub_id}.mat", } # SelectFiles Node to handle session specific file templates files = pe.Node( SelectFiles( templates, base_directory=base_dir, sort_filelist=True), name="files") # Create datasink to store important # files in useful, more accessable locations. datasink = pe.Node( DataSink( base_directory=base_dir, container="output"), name="datasink") # Remove unwanted lengthy strings from filenames. 
datasink.inputs.substitutions = [("_sub_id_", "")] # - # ## Make contrasts # + contrasts = [{ 'loc1': 1, 'loc2': -0.3333333333333333, 'loc3': -0.3333333333333333, 'loc4': -0.3333333333333333 }, { 'loc2': 1, 'loc1': -0.3333333333333333, 'loc3': -0.3333333333333333, 'loc4': -0.3333333333333333 }, { 'loc3': 1, 'loc1': -0.3333333333333333, 'loc2': -0.3333333333333333, 'loc4': -0.3333333333333333 }, { 'loc4': 1, 'loc1': -0.3333333333333333, 'loc2': -0.3333333333333333, 'loc3': -0.3333333333333333 }, { 'loc_lag1': 0.5, 'loc_lag2': 0.5, }, { 'loc_lag1': 1.0 }, { 'loc_lag2': 1.0, }, { 'clr_lag1': 0.5, 'clr_lag2': 0.5, }, { 'clr_lag1': 1.0 }, { 'clr_lag2': 1.0 }, { 'loc1': 0.25, 'loc2': 0.25, 'loc3': 0.25, 'loc4': 0.25 }] conditions = ["loc1", "loc2", "loc3", "loc4", "loc_lag1", "loc_lag2", "clr_lag1", "clr_lag2"] # Create contrasts for FSL contrasts_large = make_contrasts(contrasts, conditions, show_output=True) # - # ## Setup GLM # + glm_function = pe.MapNode( util.Function( input_names=["in_file"], output_names=["subject_info"], function=pop_decay), iterfield=["in_file"], name=glm_prefix+"glm_function") # Specify model specifymodel = pe.Node( model.SpecifyModel( high_pass_filter_cutoff=filter_cutoff, time_repetition=TR, input_units="secs", ), name=glm_prefix+"specifymodel" ) # Level 1 design level1design = pe.Node( fsl.Level1Design( bases={"dgamma" : {"derivs": True}}, interscan_interval=TR, model_serial_correlations=True, # Prewhitening contrasts=contrasts_large, ), name=glm_prefix+"level1design" ) # model featmodel = pe.MapNode( interface=fsl.FEATModel(), name=glm_prefix+"featmodel_ses1", iterfield=["fsf_file", "ev_files"] ) # FILMGLS filmgls = pe.MapNode( interface=fsl.FILMGLS( threshold=-1500, ), iterfield=["design_file", "in_file", "tcon_file"], name=glm_prefix+"filmgls" ) # + firstlevel = pe.Workflow(name=glm_prefix+"firstlevel", base_dir=working_dir) firstlevel.connect(glm_function, "subject_info", specifymodel, "subject_info") firstlevel.connect(specifymodel, 
"session_info", level1design, "session_info") firstlevel.connect(level1design, "fsf_files", featmodel, "fsf_file") firstlevel.connect(level1design, "ev_files", featmodel, "ev_files") firstlevel.connect(featmodel, "design_file", filmgls, "design_file") firstlevel.connect(featmodel, "con_file", filmgls, "tcon_file") firstlevel.write_graph(simple_form=True, graph2use="hierarchical", dotfilename="./graph_hierarchical.dot") # Visualize graph Image(width=768, filename="graph_hierarchical.png") # - # ## Second level num_contrasts = len(contrasts) con_list = ["%d" % (i+1) for i in range(num_contrasts)] # + meanfunc = pe.Node( interface=fsl.ImageMaths( op_string="-Tmean", suffix="_mean"), name="meanfunc") meanfuncmask = pe.Node( interface=fsl.BET( mask=True, no_output=True, frac=0.05), name="meanfuncmask") dilatemask = pe.Node( interface=fsl.ImageMaths( suffix="_dil", op_string="-dilF"), name="dilatemask") # Concatenate copes before feeding to Flameo merge_copes = pe.MapNode( interface=fsl.Merge( dimension="t"), iterfield=["in_files"], name="merge_copes") merge_varcopes = pe.MapNode( interface=fsl.Merge( dimension="t"), iterfield=["in_files"], name="merge_varcopes") # Create a level2 model l2model_fixed = pe.Node( interface=fsl.L2Model(), name="l2model_fixed") # A fixed effects FLAMEO node, with copes and varcopes as inputs. 
fixed_flameo = pe.MapNode( interface=fsl.FLAMEO( run_mode="fe"), iterfield=["cope_file", "var_cope_file"], name="fixed_flameo") applywarp_copes = pe.MapNode( fsl.ApplyWarp( ref_file=mni_standard_path), iterfield=["in_file"], name="applywarp_copes", overwrite=False) applywarp_varcopes = pe.MapNode( fsl.ApplyWarp( ref_file=mni_standard_path), iterfield=["in_file"], name="applywarp_varcopes", overwrite=False) applywarp_zstats = pe.MapNode( fsl.ApplyWarp( ref_file=mni_standard_path), iterfield=['in_file'], name='applywarp_zstats', overwrite=False) # + fixed_fx = pe.Workflow(name=glm_prefix+"fixed_fx", base_dir=working_dir) def num_copes(files): return len(files) fixed_fx.connect(meanfunc, "out_file", meanfuncmask, "in_file") fixed_fx.connect(meanfuncmask, "mask_file", dilatemask, "in_file") fixed_fx.connect(dilatemask, "out_file", fixed_flameo, "mask_file") fixed_fx.connect(merge_copes, "merged_file", fixed_flameo, "cope_file") fixed_fx.connect(merge_varcopes, "merged_file", fixed_flameo, "var_cope_file") fixed_fx.connect(l2model_fixed, "design_mat", fixed_flameo, "design_file") fixed_fx.connect(l2model_fixed, "design_con", fixed_flameo, "t_con_file") fixed_fx.connect(l2model_fixed, "design_grp", fixed_flameo, "cov_split_file") fixed_fx.connect(fixed_flameo, "copes", applywarp_copes, "in_file") fixed_fx.connect(fixed_flameo, "var_copes", applywarp_varcopes, "in_file") fixed_fx.connect(fixed_flameo, "zstats", applywarp_zstats, "in_file") fixed_fx.write_graph(simple_form=True, graph2use="hierarchical", dotfilename="./graph_hierarchical.dot") Image(width=768, filename="graph_hierarchical.png") # + glm = pe.Workflow(name="glm_submean", base_dir=working_dir) # Inputs first level glm.connect(files, "behavior", firstlevel, glm_prefix+"glm_function.in_file") glm.connect(files, "runs", firstlevel, glm_prefix+"specifymodel.functional_runs") glm.connect(files, "runs", firstlevel, glm_prefix+"filmgls.in_file") glm.connect(files, "func1", fixed_fx, "meanfunc.in_file") # Inputs warp 
glm.connect(infosource, "sub_id", files, "sub_id") glm.connect(files, "premat", fixed_fx, "applywarp_copes.premat") glm.connect(files, "warp_field", fixed_fx, "applywarp_copes.field_file") glm.connect(files, "premat", fixed_fx, "applywarp_varcopes.premat") glm.connect(files, "warp_field", fixed_fx, "applywarp_varcopes.field_file") glm.connect(files, "premat", fixed_fx, "applywarp_zstats.premat") glm.connect(files, "warp_field", fixed_fx, "applywarp_zstats.field_file") def sort_copes(files): numelements = len(files[0]) outfiles = [] for i in range(numelements): outfiles.insert(i,[]) for j, elements in enumerate(files): outfiles[i].append(elements[i]) return outfiles def num_copes(files): return len(files) # Connect first to second level glm.connect(firstlevel, (glm_prefix+"filmgls.copes", sort_copes), fixed_fx, "merge_copes.in_files") glm.connect(firstlevel, (glm_prefix+"filmgls.varcopes", sort_copes), fixed_fx, "merge_varcopes.in_files") glm.connect(firstlevel, (glm_prefix+"filmgls.copes", num_copes), fixed_fx, "l2model_fixed.num_copes") # Outputs Level 1 glm.connect(firstlevel, glm_prefix+"filmgls.zstats", datasink, glm_prefix+"filmgls.@zstats") glm.connect(firstlevel, glm_prefix+"filmgls.param_estimates", datasink, glm_prefix+"filmgls.@param_estimates") glm.connect(firstlevel, glm_prefix+"filmgls.copes", datasink, glm_prefix+"filmgls.@copes") glm.connect(firstlevel, glm_prefix+"filmgls.varcopes", datasink, glm_prefix+"filmgls.@varcopes") glm.connect(firstlevel, glm_prefix+"filmgls.dof_file", datasink, glm_prefix+"filmgls.@dof_file") glm.connect(firstlevel, glm_prefix+"filmgls.logfile", datasink, glm_prefix+"filmgls.@logfile") glm.connect(firstlevel, glm_prefix+"filmgls.residual4d", datasink, glm_prefix+"filmgls.@residual4d") glm.connect(firstlevel, glm_prefix+"filmgls.sigmasquareds", datasink, glm_prefix+"filmgls.@sigmasquareds") glm.connect(firstlevel, glm_prefix+"filmgls.tstats", datasink, glm_prefix+"filmgls.@tstats") # Outputs Level 2 glm.connect(fixed_fx, 
"dilatemask.out_file", datasink, glm_prefix + "L2_fixedfx"+".funcmask") glm.connect(fixed_fx, "fixed_flameo.copes", datasink, glm_prefix + "L2_fixedfx"+".copes") glm.connect(fixed_fx, "fixed_flameo.var_copes", datasink, glm_prefix + "L2_fixedfx"+".varcopes") glm.connect(fixed_fx, "fixed_flameo.zstats", datasink, glm_prefix + "L2_fixedfx"+".zstats") glm.connect(fixed_fx, "applywarp_copes.out_file", datasink, glm_prefix + "L2_fixedfx"+"_warped"+".copes") glm.connect(fixed_fx, "applywarp_varcopes.out_file", datasink, glm_prefix + "L2_fixedfx"+"_warped"+".varcopes") glm.connect(fixed_fx, "applywarp_zstats.out_file", datasink, glm_prefix + "L2_fixedfx"+"_warped"+".zstats") glm.write_graph( simple_form=True, graph2use="hierarchical", dotfilename="./graph_hierarchical.dot" ) Image(width=768, filename="graph_hierarchical.png") # - glm.run(plugin='MultiProc')
notebooks/FMRI_level_12.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tensorflow Introduction # # There are a number of really good Tensorflow tutorials. # # [https://www.tensorflow.org/tutorials/](https://www.tensorflow.org/tutorials/) # # 1. [Tensors](https://www.tensorflow.org/tutorials/customization/basics) # # 2. [Titanic with Linear Models](https://www.tensorflow.org/tutorials/estimator/linear) # # 3. [Titanic with Boosted Trees](https://www.tensorflow.org/tutorials/estimator/boosted_trees) # # 4. [Regression with Tensorflow](https://www.tensorflow.org/tutorials/keras/regression) # # Tensorflow Tabular Data # # 1. [Feature Columns](https://www.tensorflow.org/tutorials/structured_data/feature_columns) # # 2. [Preprocessing Layers](https://www.tensorflow.org/tutorials/structured_data/preprocessing_layers) # # # # # # Tensorflow NLP # # 1.
site/_build/jupyter_execute/notebooks/09-deep-learning1/Tensorflow Introduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=["remove_cell"] # # Bernstein-Vazirani 알고리즘 # - # 이 섹션에서는 먼저 Bernstein-Vazirani 문제와 그 고전적 솔루션 및 이 문제를 해결하기 위한 양자 알고리즘을 소개합니다. 그런 다음 Qiskit을 사용하여 양자 알고리즘을 구현하고 시뮬레이터와 실제 양자 백엔드 모두에서 실행해 보겠습니다. # ## 1. Bernstein-Vazirani 알고리즘<a id="algorithm"></a> # # Reference [1]에서 처음 소개된 Bernstein-Vazirani 알고리즘은 지난 섹션에서 다룬 Deutsch-Jozsa 알고리즘의 확장으로 볼 수 있습니다. Deutsch-Jozsa 문제보다 더 복잡한 문제의 해결에 양자 컴퓨터를 사용하면 이점이 있을 수 있음을 보여주었습니다. # # ### 1.1 Bernstein-Vazirani 문제<a id="bvproblem"> </a> # # 다시 입력으로 비트 문자열($x$)을 사용하고 $0$ 또는 $1$를 반환하는 블랙박스 함수 $f$가 제공됩니다. 즉 $$f({x_0,x_1,x_2, ...}) \rightarrow 0 \textrm{ or } 1 \textrm{ where } x_n \textrm{ is }0 \textrm{ or } 1 $$입니다. # # Deutsch-Jozsa 문제처럼 함수가 상수 함수이거나 균형 함수인 것이 아니라, 이제 함수는 문자열 $s$에 따라 입력의 비트 곱을 반환하게 됩니다. 즉, 입력 $x$, $f(x) = s \cdot x , \text{(mod 2)}$가 주어지며 $s$를 찾아내야 합니다. 고전적인 가역 회로로서 Bernstein-Vazirani 오라클은 다음과 같습니다. # # ![고전적인 가역 회로](images/bv2.png) # # ### 1.2 고전적 해법<a id="classical-solution"> </a> # # 일반적으로 오라클은 $$f_s(x) = s \cdot x \mod 2$$를 입력하면 $x$를 반환합니다. 따라서 숨겨진 비트 문자열 $s$는 여러가지 입력값들로 오라클을 탐색하여 드러낼 수 있습니다. # # input(x) # :-: # 100...0 # 010...0 # 001...0 # 000...1 # # 여기서 각 쿼리는 $s$의 서로 다른 비트($s_i$ 비트)를 나타냅니다. 예를 들어 `x = 1000...0` 일 때 $s$의 최하위 비트를 얻을 수 있고 `x = 0100...0` 일 때 다음 최하위 비트를 찾을 수 있는 방식입니다. 따라서 $f_s(x)$ 함수를 $n$ 번 호출해야 한다는 것을 의미합니다. # ### 1.3 양자 솔루션<a id="quantum-solution"> </a> # # 양자 컴퓨터를 사용하면 $f(x)$ 함수를 한 번만 호출하면 100% 확실하게이 문제를 해결할 수 있습니다. 숨겨진 비트 문자열을 찾는 양자 Bernstein-Vazirani 알고리즘은 매우 간단합니다. # # 1. 입력 큐비트를 $|0\rangle^{\otimes n}$ 상태로 초기화하고 출력 큐비트를 $|{-}\rangle$로 초기화합니다. # 2. 입력 레지스터에 Hadamard 게이트 적용 # 3. 오라클 탐색 # 4. 입력 레지스터에 Hadamard 게이트 적용 # 5. 측정 # # ![Bernstein-Vazirani 양자 회로](images/bv1.png) # # 알고리즘을 설명하기 위해 각 큐비트에 H-게이트를 적용할 때 어떤 일이 발생하는지 더 자세히 살펴보겠습니다. 
$n$-큐비트 $|a\rangle$인 상태에 H-gate를 적용하고 어떤 변환이 일어나는지 살펴봅시다. # # $$ |a\rangle \xrightarrow{H^{\otimes n}} \frac{1}{\sqrt{2^n}} \sum_{x\in {0,1}^n} (-1)^ {a\cdot x}|x\rangle. $$ # # <details><summary>방정식 설명(확대하려면 클릭)</summary> 우리는 Hadamard가 하나의 큐비트에서 다음 변환을 수행한다는 것을 기억합니다.</details> # # $$ H|0\rangle = \tfrac{1}{\sqrt{2}}(|0\rangle + |1\rangle) $$ $$ H|1\rangle = \tfrac{1}{\sqrt{ 2}}(|0\rangle - |1\rangle) $$ # # 합 표기을 사용하여 다음과 같이 다시 작성할 수 있습니다. # # $$ H|a\rangle = \frac{1}{\sqrt{2}}\sum_{x\in {0,1}} (-1)^{a\cdot x}|x\rangle. $$ # # 두 개의 큐비트에 대해 각각에 Hadamard를 적용하면 다음 변환이 수행됩니다. # # $$ H^{\otimes 2}|00\rangle = \tfrac{1}{2}(|00\rangle + |01\rangle + |10\rangle + |11\rangle) $$ $$ H^{ \otimes 2}|01\rangle = \tfrac{1}{2}(|00\rangle - |01\rangle + |10\rangle - |11\rangle) $$ $$ H^{\otimes 2}| 10\rangle = \tfrac{1}{2}(|00\rangle + |01\rangle - |10\rangle - |11\rangle) $$ $$ H^{\otimes 2}|11\rangle = \ tfrac{1}{2}(|00\rangle - |01\rangle - |10\rangle + |11\rangle) $$ # # 합을 사용하여 아래와 같이 표현할 수 있습니다. # # $$ H^{\otimes 2}|a\rangle = \frac{1}{2}\sum_{x\in {0,1}^2} (-1)^{a\cdot x}|x\rangle $$ # # 이제 위의 수식을 도출해 보도록 합시다. # # # # # 특히, 양자 레지스터 $|00\dots 0\rangle$ 상태로 시작하여 $n$ Hadamard 게이트를 적용하면 익숙한 양자 중첩 상태를 얻게 됩니다. # # $$ |00\dots 0\rangle \xrightarrow{H^{\otimes n}} \frac{1}{\sqrt{2^n}} \sum_{x\in {0,1}^n} |x \rangle $$ # # 이 경우 $a=0$이므로 위상 항 $(-1)^{a\cdot x}$는 사라집니다. 따라서 $(-1)^{a\cdot x} = 1$입니다. # # 고전적 오라클 $f_s$는 $s \cdot x\mod 2 = 1$인 입력 $x$에 대해 $1$를 반환하고 그렇지 않으면 $0$를 반환합니다. Deutsch-Jozsa 알고리즘처럼 위상 반동 기법을 사용하고 $|{-}\rangle$ 상태의 큐비트에 대해 적용하면 다음 변환을 얻습니다. # # $$ |x \rangle \xrightarrow{f_s} (-1)^{s\cdot x} |x \rangle $$ # # 숨겨진 비트 문자열을 드러내는 알고리즘은 $|00\dots 0\rangle$의 Hadamard 변환에서 얻은 양자 중첩으로 양자 오라클 $f_s$를 쿼리하여 얻어집니다. 즉, # # $$ |00\dots 0\rangle \xrightarrow{H^{\otimes n}} \frac{1}{\sqrt{2^n}} \sum_{x\in {0,1}^n} |x \rangle \xrightarrow{f_a} \frac{1}{\sqrt{2^n}} \sum_{x\in {0,1}^n} (-1)^{a\cdot x}|x\rangle $$ # # $n$ Hadamard 게이트의 역수는 다시 $n$ Hadamard 게이트이므로 $a$를 얻을 수 있습니다. 
# # $$ \frac{1}{\sqrt{2^n}} \sum_{x\in {0,1}^n} (-1)^{a\cdot x}|x\rangle \xrightarrow{H^ {\otimes n}} |a\rangle $$ # ## 2. 예제<a id="example"></a> # # $n=2$ 큐비트와 숨겨진 문자열 $s=11$에 대한 구체적인 예를 살펴보겠습니다. 단 하나의 레지스터를 사용하여 Bernstein-Vazirani 양자 오라클 회로를 생성 과정은 참조 [2]의 공식을 따르고 있음에 유의하십시오. # # <ol> # <li>두 큐비트의 레지스터는 0으로 초기화됩니다.</li> # </ol> # # $$\lvert \psi_0 \rangle = \lvert 0 0 \rangle$$ # # # # # <li>두 큐비트에 Hadamard 게이트를 적용합니다.</li> # # # $$\lvert \psi_1 \rangle = \frac{1}{2} \left( \lvert 0 0 \rangle + \lvert 0 1 \rangle + \lvert 1 0 \rangle + \lvert 1 1 \rangle \right) $$ # # # # # <li>$s=11$ 문자열에 대해 양자 오라클은 $$ |x \rangle \xrightarrow{f_s} (-1)^{x\cdot 11} |x \rangle$$ 연산을 수행합니다.</li> # # # $$\lvert \psi_2 \rangle = \frac{1}{2} \left( (-1)^{00\cdot 11}|00\rangle + (-1)^{01\cdot 11}|01\ rangle + (-1)^{10\cdot 11}|10\rangle + (-1)^{11\cdot 11}|11\rangle \right)$$ # # $$\lvert \psi_2 \rangle = \frac{1}{2} \left( \lvert 0 0 \rangle - \lvert 0 1 \rangle - \lvert 1 0 \rangle + \lvert 1 1 \rangle \right) $$ # # # # # <li>두 큐비트에 Hadamard 게이트를 적용합니다.</li> # # # $$\lvert \psi_3 \rangle = \lvert 1 1 \rangle$$ # # # # # <li>숨겨진 문자열 $s=11$을 찾기 위한 측정</li> # # # # # # 아래 `bv_widget` 위젯을 사용합니다. 버튼을 눌러 다른 과정을 적용하고 알고리즘을 따라해 보십시오. 첫 두 개의 인수를 사용해 입력 큐비트 수와 숨겨진 문자열의 값을 변경할 수 있습니다. from qiskit_textbook.widgets import bv_widget bv_widget(2, "11") # ## 3. Qiskit 구현<a id="implementation"></a> # # 이제 $s=011$인 3비트 함수에 대한 Qiskit의 Bernstein-Vazirani 알고리즘 구현을 살펴보겠습니다. # + # initialization import matplotlib.pyplot as plt import numpy as np # importing Qiskit from qiskit import IBMQ, Aer from qiskit.providers.ibmq import least_busy from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, transpile # import basic plot tools from qiskit.visualization import plot_histogram # - # 먼저 실험에 사용될 큐비트의 수와 알고리즘에서 찾을 숨겨진 비트 문자열 $s$를 설정합니다. 숨겨진 비트 문자열 $s$는 양자 오라클의 회로를 결정합니다. 
n = 3 # number of qubits used to represent s s = '011' # the hidden binary string # 그런 다음 Qiskit을 사용하여 Bernstein-Vazirani 알고리즘을 프로그래밍합니다. # + # We need a circuit with n qubits, plus one auxiliary qubit # Also need n classical bits to write the output to bv_circuit = QuantumCircuit(n+1, n) # put auxiliary in state |-> bv_circuit.h(n) bv_circuit.z(n) # Apply Hadamard gates before querying the oracle for i in range(n): bv_circuit.h(i) # Apply barrier bv_circuit.barrier() # Apply the inner-product oracle s = s[::-1] # reverse s to fit qiskit's qubit ordering for q in range(n): if s[q] == '0': bv_circuit.i(q) else: bv_circuit.cx(q, n) # Apply barrier bv_circuit.barrier() #Apply Hadamard gates after querying the oracle for i in range(n): bv_circuit.h(i) # Measurement for i in range(n): bv_circuit.measure(i, i) bv_circuit.draw() # - # ### 3a. 시뮬레이터 실험<a id="simulation"></a> # # 시뮬레이터를 사용해 위의 회로를 실행해 봅시다. # + # use local simulator aer_sim = Aer.get_backend('aer_simulator') shots = 1024 results = aer_sim.run(bv_circuit).result() answer = results.get_counts() plot_histogram(answer) # - # 측정 결과가 숨겨진 문자열 `011` 임을 알 수 있습니다. # ### 3b. 실제 양자 백엔드로 실험<a id="device"></a> # # 아래와 같이 실제 장치에서 회로를 실행할 수 있습니다. # + tags=["uses-hardware"] # Load our saved IBMQ accounts and get the least busy backend device with less than or equal to 5 qubits IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') provider.backends() backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits <= 5 and x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational==True)) print("least busy backend: ", backend) # + tags=["uses-hardware"] # Run our circuit on the least busy backend. 
Monitor the execution of the job in the queue from qiskit.tools.monitor import job_monitor shots = 1024 transpiled_bv_circuit = transpile(bv_circuit, backend) job = backend.run(transpiled_bv_circuit, shots=shots) job_monitor(job, interval=2) # + tags=["uses-hardware"] # Get the results from the computation results = job.result() answer = results.get_counts() plot_histogram(answer) # - # 보시다시피 가장 높은 확률의 결과는 `011` 입니다. 다른 결들은 양자 계산의 에러로 인한 것입니다. # ## 4. 연습 <a id="problems"></a> # # 1. 아래 위젯을 사용하여 다양한 오라클에서 작동하는 Bernstein-Vazirani 알고리즘을 확인해 보십시오. from qiskit_textbook.widgets import bv_widget bv_widget(3, "011", hide_oracle=False) # 1. 위의 Bernstein-Vazirani 알고리즘의 [구현](#implementation) 은 숨겨진 비트 문자열 $s = 011$에 대한 것입니다. 숨겨진 문자열 $s = 1011$에 대하여 수정해서 구현해 봅시다. 결과가 예상대로입니까? 설명해 봅시다. # 2. 위의 Bernstein-Vazirani 알고리즘의 [구현](#implementation) 은 숨겨진 비트 문자열 $s = 011$에 대한 것입니다. 숨겨진 문자열 $s = 11101101$에 대하여 수정해서 구현해 봅시다. 결과가 예상대로입니까? 설명해 봅시다. # ## 5. 참고문헌<a id="references"></a> # # 1. <NAME> and <NAME> (1997) "Quantum Complexity Theory" SIAM Journal on Computing, Vol. 26, No. 5: 1411-1473, [doi:10.1137/S0097539796300921](https://doi.org/10.1137/S0097539796300921). # 2. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2001) "Implementation of a quantum algorithm to solve the Bernstein-Vazirani parity problem without entanglement on an ensemble quantum computer", Phys. Rev. A 64, 042306, [10.1103/PhysRevA.64.042306](https://doi.org/10.1103/PhysRevA.64.042306), [arXiv:quant-ph/0012114](https://arxiv.org/abs/quant-ph/0012114). import qiskit.tools.jupyter # %qiskit_version_table
translations/ko/ch-algorithms/bernstein-vazirani.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib.pyplot as plt import seaborn as sns import pandas as pd from sklearn.manifold import TSNE import random from matplotlib import pyplot sns.set() import numpy as np from sklearn.model_selection import train_test_split from sklearn import preprocessing from sklearn.preprocessing import FunctionTransformer from keras.layers import Dense, Dropout from keras.models import Sequential pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) pd.set_option('display.width', 2000) from keras.optimizers import SGD from sklearn.impute import SimpleImputer from sklearn.pipeline import make_pipeline, Pipeline, FeatureUnion import keras from tensorflow.python import debug as tf_debug from tensorflow.python.client import device_lib #print(device_lib.list_local_devices()) import import_ipynb from Helpers import * from PandasFeatureUnion import PandasFeatureUnion # + train = pd.read_csv('train.csv', index_col='Id') test = pd.read_csv('test.csv',index_col='Id') test_ids = test.index y_train = np.log(train['SalePrice']) np.random.seed(1) random.seed(1) total_bath= lambda x: x.FullBath+x.HalfBath*0.5 +x.BsmtHalfBath*0.5+x.BsmtFullBath bath_columns = ['FullBath','HalfBath','BsmtHalfBath', 'BsmtFullBath'] bath_pipeline = Pipeline([ ('SelectBathColumns', ColumnSelector(columns=bath_columns)), ('fillNaAsZero', FillNA(columns=bath_columns, fill_value=0)), ('combine', ApplyTransformer(fn=total_bath, name='Bath')), # ('drop',DropColumns(columns=bath_columns)) ]) important_columns = [ 'GrLivArea', 'MasVnrArea', 'LotArea', '1stFlrSF', 'YearBuilt','YearRemodAdd', 'OverallQual','MSSubClass', 'Neighborhood', 'SaleType', 'SaleCondition', 'BldgType','LotFrontage','GarageCars','CentralAir', 'PoolQC', 
'BsmtQual','ExterQual', 'KitchenQual', 'MasVnrType', 'HouseStyle' ] # No improovement # 'BldgType' cs = ColumnSelector(columns=important_columns) scdt = DummiesTransformer(column='MSSubClass', keys=[ 20, 30, 40, 45, 50, 60, 70, 75, 80, 85, 90, 120, 150, 160, 180, 190]) oqdt = DummiesTransformer(column='OverallQual', keys=range(1,11)) neigbTrans = DummiesTransformer(column='Neighborhood', keys=["Blmngtn", "Blueste", "BrDale", "BrkSide", "ClearCr", "CollgCr", "Crawfor", "Edwards", "Gilbert", "IDOTRR", "MeadowV", "Mitchel", "Names", "NoRidge", "NPkVill", "NridgHt", "NWAmes", "OldTown", "SWISU", "Sawyer", "SawyerW", "Somerst", "StoneBr", "Timber", "Veenker"]) bdTypeTrans= DummiesTransformer(column='BldgType', keys=['1Fam' ,'TwnhsE', 'Twnhs', 'Duplex', '2fmCon']) saleTypeTrans= DummiesTransformer(column='SaleType', keys=["WD","CWD","VWD","New","COD","Con","ConLw","ConLI","ConLD","Oth"]) bsmtQualTrans = DummiesTransformer(column='BsmtQual', keys=['Ex','Gd','TA','Fa','Po','None']) extQualTrans = DummiesTransformer(column='ExterQual', keys=['Ex','Gd','TA','Fa','Po','None']) kitQualTrans = DummiesTransformer(column='KitchenQual', keys=['Ex','Gd','TA','Fa','Po','None']) saleCondTrans = DummiesTransformer(column='SaleCondition', keys=['Normal', 'Abnorml', 'AdjLand','Alloca', 'Family', 'Partial']) masVnrTypeTrans = DummiesTransformer(column='MasVnrType', keys=['BrkCmn', 'BrkFace', 'CBlock','None', 'Stone']) lotConfigTrans = DummiesTransformer(column='LotConfig', keys=['Inside', 'Corner', 'CulDSac', 'FR2', 'FR3']) aircTrans = DummiesTransformer(column='CentralAir', keys=['Y', 'N']) houseStyleTrans = DummiesTransformer(column='HouseStyle', keys=['1Story','1.5Fin','1.5Unf', '2Story','2.5Fin','2.5Unf', 'SFoyer','SLvl']) core_pipeline = Pipeline( [('SelectColumns', cs), ('DummiesMSSubClass', scdt), ('OverallQual', oqdt), ('DummiesNeighborhood', neigbTrans), ('DummiesSaleType', saleTypeTrans), ('DummiesBldgType',bdTypeTrans), ('fillNaAsZero', 
FillNA(columns=['LotFrontage','GarageCars','MasVnrArea'], fill_value=0)), ('fillNaAsNone', FillNA(columns=['BsmtQual','ExterQual','KitchenQual', 'SaleCondition','MasVnrType'], fill_value='None')), ('LogLotFrontage', ApplyTransformer(fn=lambda x:np.log1p(x.LotFrontage), name='LotFrontage')), ('LogGrLivArea', ApplyTransformer(fn=lambda x:np.log1p(x.GrLivArea), name='GrLivArea')), ('LogMasVnrArea', ApplyTransformer(fn=lambda x:np.log1p(x.GrLivArea), name='MasVnrArea')), ('Log1stFlrSF', ApplyTransformer(fn=lambda x:np.log1p(x['1stFlrSF']), name='1stFlrSF')), ('PoolQC_Exists', ApplyTransformer(fn=lambda x:0 if (pd.isnull(x['PoolQC'])) else 1, name='PoolQC')), ('BsmtQualTrans', bsmtQualTrans), ('ExtQualTrans', extQualTrans), ('kitQualTrans', kitQualTrans), ('saleCondTrans', saleCondTrans), ('houseStyletrans', houseStyleTrans), ('masVnrTypeTrans',masVnrTypeTrans), ('aircTrans', aircTrans) ]) pp = PandasFeatureUnion([ ('bath',bath_pipeline), ('core', core_pipeline)], n_jobs=None) X_train = pp.fit_transform(train) X_test = pp.transform(test) # + # define model # cc=keras.constraints.MinMaxNorm(min_value=0.0, max_value=2.0, rate=1, axis=0) model = Sequential() input_layer = Dense(20, # kernel_constraint=cc, # kernel_regularizer=keras.regularizers.l1_l2(l1=0.01, l2=0.01), # bias_regularizer=keras.regularizers.l1_l2(l1=0.01, l2=0.01), input_dim=len(X_train.columns), activation='relu', kernel_initializer='normal') model.add(input_layer) model.add(Dropout(rate=0.1)) model.add(Dense(50, activation='relu', kernel_initializer='normal') ) model.add(Dense(25, activation='relu', kernel_initializer='normal') ) model.add(Dense(10, activation='relu', kernel_initializer='normal') ) model.add(Dense(1, activation='linear')) optimizer = keras.optimizers.SGD(lr=0.001) #Adam(lr=0.0015); # compile model model.compile(loss='mean_squared_error', optimizer=optimizer) # + trainX, testX, trainy, testy = train_test_split(X_train, y_train, test_size=0.3, random_state=1) trainX_ids = trainX.index 
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1)) scaler.fit(trainX) trainX= scaler.transform(trainX) testX= scaler.transform(testX) testData = X_test X_test=scaler.transform(X_test) # fit model early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=100, verbose=1, mode='auto', baseline=None, restore_best_weights=True) tb_callback =keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None, update_freq='epoch') history = model.fit(trainX, trainy, validation_data=(testX, testy), epochs=4000, verbose=0, batch_size=32, callbacks=[early_stop, tb_callback]) drawTFModel(history, model, trainX, trainy, testX, testy) # - result = model.predict(X_test) dfR = pd.DataFrame(test_ids,columns=['Id']) dfR['SalePrice']=np.exp(result) dfR.to_csv('predictions.csv',index = None, header=True) # + tt = pd.DataFrame(testy.copy()) trainedPrice = model.predict(testX) tt['PredictedLog'] = trainedPrice tt['Predicted']=np.exp(trainedPrice) tt['Real']=np.exp(testy) tt['Delta'] = tt.Predicted - tt.Real tt['DeltaLog'] =tt.PredictedLog - tt.SalePrice plt.scatter(tt.index, tt.Delta) plt.show() plt.scatter(tt.index, tt.DeltaLog) plt.show() problematicIDs = tt[np.abs(tt.DeltaLog) >0.2] train.loc[problematicIDs.index, :] # - tt.describe() df=train # + # #!rm -rf logs # - pd.DataFrame(X_test, columns=testData.columns).describe().T test_ids testData.index testData.loc[2577,:] n=lambda x: (1 if (pd.isnull(x)) else 0) n(np.nan) n(55) # + # Utility function to visualize the outputs of PCA and t-SNE def fashion_scatter(x, colors): # choose a color palette with seaborn. num_classes = len(np.unique(colors)) palette = np.array(sns.color_palette("hls", num_classes)) # create a scatter plot. 
f = plt.figure(figsize=(8, 8)) ax = plt.subplot(aspect='equal') sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40, c=palette[colors] ) ax.grid(linestyle='-', linewidth='0.5', color='red') plt.xlim(-25, 25) plt.ylim(-25, 25) ax.axis('off') ax.axis('tight') # add the labels for each digit corresponding to the label txts = [] for i in range(num_classes): # Position of each label at median of data points. xtext, ytext = np.median(x[colors == i, :], axis=0) txt = ax.text(xtext, ytext, str(i), fontsize=24) txt.set_path_effects([ PathEffects.Stroke(linewidth=5, foreground="w"), PathEffects.Normal()]) txts.append(txt) return f, ax, sc, txts # - n(None) pd.isnull('sss') d1 = X_train.copy() d1['SalePrice'] = y_train import time time_start = time.time() tsne = TSNE(random_state=1, n_components=2, perplexity=2500).fit_transform(d1) print( 't-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start)) fashion_scatter(tsne, pd.Series([0])) tsne[0] len(tsne)
TF_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # ## Setup # To use panda-client API in notebook, you need to set it up first. # # #### Setup Jupyter interface # + pycharm={"name": "#%%\n"} from pandatools import panda_jupyter panda_jupyter.setup() # - # #### Import panda-client API # + pycharm={"name": "#%%\n"} from pandatools import Client # - # #### Get an OIDC ID token # You need to get an OIDC ID token unless you use X509 authentication with grid middleware. # The following method triggers the device code authentication flow and gives # a PanDA IAM URL to redirect to your identity provider. # + pycharm={"name": "#%%\n"} Client.hello() # + [markdown] pycharm={"name": "#%% md\n"} # If you already have a token you will immediately see an OK message. # Otherwise, you will see something like # ```html # INFO : Please go to https://panda-iam-doma.cern.ch/device?user_code=EEPGDH and sign in. Waiting until authentication is completed # INFO : Ready to get ID token? # [y/n] # ``` # Note that you need to get a token to the URL before entering **y** in the prompt. # # Once you go to the URL using your web browser, such as Chrome, FireFox, etc, # you will be asked to sign in with your own ID provider. # # <img src="https://github.com/PanDAWMS/panda-docs/blob/main/docs/source/client/images/iam-1.png?raw=true" width="300" class="center" /> # # Then you will be navigated to a federated IAM platform, CILogon, to choose your own # identity provider. 
# # <br> # # <img src="https://github.com/PanDAWMS/panda-docs/blob/main/docs/source/client/images/iam-2.png?raw=true" width="300" class="center" /> # <br> # <br> # <br> # # When you successfully sign in you need to approve the virtual organization to retrieve # your profile, such as name and email address, from your identity provider. # # <br> # # <img src="https://github.com/PanDAWMS/panda-docs/blob/main/docs/source/client/images/iam-4.png?raw=true" width="300" class="center" /> # <br> # <br> # <br> # # You approved it and see a succeeded message in your browser, and now you can # enter **y** in the notebook prompt to get a token. # ```html # [y/n] # y # INFO : All set # OK # ``` # Generally you don't have to repeat the procedure once you get a token since it is automatically renewed. # # ---- # # Download [notebook](https://raw.githubusercontent.com/PanDAWMS/panda-docs/main/docs/source/client/notebooks/jupyter_setup.ipynb)
docs/source/client/notebooks/jupyter_setup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.6.0 # language: julia # name: julia-1.6 # --- # # Working with GPX files using Unitful, UnitfulRecipes using Dates using DataFrames using GPX using Geodesy using GarishPrint using Statistics # First, we load our GPX file. We also pretty-print the loaded representation to get a better idea of its structure, which is helpful when figuring out which elements to select. gpx = GPX.read_gpx_file("03-Jul-2021-1609.gpx"); GarishPrint.pprint(gpx; color=false) gpxpoints = gpx.tracks.collection[1].segments[1].points; points = DataFrame(gpxpoints) function Geodesy.LLA(point::eltype(gpxpoints); flatten=false) Geodesy.LLA(point.lat, point.lon, (flatten ? zero(point.ele) : point.ele)) end geo_points = Geodesy.LLA.(gpxpoints) function Geodesy.euclidean_distance(points::typeof(geo_points)) [Geodesy.euclidean_distance(points[n], points[n+1]) for n in 1:(length(points)-1)] end distances = Geodesy.euclidean_distance(geo_points)u"m" sum(distances) |> u"ft" geo_points_flat = Geodesy.LLA.(gpxpoints; flatten=true) distances_flat = Geodesy.euclidean_distance(geo_points_flat)u"m" sum(distances_flat) |> u"ft" timediffs = diff(points.time) speeds = (distances ./ timediffs) mean(speeds) |> u"m/s"
Working with GPX files.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # GroupBy # ### Introduction: # # GroupBy can be summarized as Split-Apply-Combine. # # Special thanks to: https://github.com/justmarkham for sharing the dataset and materials. # # Check out this [Diagram](http://i.imgur.com/yjNkiwL.png) # ### Step 1. Import the necessary libraries import pandas as pd # ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/drinks.csv). pd.read_csv(____) # ### Step 3. Assign it to a variable called drinks and look at the first 10 rows of the DF. # + drinks = pd.read_csv(____) drinks.____(____) # - # ### Step 4. Which continent drinks more beer on average? drinks.groupby(____)[_____].mean() # ### Step 5. For each continent print the summary statistics for wine consumption. drinks.groupby('continent')[____]._____() # ### Step 6. Print the mean alcoohol consumption per continent for every column _____ # ### Step 7. Print the median alcoohol consumption per continent for every column ____ # ### Step 8. Print the mean, min and max values for spirit consumption. # drinks.groupby('continent')['spirit_servings'].aggregate([____]) # ### Step 9. Print the mean, min and max values for beer consumption and the mean and std deviation for wine consumption. import numpy as np ____ # ### Step 10. Create a custom aggregation function and apply it to some of the columns
Phase_1/Pandas_groupby/alcohol_consumption_practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # This python script demonstrates the creation of 3D volumes from points using extrude and rotate methods # + import paramak rotated_straights = paramak.RotateStraightShape( points=[ (400, 100), (400, 200), (600, 200), (600, 100) ], rotation_angle = 180 ) rotated_straights.show() # + rotated_spline = paramak.RotateSplineShape( points=[ (500, 0), (500, -20), (400, -300), (300, -300), (400, 0), (300, 300), (400, 300), (500, 20), ], rotation_angle = 180 ) rotated_spline.show() # + rotated_mixed = paramak.RotateMixedShape( points=[ (100, 0, 'straight'), (200, 0, 'circle'), (250, 50, 'circle'), (200, 100, 'straight'), (150, 100, 'spline'), (140, 75, 'spline'), (110, 45, 'spline'), ], rotation_angle = 180 ) rotated_mixed.show() # - # This makes a circular shape and rotates it to make a solid # + rotated_circle = paramak.RotateCircleShape( points=[(50, 0)], radius=5, rotation_angle=180 ) rotated_circle.show() # + rotated_circle = paramak.ExtrudeCircleShape( points=[(50, 0)], radius=5, distance=15 ) rotated_circle.show() # - # This makes a banana shape with straight edges and extrudes it to make a solid # + extruded_straight = paramak.ExtrudeStraightShape( points=[ (300, -300), (400, 0), (300, 300), (400, 300), (500, 0), (400, -300), ], distance=200 ) extruded_straight.show() # - # This makes a banana shape and rotates it to make a solid # + extruded_spline = paramak.ExtrudeSplineShape( points=[ (500, 0), (500, -20), (400, -300), (300, -300), (400, 0), (300, 300), (400, 300), (500, 20), ], distance=200, ) extruded_spline.show() # - # This makes a shape with straight, circular and spline edges and extrudes it to make a solid # + extruded_mixed = paramak.ExtrudeMixedShape( points=[ (100, 0, 'straight'), (200, 0, 'circle'), (250, 50, 'circle'), 
(200, 100, 'straight'), (150, 100, 'spline'), (140, 75, 'spline'), (110, 45, 'spline'), ], distance=200 ) extruded_mixed.show() # -
examples/example_parametric_shapes/make_CAD_from_points.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Chessbits Solver # Solve the chessbits puzzle using with help from <NAME>'s [Sudoku solver](http://norvig.com/sudoku.html) # + num_bits = 8 num_elements = 128 bit_mappings = defaultdict(list) for i in range(num_elements): bit_mappings[i].append(i) for bit_index in range(num_bits - 1): mask = 1 << bit_index mask2 = num_elements - 1 bit_mappings[i].append((i ^ mask) & mask2) assert all(len(bit_mappings[i]) == 8 for i in bit_mappings) # + # New Version from collections import defaultdict # Update the units, rows, peers, digits digits = ''.join(range(64)) # '12345678' rows = 'ABCDEFGHIJKLMNOP' cols = digits squares = cross(rows, cols) symmetric_map = dict(zip(squares, squares[::-1])) units = {} units = defaultdict(list) for i, s in enumerate(squares): for mapped_bit in bit_mappings[i]: new_unit = [squares[remapped_bit] for remapped_bit in bit_mappings[mapped_bit]] units[s].append(new_unit) # get the squares for this unit peers = {s: set(sum(units[s],[]))-set([s]) for s in squares} # - digits = ''.join(str(x) for x in range(64)) # '12345678' rows = ''.join(str(x) for x in range(64)) # '12345678' print(digits) print(rows) # + def test(): assert len(squares) == 128 #assert all(len(peers[s]) == 15 for s in squares) print("All tests pass") test() # + # code for parsing def parse_grid(grid): """Convert grid to a dict of possible values, {square: digits}, or return False if a contradiction is detected.""" ## To start, every square can be any digit; then assign values from the grid. values = dict((s, digits) for s in squares) for s,d in grid_values(grid).items(): if d in digits and not assign(values, s, d): return False ## (Fail if we can't assign d to square s.) 
return values def grid_values(grid): "Convert grid into a dict of {square: char} with '0' or '.' for empties." chars = [c for c in grid if c in digits or c in '0.'] assert len(chars) == num_elements return dict(zip(squares, chars)) # + # Code for constraint propagation def assign(values, s, d): """Eliminate all the other values (except d) from values[s] and propagate. Return values, except return False if a contradiction is detected.""" other_values = values[s].replace(d, '') if all(eliminate(values, s, d2) for d2 in other_values): # Find the symmetric square and eliminate those values as well # return values symmetric_square = symmetric_map[s] if all(eliminate(values, symmetric_square, d2) for d2 in other_values): return values else: return False def eliminate(values, s, d): """Eliminate d from values[s]; propagate when values or places <= 2. Return values, except return False if a contradiction is detected.""" if d not in values[s]: return values ## Already eliminated values[s] = values[s].replace(d,'') ## (1) If a square s is reduced to one value d2, then eliminate d2 from the peers. if len(values[s]) == 0: return False ## Contradiction: removed last value elif len(values[s]) == 1: d2 = values[s] if not all(eliminate(values, s2, d2) for s2 in peers[s]): return False ## (2) If a unit u is reduced to only one place for a value d, then put it there. for u in units[s]: dplaces = [s for s in u if d in values[s]] if len(dplaces) == 0: return False ## Contradiction: no place for this value elif len(dplaces) == 1: # d can only be in one place in unit; assign it there if not assign(values, dplaces[0], d): return False return values # + # Code for displaying def display(values): "Display these values as a 2-D grid." 
width = 1+max(len(values[s]) for s in squares) line = '-'* width*8 print(line) for r in rows: print(''.join(values[r+c].center(width) for c in cols)) print(line) print() grid1 = "12030400050000000600000000000000070000000000000000000000000000800800000000000000000000000000007000000000000000600000005000403021" display(parse_grid(grid1)) # - # It looks like just constraint propagation is not enough, sadly. We will have to continue searching. # + def solve(grid): return search(parse_grid(grid)) def search(values): "Using depth-first search and propagation, try all possible values." if not values: return False ## Failed earlier if all(len(values[s]) == 1 for s in squares): return values ## Solved! # display(values) # print("press enter") # _ = input() ## Chose the unfilled square s with the fewest possibilities n,s = min((len(values[s]), s) for s in squares if len(values[s]) > 1) return some(search(assign(values.copy(), s, d)) for d in values[s]) def some(seq): "Return some element of seq that is true." for e in seq: if e: return e return False result = solve(grid1) if(result): display(result) else: print("no solution found") # - [result[s] for s in squares]
chessbits.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Planning # + [markdown] tags=[] # # Lesson 1: # # - **Topics:** # 1. la forma de continuar por tu cuenta # - busca ejercicios en blanco # - pandas_exercises # - resolving-python-machine-leanrning # - explicarte la forma # - videos de youtube... # - razonamiento general # - otras formas de buscar ejercicios # 2. practica de linear regression # 3. tal # - **Task for Next Lesson:** # 1. [ ] tal # 2. [ ] tal # 3. [ ] tal # - import seaborn as sns df = sns.load_dataset('mpg') import matplotlib.pyplot as plt plt type(sns.scatterplot(x='weight', y='mpg', data=df)) sns.scatterplot(x='weight', y='mpg', data=df) plt.xlim(1500, 3500) # + [markdown] tags=[] # Especialidades y asignaturas # # E1. Anatíca de Datos (Data Analytics) - <NAME> # # Esta especialidad se dirige a introducir al estudiante en los conceptos, los métodos, las técnicas y las herramientas que utilizan los sistemas de inteligencia de negocio, macrodatos (big data) y ciencia de datos, con casos prácticos y el uso de software especializado. # # Se compone de las siguientes asignaturas: # # ## Fundamentos de inteligencia de negocio (6 créditos): # # en esta asignatura el estudiante se familiariza con un sistema completo de inteligencia de negocio (la «fábrica de información») y con los diferentes componentes: el almacén de datos, los procesos de extracción y transformación, la creación del almacén de datos, el análisis multidimensional y la elaboración de informes y cuadros de mando. El estudiante trabaja con diferentes herramientas (Pentaho, MySQL, Tableau) y sobre bases de conocimiento de la consultora Gartner. 
# # ## Fundamentos del big data (6 créditos): # # en esta asignatura el estudiante trabaja lo que algunos han llamado la «gestión extrema de la información», es decir, la transformación del enorme volumen de datos oculto en el interior de la propia organización o presente a su alrededor, los distintos tipos de datos e información y su aplicación en la empresa. Se estudia el ciclo de vida de la gestión de macrodatos y los aspectos tecnológicos, legales y éticos. El estudiante trabaja con universos de datos propios de la universidad, cedidos por empresas o procedentes de las redes sociales, por medio de herramientas como Apache Hadoop y Apache Spark. # # - terminal # # ## Fundamentos de data science (6 créditos): # # esta asignatura presenta los conceptos y la tipología de análisis de diferentes tipos de datos, los modelos y los algoritmos de uso más frecuente de clasificación y agrupación, y las metodologías y los estándares profesionales y científicos que se utilizan en analítica de negocio y la ciencia de datos aplicada. En esta asignatura, el estudiante trabaja principalmente con R y Rstudio, aunque pueden hacerse ejercicios con otras herramientas. # # E2. Gestión de Datos (Data Management) - SEGUNDO SEMESTRE # # Esta especialidad se dirige a proporcionar al profesional de perfil tecnológico capacidades prácticas para gestionar y almacenar datos relacionales y no relacionales, así como gestionar los datos como un activo de valor por medio del gobierno de datos. # # Se compone de las siguientes asignaturas: # # · Data governance (6 créditos): en esta asignatura, el estudiante se familiariza con el gobierno de datos, una práctica que une personas, procesos y tecnología para cambiar la forma en que los datos son adquiridos, gestionados, mantenidos, transformados en información, compartidos en el contexto de la organización como conocimiento común y sistemáticamente obtenidos por la empresa para mejorar la rentabilidad. 
El estudiante trabaja con herramientas ofimáticas (DOC, XLS, PPT) y con herramientas especializadas para el desarrollo de un programa de gobierno de datos (Trifacta o Talend). # # · Bases de datos analíticos (6 créditos): en esta asignatura se aprende a crear un almacén de datos adecuado que ofrezca apoyo en la toma de decisiones de la organización. Se presenta de manera conceptual la arquitectura de almacenamiento (data warehousing) y se dan pautas para construir este tipo de sistemas. La puesta en práctica se lleva a cabo mediante la resolución de un caso práctico extenso para el que se usan varias herramientas especializadas (Pentaho, Microsoft, Oracle y PostgreSQL). # # · Bases de datos NoSQL (6 créditos): las bases de datos NoSQL constituyen una alternativa a las bases de datos relacionales y son especialmente adecuadas para ciertos dominios de aplicación: dominios que trabajan con grandes volúmenes de datos, dominios donde se necesite una distribución o disponibilidad altas, dominios que trabajan con datos poco estructurados y dominios en los que se establecen múltiples y complejas interrelaciones de los datos. En esta asignatura se presentan los principios y los conceptos de este tipo de bases de datos, los modelos de datos subyacentes y los problemas que presenta la distribución en el almacenamiento y la gestión de los datos. Se trabajan varios tipos de bases de datos NoSQL (clave-valor, documentos, orientadas a columnas y grafos) con herramientas como Riak, MongoDB o Neo4j. # -
00 Get Your Shit Together/Planning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Clean data from Data_Gathering notebook # Data was collected by week and candidate (Biden, Klobuchar, Sanders, Warren) and includes all kinds of tweets. I wanted to clean so that each candidate has one corpus of tweets and to exclude retweets to get only original tweets, so analysis is easier. # Import necessary modules, name the sentiment intensity analyzer, name list of stopwords, and name tweet tokernizer "tt" import os import json from nltk.tokenize import TweetTokenizer, WordPunctTokenizer import datetime from collections import Counter import string from nltk.corpus import stopwords from nltk.sentiment.vader import SentimentIntensityAnalyzer import random sid = SentimentIntensityAnalyzer() stoplist=stopwords.raw('english').split('\n') tt = TweetTokenizer() # Name new working directory and download functions from functions folder # %run functions.ipynb # Show how data is organized in folders and the number of documents in each folder for item in os.listdir('data'): if item.startswith('.'): continue num_of_docs = len([f for f in os.listdir(os.path.join('data', item)) if f.endswith('.json')]) print('{}\t{} texts'.format(item, num_of_docs)) # ### Cleaning the tweets and starting analysis # The purpose of cleaning the data in this way is to get all the tweets in one corpus for analysis and to get only original tweets about candidates # # #### For all of the tweets about Biden: # * Load all of the tweets from each week and name # * Make a compound list of dictionaries of all the tweets using a for loop and the extend function # * Make a list of dictionaries of the tweets that does not include retweets using loop, if statements and append function # # + Biden1 = load_tweets('data/BidenWk1/@JoeBiden_2020-02-03_to_2020-02-09.json') Biden2 = 
load_tweets('data/BidenWk1/Biden_2020-02-03_to_2020-02-09.json') Biden3 = load_tweets('data/BidenWk2/@JoeBiden_2020-02-10_to_2020-02-16.json') Biden4 = load_tweets('data/BidenWk2/Biden_2020-02-10_to_2020-02-16.json') Biden5 = load_tweets('data/BidenWk3/@JoeBiden_2020-02-17_to_2020-02-23.json') Biden6 = load_tweets('data/BidenWk3/Biden_2020-02-17_to_2020-02-23.json') Biden7 = load_tweets('data/BidenWk4/@JoeBiden_2020-02-24_to_2020-03-01.json') Biden8 = load_tweets('data/BidenWk4/Biden_2020-02-24_to_2020-03-01.json') Biden9 = load_tweets('data/BidenWk5/@JoeBiden_2020-03-02_to_2020-03-05.json') Biden10 = load_tweets('data/BidenWk5/Biden_2020-03-02_to_2020-03-05.json') # - Biden_tweets = [] for tweets in (Biden1, Biden2, Biden3, Biden4, Biden5, Biden6, Biden7, Biden8, Biden9, Biden10): Biden_tweets.extend(tweets) Biden_tweets_nrt = [] for tweet in Biden_tweets: if 'RT @' not in tweet['text']: Biden_tweets_nrt.append(tweet) # #### For all of the tweets about Klobuchar: # * Load all of the tweets from each week and name # * Make a compound list of dictionaries of all the tweets using a for loop and the extend function # * Make a list of dictionaries of the tweets that does not include retweets using loop, if statements and append function # + Klobuchar1 = load_tweets('data/KlobucharWk1/@amyklobuchar_2020-02-03_to_2020-02-09 (1).json') Klobuchar2 = load_tweets('data/KlobucharWk1/klobuchar_2020-02-03_to_2020-02-09 (1).json') Klobuchar3 = load_tweets('data/KlobucharWk2/@amyklobuchar_2020-02-10_to_2020-02-16.json') Klobuchar4 = load_tweets('data/KlobucharWk2/klobuchar_2020-02-10_to_2020-02-16.json') Klobuchar5 = load_tweets('data/KlobucharWk3/@amyklobuchar_2020-02-17_to_2020-02-23.json') Klobuchar6 = load_tweets('data/KlobucharWk3/klobuchar_2020-02-17_to_2020-02-23.json') Klobuchar7 = load_tweets('data/KlobucharWk4/@amyklobuchar_2020-02-24_to_2020-03-01.json') Klobuchar8 = load_tweets('data/KlobucharWk4/klobuchar_2020-02-24_to_2020-03-01.json') Klobuchar9 = 
load_tweets('data/KlobucharWk5/@amyklobuchar_2020-03-02_to_2020-03-05.json') Klobuchar10 = load_tweets('data/KlobucharWk5/klobuchar_2020-03-02_to_2020-03-05.json') # - Klob_tweets = [] for tweets in (Klobuchar1, Klobuchar2, Klobuchar3, Klobuchar4, Klobuchar5, Klobuchar6, Klobuchar7, Klobuchar8, Klobuchar9, Klobuchar10): Klob_tweets.extend(tweets) Klob_tweets_nrt = [] for tweet in Klob_tweets: if 'RT @' not in tweet['text']: Klob_tweets_nrt.append(tweet) # #### For all of the tweets about Sanders: # * Load all of the tweets from each week and name # * Make a compound list of dictionaries of all the tweets using a for loop and the extend function # * Make a list of dictionaries of the tweets that does not include retweets using loop, if statements and append function # # + Sanders1 = load_tweets('data/SandersWk1/@BernieSanders_2020-02-03_to_2020-02-09 (1).json') Sanders2 = load_tweets('data/SandersWk1/bernie_2020-02-03_to_2020-02-09 (1).json') Sanders3 = load_tweets('data/SandersWk2/@BernieSanders_2020-02-10_to_2020-02-16.json') Sanders4 = load_tweets('data/SandersWk2/bernie_2020-02-10_to_2020-02-16.json') Sanders5 = load_tweets('data/SandersWk3/@BernieSanders_2020-02-17_to_2020-02-23.json') Sanders6 = load_tweets('data/SandersWk3/bernie_2020-02-17_to_2020-02-23.json') Sanders7 = load_tweets('data/SandersWk4/@BernieSanders_2020-02-24_to_2020-03-01.json') Sanders8 = load_tweets('data/SandersWk4/bernie_2020-02-24_to_2020-03-01.json') Sanders9 = load_tweets('data/SandersWk5/@BernieSanders_2020-03-02_to_2020-03-05.json') Sanders10 = load_tweets('data/SandersWk5/bernie_2020-03-02_to_2020-03-05.json') # - Sanders_tweets = [] for tweets in (Sanders1, Sanders2, Sanders3, Sanders4, Sanders5, Sanders6, Sanders7, Sanders8, Sanders9, Sanders10): Sanders_tweets.extend(tweets) Sanders_tweets_nrt = [] for tweet in Sanders_tweets: if 'RT @' not in tweet['text']: Sanders_tweets_nrt.append(tweet) # #### For all of the tweets about Warren: # * Load all of the tweets from each week and 
name # * Make a compound list of dictionaries of all the tweets using a for loop and the extend function # * Make a list of dictionaries of the tweets that does not include retweets using loop, if statements and append function # # + Warren1 = load_tweets('data/WarrenWk1/@ewarren_2020-02-03_to_2020-02-09 (1).json') Warren2 = load_tweets('data/WarrenWk1/warren_2020-02-03_to_2020-02-09 (1).json') Warren3 = load_tweets('data/WarrenWk2/@ewarren_2020-02-10_to_2020-02-16.json') Warren4 = load_tweets('data/WarrenWk2/warren_2020-02-10_to_2020-02-16.json') Warren5 = load_tweets('data/WarrenWk3/@ewarren_2020-02-17_to_2020-02-23.json') Warren6 = load_tweets('data/WarrenWk3/warren_2020-02-17_to_2020-02-23.json') Warren7 = load_tweets('data/WarrenWk4/@ewarren_2020-02-24_to_2020-03-01.json') Warren8 = load_tweets('data/WarrenWk4/warren_2020-02-24_to_2020-03-01.json') Warren9 = load_tweets('data/WarrenWk5/@ewarren_2020-03-02_to_2020-03-05.json') Warren10 = load_tweets('data/WarrenWk5/warren_2020-03-02_to_2020-03-05.json') # - Warren_tweets = [] for tweets in (Warren1, Warren2, Warren3, Warren4, Warren5, Warren6, Warren7, Warren8, Warren9, Warren10): Warren_tweets.extend(tweets) Warren_tweets_nrt = [] for tweet in Warren_tweets: if 'RT @' not in tweet['text']: Warren_tweets_nrt.append(tweet) # Take the data that was cleaned and make into neat files to be able to transfer to other notebooks with open('move_data/corpus_data1.json', 'w') as out: out.write(json.dumps(Biden_tweets_nrt)) with open('move_data/corpus_data2.json', 'w') as out: out.write(json.dumps(Klob_tweets_nrt)) with open('move_data/corpus_data3.json', 'w') as out: out.write(json.dumps(Sanders_tweets_nrt)) with open('move_data/corpus_data4.json', 'w') as out: out.write(json.dumps(Warren_tweets_nrt))
previous_final_project_examples/cochroch/Data_Cleaning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + colab={"base_uri": "https://localhost:8080/"} id="YJ12sG4P92gx" outputId="e0517a03-1452-4731-ec65-5051ae2412e2"
from collections import deque


class Graph:
    """Directed weighted graph supporting A* shortest-path search."""

    def __init__(self, adjacency_list):
        # Maps node -> list of (neighbor, edge_weight) pairs.
        self.adjacency_list = adjacency_list

    def get_neighbors(self, v):
        """Return the (neighbor, weight) pairs reachable from node v."""
        return self.adjacency_list[v]

    def h(self, n):
        """Heuristic estimate of the remaining cost from n to the goal.

        The constant value 1 is admissible for this graph, so A* behaves
        like Dijkstra's algorithm here.  Unknown nodes fall back to 1
        instead of raising KeyError (generalizes the original hard-coded
        A-D table; known nodes keep their original values).
        """
        H = {'A': 1, 'B': 1, 'C': 1, 'D': 1}
        return H.get(n, 1)

    def a_star_algorithm(self, start_node, stop_node):
        """Return the cheapest path from start_node to stop_node as a list
        of node names, or None (after printing a message) if no path exists."""
        open_list = {start_node}            # discovered, not yet fully expanded
        closed_list = set()                 # fully expanded nodes
        g = {start_node: 0}                 # cheapest known cost from the start
        parents = {start_node: start_node}  # back-pointers for path reconstruction

        while open_list:
            # Pick the open node with the lowest f = g + h.
            # (BUG FIX: original compared with `n == None`; identity test is correct.)
            n = None
            for v in open_list:
                if n is None or g[v] + self.h(v) < g[n] + self.h(n):
                    n = v

            if n is None:
                print('Path does not exist!')
                return None

            if n == stop_node:
                # Follow parent links back to the start, then reverse.
                reconst_path = []
                while parents[n] != n:
                    reconst_path.append(n)
                    n = parents[n]
                reconst_path.append(start_node)
                reconst_path.reverse()
                print('Path found: {}'.format(reconst_path))
                return reconst_path

            for (m, weight) in self.get_neighbors(n):
                if m not in open_list and m not in closed_list:
                    # First time we discover m.
                    open_list.add(m)
                    parents[m] = n
                    g[m] = g[n] + weight
                elif g[m] > g[n] + weight:
                    # Found a cheaper route to m; update and, if m was
                    # already expanded, reopen it.
                    g[m] = g[n] + weight
                    parents[m] = n
                    if m in closed_list:
                        closed_list.remove(m)
                        open_list.add(m)

            open_list.remove(n)
            closed_list.add(n)

        print('Path does not exist!')
        return None


adjacency_list = {
    'A': [('B', 1), ('C', 3), ('D', 7)],
    'B': [('D', 5)],
    'C': [('D', 12)]
}
graph1 = Graph(adjacency_list)
graph1.a_star_algorithm('A', 'D')

# + id="CiXr-DVs_ujY"
A_search.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # A scratch notebook - anything in here can be safely deleted import pickle import torch file = r'/groups/bishop/bishoplab/projects/probabilistic_model_synthesis/results/real_data/gnlr/same_cond_transfer_analysis/v19/fold_str_base_14_tgt_1/fold_0/subj_10/comb/debug.pt' with open(file, 'rb') as f: rs = pickle.load(f) rs['sp'].keys() rs['sp']['elbos']['train']
development/scratch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .sh # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Bash # language: bash # name: bash # --- # # FABRIC API Examples: Jupyter Notebooks # # Your FABRIC JupyterHub environment comes with a set of example notebooks. Below is a list of many of the examples. Click the links to open the example notebooks. # # ### Update the Example Notebooks # # Occasionally, we will add example notebooks and update existing examples. The examples are stored in this [github repo](https://github.com/fabric-testbed/jupyter-examples). # # Run the following Jupyter cell to ensure you have the most current notebooks. You will need to close and reopen any updated notebooks including the `start_here` notebook that you are currently reading. cd /home/fabric/work/jupyter-examples git stash git pull origin master # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## Getting Started Tutorials # # - [Hello, FABRIC](./fabric_examples/basic_examples/hello_fabric.ipynb): Simple First Slice Example # - Intro to FABRIC Tutorial (TODO) # # ## Basic Examples # # # ### Testbed Sites and Resources # # - [Find Available Resources](./fabric_examples/basic_examples/get_available_resources.ipynb): Query for a list of currently available resources. # # ### Managing Slices # # - [Create Slice](./fabric_examples/basic_examples/create_slice.ipynb): Slice construction using Fabric Information model # - [Delete Slice](./fabric_examples/basic_examples/delete_slice.ipynb): Delete a slice # - [Delete All Slices](./fabric_examples/basic_examples/delete_all_slices.ipynb): Delete all slices you own. # - [Get an Existing Slice](./fabric_examples/basic_examples/get_slice.ipynb): Get an existing slice. 
# - [Renew a Slice Reservation](./fabric_examples/basic_examples/update_slice_renew.ipynb): Renew a slice reservation # # # ### Compute Nodes # # - [Get Nodes](./fabric_examples/basic_examples/get_nodes.ipynb): Get the nodes from an existing slice. # - [Log into a Node with SSH](./fabric_examples/basic_examples/log_into_node_with_ssh.ipynb): Show how to remotely log into a running FABRIC node. # - [Setting Node Capacities](./fabric_examples/basic_examples/setting_node_capaciites.ipynb): Discusses several options for setting node capacities. # - [Create a Node with Components](./fabric_examples/basic_examples/create_node.ipynb): Create a single node with components # # ### Storage Components # # - [Basic NVMe Devices](./fabric_examples/basic_examples/basic_nvme_devices.ipynb): Create a node with a NVMe block device. # - [Benchmarking FABRIC Storage: Local disk and NVMe](./fabric_examples/basic_examples/benchmarking_storage.ipynb): Create and benchmark a node with local disk and NVMe device. 
# # ### GPU Components # # - [Basic GPUs Devices](./fabric_examples/basic_examples/basic_gpu_devices.ipynb): Create a node with a GPU # # ### Networking # # - [Create a simple Layer 2 Bridge](./fabric_examples/basic_examples/create_network_l2bridge.ipynb): # - [Create Layer 2 Bridge with Smart NICs](./fabric_examples/basic_examples/create_network_l2bridge_smart_nic.ipynb): # - [Create Layer 2 Bridge with Smart NICs and VLAN Tagged Interfaces](./fabric_examples/basic_examples/create_network_l2bridge_smart_nic_tagged.ipynb): # - [Create Layer 2 Point-to-point Circuit](./fabric_examples/basic_examples/create_network_l2ptp.ipynb): # - [Create Layer 2 Site-to-site Network](./fabric_examples/basic_examples/create_network_l2sts.ipynb ): # - [Create Layer 2 Site-to-site Network with VLAN Tagged Interfaces](./fabric_examples/basic_examples/create_network_l2sts_tagged.ipynb): # # ## Complex Recipes # # - [Kubernetes](./fabric_examples/complex_recipes/kubernetes-simple.ipynb): Basic Creating and managing a Kubernetes cluster. # # # ## Workshop Demos # # ### FABRIC Workshop (Spring 2021) # # - [Simple Demo](./fabric_examples/public_demos/FABRIC-Workshop-Spring2021/simple-workshop-demo.ipynb). A basic demos that walks through the essential steps of running Slices, including token creation/usage, resource availability, and the basics of building, creating, managing and deleting Slices. # - [Latency Demo](./fabric_examples/public_demos/FABRIC-Workshop-Spring2021/latency-workshop-demo.ipynb). An example of how to create links between nodes and monitor latency # # # -
start_here.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Programming Assignment: # ## Готовим LDA по рецептам # Как вы уже знаете, в тематическом моделировании делается предположение о том, что для определения тематики порядок слов в документе не важен; об этом гласит гипотеза «мешка слов». Сегодня мы будем работать с несколько нестандартной для тематического моделирования коллекцией, которую можно назвать «мешком ингредиентов», потому что на состоит из рецептов блюд разных кухонь. Тематические модели ищут слова, которые часто вместе встречаются в документах, и составляют из них темы. Мы попробуем применить эту идею к рецептам и найти кулинарные «темы». Эта коллекция хороша тем, что не требует предобработки. Кроме того, эта задача достаточно наглядно иллюстрирует принцип работы тематических моделей. # # Для выполнения заданий, помимо часто используемых в курсе библиотек, потребуются модули *json* и *gensim*. Первый входит в дистрибутив Anaconda, второй можно поставить командой # # *pip install gensim* # # Построение модели занимает некоторое время. На ноутбуке с процессором Intel Core i7 и тактовой частотой 2400 МГц на построение одной модели уходит менее 10 минут. # ### Загрузка данных # Коллекция дана в json-формате: для каждого рецепта известны его id, кухня (cuisine) и список ингредиентов, в него входящих. Загрузить данные можно с помощью модуля json (он входит в дистрибутив Anaconda): import json with open("recipes.json") as f: recipes = json.load(f) print(recipes[0]) # ### Составление корпуса from gensim import corpora, models import numpy as np # Наша коллекция небольшая, и целиком помещается в оперативную память. Gensim может работать с такими данными и не требует их сохранения на диск в специальном формате. 
Для этого коллекция должна быть представлена в виде списка списков, каждый внутренний список соответствует отдельному документу и состоит из его слов. Пример коллекции из двух документов: # # [["hello", "world"], ["programming", "in", "python"]] # # Преобразуем наши данные в такой формат, а затем создадим объекты corpus и dictionary, с которыми будет работать модель. texts = [recipe["ingredients"] for recipe in recipes] dictionary = corpora.Dictionary(texts) # составляем словарь corpus = [dictionary.doc2bow(text) for text in texts] # составляем корпус документов print(texts[0]) print(corpus[0]) # У объекта dictionary есть полезная переменная dictionary.token2id, позволяющая находить соответствие между ингредиентами и их индексами. # ### Обучение модели # Вам может понадобиться [документация](https://radimrehurek.com/gensim/models/ldamodel.html) LDA в gensim. # __Задание 1.__ Обучите модель LDA с 40 темами, установив количество проходов по коллекции 5 и оставив остальные параметры по умолчанию. # # # Затем вызовите метод модели *show_topics*, указав количество тем 40 и количество токенов 10, и сохраните результат (топы ингредиентов в темах) в отдельную переменную. Если при вызове метода *show_topics* указать параметр *formatted=True*, то топы ингредиентов будет удобно выводить на печать, если *formatted=False*, будет удобно работать со списком программно. Выведите топы на печать, рассмотрите темы, а затем ответьте на вопрос: # # Сколько раз ингредиенты "salt", "sugar", "water", "mushrooms", "chicken", "eggs" встретились среди топов-10 всех 40 тем? При ответе __не нужно__ учитывать составные ингредиенты, например, "hot water". # # Передайте 6 чисел в функцию save_answers1 и загрузите сгенерированный файл в форму. # # У gensim нет возможности фиксировать случайное приближение через параметры метода, но библиотека использует numpy для инициализации матриц. 
# Therefore, as the library author suggests, the random state has to be fixed
# with the command written in the next cell.  __Always insert that
# random.seed line right before the line that builds the model.__

np.random.seed(76543)
# Build the LDA model: 40 topics, 5 passes over the collection.
lda = models.LdaModel(corpus=corpus, num_topics=40, id2word=dictionary, passes=5)

# Top-10 tokens of each of the 40 topics;
# formatted=False returns (topic_id, [(word, prob), ...]) pairs.
wrds = lda.show_topics(num_topics=40, num_words=10, formatted=False)

words_to_search = ["salt", "sugar", "water", "mushrooms", "chicken", "eggs"]

# Count how many topic top-10s each target ingredient appears in.
# A Counter over all top words replaces the original 30-line chain of
# manual `if ...: c_x += 1` statements; the resulting counts are identical.
from collections import Counter

top_word_counts = Counter(
    word for _topic, top_words in wrds for word, _prob in top_words
)
c_salt, c_sugar, c_water, c_mushrooms, c_chicken, c_eggs = (
    top_word_counts[w] for w in words_to_search
)
print(c_salt, c_sugar, c_water, c_mushrooms, c_chicken, c_eggs)


def save_answers1(c_salt, c_sugar, c_water, c_mushrooms, c_chicken, c_eggs):
    """Write the six ingredient counts, space-separated, to the grader file."""
    with open("cooking_LDA_pa_task1.txt", "w") as fout:
        fout.write(" ".join([str(el) for el in [c_salt, c_sugar, c_water, c_mushrooms, c_chicken, c_eggs]]))

# ### Filtering the dictionary
# The first three ingredients show up in topic tops far more often than the
# last three, yet chicken, eggs and mushrooms say much more about what we are
# cooking than salt, sugar and water.  So even recipes contain words that are
# frequent across the collection but carry little meaning, and we would
# rather not see them in topics.  The simplest way to fight such background
# tokens is frequency filtering of the dictionary.  Dictionaries are usually
# filtered from both sides: very rare words are removed (to save memory) and
# very frequent words are removed (to make topics more interpretable).
Мы уберем только частые слова. import copy dictionary2 = copy.deepcopy(dictionary) # __Задание 2.__ У объекта dictionary2 есть переменная *dfs* — это словарь, ключами которого являются id токена, а элементами — число раз, сколько слово встретилось во всей коллекции. Сохраните в отдельный список ингредиенты, которые встретились в коллекции больше 4000 раз. Вызовите метод словаря *filter_tokens*, подав в качестве первого аргумента полученный список популярных ингредиентов. Вычислите две величины: dict_size_before и dict_size_after — размер словаря до и после фильтрации. # # Затем, используя новый словарь, создайте новый корпус документов, corpus2, по аналогии с тем, как это сделано в начале ноутбука. Вычислите две величины: corpus_size_before и corpus_size_after — суммарное количество ингредиентов в корпусе (для каждого документа вычислите число различных ингредиентов в нем и просуммируйте по всем документам) до и после фильтрации. # # Передайте величины dict_size_before, dict_size_after, corpus_size_before, corpus_size_after в функцию save_answers2 и загрузите сгенерированный файл в форму. 
inp_dict = dictionary2.dfs dict_size_before = len(inp_dict) bad_ids = [] keys = dictionary2.dfs.keys() for i in range(len(inp_dict)): if inp_dict[i] > 4000: bad_ids.append(i) bad_ids dict_size_before dictionary2.filter_tokens(bad_ids = bad_ids) new_dict = dictionary2.dfs dict_size_after = len(new_dict) corpus_size_before = np.sum([len(i) for i in corpus]) print("corpus_size_before =", corpus_size_before) corpus2 = [dictionary2.doc2bow(text) for text in texts] corpus_size_after = np.sum([len(i) for i in corpus2]) print("corpus_size_after =", corpus_size_after) save_answers2(dict_size_before, dict_size_after, corpus_size_before, corpus_size_after) def save_answers2(dict_size_before, dict_size_after, corpus_size_before, corpus_size_after): with open("cooking_LDA_pa_task2.txt", "w") as fout: fout.write(" ".join([str(el) for el in [dict_size_before, dict_size_after, corpus_size_before, corpus_size_after]])) # ### Сравнение когерентностей # __Задание 3.__ Постройте еще одну модель по корпусу corpus2 и словарю dictionary2, остальные параметры оставьте такими же, как при первом построении модели. Сохраните новую модель в другую переменную (не перезаписывайте предыдущую модель). Не забудьте про фиксирование seed! # # Затем воспользуйтесь методом *top_topics* модели, чтобы вычислить ее когерентность. Передайте в качестве аргумента соответствующий модели корпус. Метод вернет список кортежей (топ токенов, когерентность), отсортированных по убыванию последней. Вычислите среднюю по всем темам когерентность для каждой из двух моделей и передайте в функцию save_answers3. 
# %%time
np.random.seed(76543)
# Build the second model on the filtered corpus/dictionary (same settings
# as the first model).
lda2 = models.LdaModel(corpus=corpus2, num_topics=40, id2word=dictionary2, passes=5)

# %%time
np.random.seed(76543)
topics2 = lda2.top_topics(corpus=corpus2)
topics1 = lda.top_topics(corpus=corpus)

# top_topics() yields (top-tokens, coherence) pairs sorted by coherence;
# average the coherences over all topics for each model.
# (Comprehensions replace the original append loops; same values.)
coherence = np.mean([tpcs[1] for tpcs in topics1])
coherence2 = np.mean([tpcs2[1] for tpcs2 in topics2])


def save_answers3(coherence, coherence2):
    """Write both mean coherences to the grader submission file."""
    # NOTE(review): "%3f" means minimum field width 3 with the default six
    # decimals, not three decimal places; "%.3f" was probably intended —
    # confirm against the grader's expected format before changing it.
    with open("cooking_LDA_pa_task3.txt", "w") as fout:
        fout.write(" ".join(["%3f" % el for el in [coherence, coherence2]]))


# BUG FIX: save_answers3 was originally called in a cell that preceded its
# definition, so a clean top-to-bottom run raised NameError; the definition
# now comes before the call.
save_answers3(coherence, coherence2)
print(coherence, coherence2)

# Coherence is considered to correlate well with human judgments of topic
# interpretability, so on large text collections coherence usually improves
# after background vocabulary is removed.  In our case, however, it did not.

# ### Studying the effect of the alpha hyperparameter
# In this section we work with the second model, i.e. the one built on the
# reduced corpus.
#
# So far we have only looked at the topic-word matrix; now let us look at
# the topic-document matrix.  Print the topics of the zeroth (or any other)
# document of the corpus using the second model's get_document_topics method:

lda2.get_document_topics(corpus2[0])

# Also print the contents of the second model's *.alpha* attribute:

lda2.alpha

# You should find that a document is characterized by a small number of
# topics.  Let us try changing the alpha hyperparameter, which sets the
# Dirichlet prior over per-document topic distributions.

# __Task 4.__ Train a third model: use the reduced corpus (corpus2 and
# dictionary2) and set __alpha=1__, passes=5.  Do not forget to fix the
# seed!  Print the new model's topics for the zeroth document; the
# distribution over topics should come out almost uniform.
Чтобы убедиться в том, что во второй модели документы описываются гораздо более разреженными распределениями, чем в третьей, посчитайте суммарное количество элементов, __превосходящих 0.01__, в матрицах темы-документы обеих моделей. Другими словами, запросите темы модели для каждого документа с параметром *minimum_probability=0.01* и просуммируйте число элементов в получаемых массивах. Передайте две суммы (сначала для модели с alpha по умолчанию, затем для модели в alpha=1) в функцию save_answers4. # %%time np.random.seed(76543) lda3 = models.LdaModel(corpus = corpus2, passes = 5, id2word= dictionary2, alpha = 1, num_topics = 40) count_model3 = 0 for i in corpus2: count_model3 += len(lda3.get_document_topics(i, minimum_probability=0.01)) count_model3 count_model2 = 0 for i in corpus2: count_model2 += len(lda2.get_document_topics(i, minimum_probability=0.01)) count_model2 def save_answers4(count_model2, count_model3): with open("cooking_LDA_pa_task4.txt", "w") as fout: fout.write(" ".join([str(el) for el in [count_model2, count_model3]])) # Таким образом, гиперпараметр __alpha__ влияет на разреженность распределений тем в документах. Аналогично гиперпараметр __eta__ влияет на разреженность распределений слов в темах. save_answers4(count_model2,count_model3) # ### LDA как способ понижения размерности # Иногда, распределения над темами, найденные с помощью LDA, добавляют в матрицу объекты-признаки как дополнительные, семантические, признаки, и это может улучшить качество решения задачи. Для простоты давайте просто обучим классификатор рецептов на кухни на признаках, полученных из LDA, и измерим точность (accuracy). # # __Задание 5.__ Используйте модель, построенную по сокращенной выборке с alpha по умолчанию (вторую модель). Составьте матрицу $\Theta = p(t|d)$ вероятностей тем в документах; вы можете использовать тот же метод get_document_topics, а также вектор правильных ответов y (в том же порядке, в котором рецепты идут в переменной recipes). 
Создайте объект RandomForestClassifier со 100 деревьями, с помощью функции cross_val_score вычислите среднюю accuracy по трем фолдам (перемешивать данные не нужно) и передайте в функцию save_answers5. from sklearn.ensemble import RandomForestClassifier from sklearn.cross_validation import cross_val_score len(recipes) theta_matr = np.zeros((39774, 40)) theta_matr.shape for i in range(len(corpus2)): clf = RandomForestClassifier(n_estimators=100) def save_answers5(accuracy): with open("cooking_LDA_pa_task5.txt", "w") as fout: fout.write(str(accuracy)) # Для такого большого количества классов это неплохая точность. Вы можете попроовать обучать RandomForest на исходной матрице частот слов, имеющей значительно большую размерность, и увидеть, что accuracy увеличивается на 10–15%. Таким образом, LDA собрал не всю, но достаточно большую часть информации из выборки, в матрице низкого ранга. # ### LDA — вероятностная модель # Матричное разложение, использующееся в LDA, интерпретируется как следующий процесс генерации документов. # # Для документа $d$ длины $n_d$: # 1. Из априорного распределения Дирихле с параметром alpha сгенерировать распределение над множеством тем: $\theta_d \sim Dirichlet(\alpha)$ # 1. Для каждого слова $w = 1, \dots, n_d$: # 1. Сгенерировать тему из дискретного распределения $t \sim \theta_{d}$ # 1. Сгенерировать слово из дискретного распределения $w \sim \phi_{t}$. # # Подробнее об этом в [Википедии](https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation). # # В контексте нашей задачи получается, что, используя данный генеративный процесс, можно создавать новые рецепты. 
# You can pass the model and a number of ingredients into the function and
# generate a recipe :)

def generate_recipe(model, num_ingredients):
    """Sample and print `num_ingredients` ingredient names from a trained
    gensim LDA model.

    Follows the LDA generative process: draw a topic distribution theta from
    the model's Dirichlet prior, then for each ingredient draw a topic
    t ~ theta and a word w ~ phi_t.
    """
    theta = np.random.dirichlet(model.alpha)
    for i in range(num_ingredients):
        t = np.random.choice(np.arange(model.num_topics), p=theta)
        topic = model.show_topic(t, topn=model.num_terms)
        topic_distr = [x[1] for x in topic]
        terms = [x[0] for x in topic]
        w = np.random.choice(terms, p=topic_distr)
        # BUG FIX: original used Python-2 `print w`, a SyntaxError under the
        # Python 3 kernel this notebook declares.
        print(w)

# ### Interpreting the model
# You can inspect the top ingredients of every topic.  Most topics look like
# recipes in their own right; some collect products of a single kind, for
# example fresh fruit or different sorts of cheese.
#
# Let us empirically relate our topics to national cuisines.  Build a matrix
# A of size topics x cuisines whose elements a_tc are the sums of p(t|d)
# over all documents d assigned to cuisine c.  We normalize the matrix by
# the per-cuisine recipe counts to avoid imbalance between cuisines.  The
# following function takes the model, the corpus object and the raw data and
# returns the normalized matrix A.  It is convenient to visualize it with
# seaborn.
import pandas

# %matplotlib inline

def compute_topic_cuisine_matrix(model, corpus, recipes):
    """Build the normalized topic-by-cuisine matrix A.

    Element a_{tc} is the sum of p(t|d) over all documents d assigned to cuisine c,
    divided by the number of recipes of cuisine c (to avoid imbalance between cuisines).

    Parameters
    ----------
    model : trained LDA model exposing `num_topics` and `get_document_topics(bow)`
    corpus : iterable of bag-of-words documents, aligned with `recipes`
    recipes : list of dicts, each with a "cuisine" key

    Returns
    -------
    pandas.DataFrame of shape (num_topics, num_cuisines)
    """
    # Target labels: the set of cuisines present in the data.
    targets = list(set([recipe["cuisine"] for recipe in recipes]))
    # Accumulate p(topic|document) into the column of the recipe's cuisine.
    tc_matrix = pandas.DataFrame(data=np.zeros((model.num_topics, len(targets))), columns=targets)
    for recipe, bow in zip(recipes, corpus):
        recipe_topic = model.get_document_topics(bow)
        for t, prob in recipe_topic:
            # FIX: .loc instead of the original chained `tc_matrix[cuisine][t] += prob`;
            # chained assignment is unreliable and becomes a silent no-op under
            # pandas copy-on-write semantics.
            tc_matrix.loc[t, recipe["cuisine"]] += prob
    # Normalize each cuisine column by the number of recipes of that cuisine.
    target_sums = pandas.DataFrame(data=np.zeros((1, len(targets))), columns=targets)
    for recipe in recipes:
        target_sums[recipe["cuisine"]] += 1
    return pandas.DataFrame(tc_matrix.values/target_sums.values, columns=tc_matrix.columns)


def plot_matrix(tc_matrix):
    """Draw the topic-by-cuisine matrix as a square heatmap."""
    # Imported lazily so the heavy plotting stack is only needed when actually plotting.
    import seaborn
    from matplotlib import pyplot as plt
    plt.figure(figsize=(10, 10))
    seaborn.heatmap(tc_matrix, square=True)

# Visualize the matrix

# The darker a square in the matrix, the stronger the connection between that topic and
# the given cuisine. We see that there are topics associated with several cuisines. Such
# topics show sets of ingredients that are popular in the cuisines of several nations,
# i.e. they indicate a similarity between those cuisines. Some topics are spread evenly
# across all cuisines; they show sets of products that are often used in cooking in all
# countries.

# A pity the dataset has no recipe names, otherwise the topics would be easier to
# interpret...

# ### Conclusion
# In this assignment you built several LDA models, looked at what the hyperparameters
# of the model influence and how the trained model can be used.
unsupervised-learning/week4/edit_CookingLDA_PA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.5 64-bit (''ga15pyd'': conda)' # language: python # name: python3 # --- # # Script for Getting Screenshots of Route Maps # # <NAME> # # Licensed under the MIT license # # This program shows how to use selenium to generate .png versions of the .html route maps generated by route_maps_builder_v2. Automating this screenshot process is much easier than getting manual screenshots of each map. # + import os from selenium import webdriver # See https://www.selenium.dev/documentation/ ff_driver = webdriver.Firefox() # For the above line to execute correctly in Windows, # you'll need to download geckodriver, then add the path to the folder # containing the .exe file to your system environment variable. # ALternately, you could specify the path to the geckodriver.exe file within # webdriver.Firefox(), as follows:: # ff_driver = webdriver.Firefox( # executable_path=r'C:\Users\kburc\Downloads\geckodriver-v0.30.0-win64\geckodriver.exe') # This was based on Nesa's answer at # https://stackoverflow.com/a/42122284/13097194 # ff_driver.set_window_position(0,0) # See https://www.selenium.dev/documentation/webdriver/browser/windows/#set-window-size # for set_window_position and set_window_size window_width = 2200 # I experimented with this setting until I found a # dimension that produced decent (if not perfect) representations of # the domestic and international route map views. Interestingly, this # setting (when plugged into set_window_size below) # resulted in dimensions of 4378 x 2294 pixels. This may be because I have # my monitor set to 200% scaling. At any rate, the output (4378 x 2294) produced # pretty good image files. ff_driver.set_window_size(window_width,window_width*(9/16)) # 9/16 is used to maintain the 16:9 aspect ratio seen in HD, 4K, etc. screens. 
# ff_driver.set_window_size(3840,2160) resulted in a 7658x4140 image for me import time path_to_maps_folder = \ r'C:\Users\kburc\D1V1\Documents\!Dell64docs\Programming\py\kjb3_programs\route_maps_builder\folium_maps' # - for root, dirs, files in os.walk('folium_maps'): # See https://docs.python.org/3/library/os.html file_list = files file_list file_list[0][:-4] # + for i in range(len(file_list)): ff_driver.get(path_to_maps_folder+'\\'+file_list[i]) # See https://www.selenium.dev/documentation/webdriver/browser/navigation/ time.sleep(2) # This gives the page sufficient # time to load the map tiles before the screenshot is taken. screenshot_test = ff_driver.get_screenshot_as_file( 'folium_map_screenshots\\'+file_list[i][:-4]+'png') # Based on https://www.selenium.dev/selenium/docs/api/java/org/openqa/selenium/TakesScreenshot.html # The file name equals the name of the .html file of each route map (except # for the 'html' component) plus png to represent the image type. ff_driver.quit() # Based on https://www.selenium.dev/documentation/webdriver/browser/windows/
get_screenshots.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build (image, instance-mask) training pairs for the ICPR 2020 text-segmentation
# data: each PAGE-XML text region is rasterized into the alpha channel of the page
# image, the page is optionally resized and split into vertical chunks, and the RGB
# chunks plus their alpha-channel masks are saved side by side.

from pathlib import Path
import os
from citlab_python_util.parser.xml.page.page import Page
from PIL import Image, ImageDraw
import numpy as np
from pathlib import Path  # NOTE: duplicate import, harmless
import cv2

path = '/Users/davoodwadi/ICPR 2020 Text Segmentation/simple_pages_train'
pathxmls = os.path.join(path,'xmls')
pathimages = os.path.join(path,'images')
save_folder = os.path.join(path,'save')
save_folder

p = Path(pathimages)

# One-off renaming pass (kept for reference): force every image extension to .png.
# for image in p.iterdir():
# #     print(image.suffix)
#     name = image.stem
#     suffix = image.suffix
#     os.rename(pathimages+f'/{name}{suffix}', pathimages+f'/{name}.png')

pathxmls

p

# +
# Fixed 2-way split at target width 600: save top/bottom halves of every page image
# and of its instance mask.
for imggg in p.iterdir():
    # get a page
    res = 600
    imageName = imggg.stem
    path_to_page = os.path.join(pathxmls,f'{imageName}.xml')
    path_to_image = os.path.join(pathimages,f'{imageName}.png')
    page_filename = os.path.basename(path_to_page)
    save_path = os.path.join(save_folder,page_filename)
    # Generate page object
    page_obj = Page(path_to_xml=path_to_page)
    # all lines of the image
    list_of_image_lines = page_obj.get_textlines()
    # all regions of the image (without graphic regions)
    # These are the main blocks that need to be identified and assigned to the test set
    list_of_image_regions = page_obj.get_text_regions()
    factor = 2
    img = Image.open(path_to_image)
    img = img.convert(mode='RGBA')#.resize((res, (int(res*img.size[1]//img.size[0]))))
    # Rasterize every region polygon into a single-channel mask; region k gets the
    # label k+1 so that 0 stays background.
    alpha = Image.new('L', img.size)
    draw = ImageDraw.Draw(alpha)
    for i in range(len(list_of_image_regions)):
        tmp = list_of_image_regions[i]
        draw.polygon(tmp.points.points_list, fill=i+1)
    imgArr = np.array(img)
    imArr = np.array(img)
    imArr[:,:,3] = alpha
    # INTER_NEAREST keeps the integer mask labels intact (no interpolated values).
    imgArr = cv2.resize(imgArr, (res, (int(res*img.size[1]//img.size[0]))), interpolation=cv2.INTER_NEAREST)
    imArr = cv2.resize(imArr, (res, (int(res*img.size[1]//img.size[0]))), interpolation=cv2.INTER_NEAREST)
    imgArr1 = imgArr[:int(imgArr.shape[0]/factor), :, :]
    imgArr2 = imgArr[int(imgArr.shape[0]/factor):, :, :]
    imArr1 = imArr[:int(imgArr.shape[0]/factor), :, :]
    imArr2 = imArr[int(imgArr.shape[0]/factor):, :, :]
    img1 = Image.fromarray(imgArr1)
    img2 = Image.fromarray(imgArr2)
    im1 = Image.fromarray(imArr1)
    im2 = Image.fromarray(imArr2)
    # im.getchannel(3).save(path+f'/masks/{imageName}.png')
    img1.save(path+f'/images2_600/{imageName}_1.png')
    img2.save(path+f'/images2_600/{imageName}_2.png')
    im1.getchannel(3).save(path+f'/masks2_600/{imageName}_1.png')
    im2.getchannel(3).save(path+f'/masks2_600/{imageName}_2.png')
# -

# ## instance masks for different factors
# in corresponding factor directory/ corresponding factor naming

# +
# N-way split at full resolution (no resize): save `factor` vertical chunks per page.
for imggg in p.iterdir():
    # get a page
    # res = 800
    imageName = imggg.stem
    path_to_page = os.path.join(pathxmls,f'{imageName}.xml')
    path_to_image = os.path.join(pathimages,f'{imageName}.png')
    page_filename = os.path.basename(path_to_page)
    save_path = os.path.join(save_folder,page_filename)
    # Generate page object
    page_obj = Page(path_to_xml=path_to_page)
    # all lines of the image
    list_of_image_lines = page_obj.get_textlines()
    # all regions of the image (without graphic regions)
    # These are the main blocks that need to be identified and assigned to the test set
    list_of_image_regions = page_obj.get_text_regions()
    factor = 2
    img = Image.open(path_to_image)
    img = img.convert(mode='RGBA')#.resize((res, (int(res*img.size[1]//img.size[0]))))
    alpha = Image.new('L', img.size)
    draw = ImageDraw.Draw(alpha)
    for i in range(len(list_of_image_regions)):
        tmp = list_of_image_regions[i]
        draw.polygon(tmp.points.points_list, fill=i+1)
    imgArr = np.array(img)
    imArr = np.array(img)
    imArr[:,:,3] = alpha
    # imgArr = cv2.resize(imgArr, (res, (int(res*img.size[1]//img.size[0]))), interpolation=cv2.INTER_NEAREST)
    # imArr = cv2.resize(imArr, (res, (int(res*img.size[1]//img.size[0]))), interpolation=cv2.INTER_NEAREST)
    imgArrList=[]
    imArrList=[]
    amount = int(imgArr.shape[0]/factor)
    # NOTE(review): the flattened source carries no `else` here, so on the last
    # iteration the full tail chunk is saved first and then overwritten under the same
    # file name by the truncated [i*amount:(i+1)*amount] chunk — an `else` before the
    # second save pair was probably intended. TODO confirm against the original notebook.
    for i in range(factor):
        if i==factor-1:
            tmpArr = imgArr[i*amount:, :, :]
            Image.fromarray(tmpArr).save(path+f'/images{factor}/{imageName}_{factor}_{i+1}.png')
            tmArr = imArr[i*amount:, :, :]
            Image.fromarray(tmArr).getchannel(3).save(path+f'/masks{factor}/{imageName}_{factor}_{i+1}.png')
        tmpArr = imgArr[i*amount:(i+1)*amount, :, :]
        Image.fromarray(tmpArr).save(path+f'/images{factor}/{imageName}_{factor}_{i+1}.png')
        tmArr = imArr[i*amount:(i+1)*amount, :, :]
        Image.fromarray(tmArr).getchannel(3).save(path+f'/masks{factor}/{imageName}_{factor}_{i+1}.png')
    # imgArr1 = imgArr[:int(imgArr.shape[0]/factor), :, :]
    # imgArr2 = imgArr[int(imgArr.shape[0]/factor):, :, :]
    # imArr1 = imArr[:int(imgArr.shape[0]/factor), :, :]
    # imArr2 = imArr[int(imgArr.shape[0]/factor):, :, :]
    # img1 = Image.fromarray(imgArr1)
    # img2 = Image.fromarray(imgArr2)
    # im1 = Image.fromarray(imArr1)
    # im2 = Image.fromarray(imArr2)
    # # im.getchannel(3).save(path+f'/masks/{imageName}.png')
    # img1.save(path+f'/images2_600/{imageName}_1.png')
    # img2.save(path+f'/images2_600/{imageName}_2.png')
    # im1.getchannel(3).save(path+f'/masks2_600/{imageName}_1.png')
    # im2.getchannel(3).save(path+f'/masks2_600/{imageName}_2.png')

# +
# Alternate resize - split2
# Binary "alternate" labelling: regions get label 1 or 2 by index parity, the page is
# resized to width 800, then split into top/bottom halves.
for imggg in p.iterdir():
    # get a page
    res = 800
    imageName = imggg.stem
    path_to_page = os.path.join(pathxmls,f'{imageName}.xml')
    path_to_image = os.path.join(pathimages,f'{imageName}.png')
    page_filename = os.path.basename(path_to_page)
    save_path = os.path.join(save_folder,page_filename)
    # Generate page object
    page_obj = Page(path_to_xml=path_to_page)
    # all lines of the image
    list_of_image_lines = page_obj.get_textlines()
    # all regions of the image (without graphic regions)
    # These are the main blocks that need to be identified and assigned to the test set
    list_of_image_regions = page_obj.get_text_regions()
    factor = 2
    img = Image.open(path_to_image)
    img = img.convert(mode='RGBA')#.resize((res, (int(res*img.size[1]//img.size[0]))))
    alpha = Image.new('L', img.size)
    draw = ImageDraw.Draw(alpha)
    for i in range(len(list_of_image_regions)):
        tmp = list_of_image_regions[i]
        ff = 1 if i%2==0 else 2
        draw.polygon(tmp.points.points_list, fill=ff)
    imgArr = np.array(img)
    imArr = np.array(img)
    imArr[:,:,3] = alpha
    imgArr = cv2.resize(imgArr, (res, (int(res*img.size[1]//img.size[0]))), interpolation=cv2.INTER_NEAREST)
    imArr = cv2.resize(imArr, (res, (int(res*img.size[1]//img.size[0]))), interpolation=cv2.INTER_NEAREST)
    imgArr1 = imgArr[:int(imgArr.shape[0]/factor), :, :]
    imgArr2 = imgArr[int(imgArr.shape[0]/factor):, :, :]
    imArr1 = imArr[:int(imgArr.shape[0]/factor), :, :]
    imArr2 = imArr[int(imgArr.shape[0]/factor):, :, :]
    img1 = Image.fromarray(imgArr1)
    img2 = Image.fromarray(imgArr2)
    im1 = Image.fromarray(imArr1)
    im2 = Image.fromarray(imArr2)
    # im.getchannel(3).save(path+f'/masks/{imageName}.png')
    img1.save(path+f'/imagesAlternate/{imageName}_1.png')
    img2.save(path+f'/imagesAlternate/{imageName}_2.png')
    im1.getchannel(3).save(path+f'/masksAlternate/{imageName}_1.png')
    im2.getchannel(3).save(path+f'/masksAlternate/{imageName}_2.png')

# +
# Alternate resize - whole
# Same alternate labelling, but the resized page is saved whole (no split).
for imggg in p.iterdir():
    # get a page
    res = 800
    imageName = imggg.stem
    path_to_page = os.path.join(pathxmls,f'{imageName}.xml')
    path_to_image = os.path.join(pathimages,f'{imageName}.png')
    page_filename = os.path.basename(path_to_page)
    save_path = os.path.join(save_folder,page_filename)
    # Generate page object
    page_obj = Page(path_to_xml=path_to_page)
    # all lines of the image
    list_of_image_lines = page_obj.get_textlines()
    # all regions of the image (without graphic regions)
    # These are the main blocks that need to be identified and assigned to the test set
    list_of_image_regions = page_obj.get_text_regions()
    factor = 2
    img = Image.open(path_to_image)
    img = img.convert(mode='RGBA')#.resize((res, (int(res*img.size[1]//img.size[0]))))
    alpha = Image.new('L', img.size)
    draw = ImageDraw.Draw(alpha)
    for i in range(len(list_of_image_regions)):
        tmp = list_of_image_regions[i]
        ff = 1 if i%2==0 else 2
        draw.polygon(tmp.points.points_list, fill=ff)
    imgArr = np.array(img)
    imArr = np.array(img)
    imArr[:,:,3] = alpha
    imgArr = cv2.resize(imgArr, (res, (int(res*img.size[1]//img.size[0]))), interpolation=cv2.INTER_NEAREST)
    imArr = cv2.resize(imArr, (res, (int(res*img.size[1]//img.size[0]))), interpolation=cv2.INTER_NEAREST)
    # imgArr1 = imgArr[:int(imgArr.shape[0]/factor), :, :]
    # imgArr2 = imgArr[int(imgArr.shape[0]/factor):, :, :]
    # imArr1 = imArr[:int(imgArr.shape[0]/factor), :, :]
    # imArr2 = imArr[int(imgArr.shape[0]/factor):, :, :]
    img = Image.fromarray(imgArr)
    # img2 = Image.fromarray(imgArr2)
    im = Image.fromarray(imArr)
    # im2 = Image.fromarray(imArr2)
    # im.getchannel(3).save(path+f'/masks/{imageName}.png')
    img.save(path+f'/imagesAlternate/{imageName}.png')
    # img2.save(path+f'/imagesAlternate/{imageName}_2.png')
    im.getchannel(3).save(path+f'/masksAlternate/{imageName}.png')
    # im2.getchannel(3).save(path+f'/masksAlternate/{imageName}_2.png')
# -

im1.getchannel(3)

resized_image = cv2.resize(np.array(im1), (800, 800), interpolation=cv2.INTER_LINEAR)
resized_image.shape

altM=path+'/masksAlternate/'

# +
# Sanity check: scale a saved mask's small integer labels up (x100) so they are
# visible on screen.
ii=Image.open(altM+os.listdir(altM)[2])
iiArr = np.array(ii)*100
Image.fromarray(iiArr)
# -

altM+os.listdir(altM)[2]

# +
listy = list(p.iterdir())
img = Image.open(listy[15])
np.array(img).shape
# -

im1.save(path+'/pil.png')

# +
# tmp = list_of_image_lines[0]
# tmp.set_article_id()
# tmp.get_article_id()

# +
# all regions of the image (without graphic regions)
# These are the main blocks that need to be identified and assigned to the test set
list_of_image_regions = page_obj.get_text_regions()

# for the test set
# list_of_image_regions[0].id = 'new'
# page_obj.set_text_regions(# give the list_of_image_regions)

# +
tmp = list_of_image_regions[1]
# tmp.points.points_list
# [(91, 44), (1446, 65), (1442, 727), (1304, 722), (1304, 771), (85, 756)]
# tmp.points.to_string()
# '91,44 1446,65 1442,727 1304,722 1304,771 85,756'
# -

img = Image.open(path_to_image)
img = img.convert(mode='RGBA')
img

img.mode

alpha = img.getchannel('A')

np.array(alpha)

draw = ImageDraw.Draw(alpha)
draw.polygon(tmp.points.points_list, fill='blue')

alpha

np.unique(np.array(alpha))

# +
imgArr = np.array(img)
# Keep only the pixels of the drawn polygon as foreground; presumably 'blue'
# rasterizes to value 29 in this L-mode channel, hence the ==29 test — TODO confirm.
imgArr[:,:,3] = np.where(np.array(alpha)==29, 255, 0)
np.unique(imgArr[:,:,3])
im = Image.fromarray(imgArr)
im
im.getchannel(3).save(path+'/save/1.png')
# -

path
extracting_masks-complex.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Fine-tune facebook/wav2vec2-base for CTC speech recognition on a Colab GPU:
# mount Drive, unpack the dataset, build the vocabulary/processor, prepare the
# HuggingFace datasets, and train with the Trainer API.

# + colab={"base_uri": "https://localhost:8080/"} id="dvaUszUz3R4K" outputId="e80657d9-08ea-455c-b380-bd7fbbaaa352"
from google.colab import drive
drive.mount('/content/drive')

# + colab={"base_uri": "https://localhost:8080/"} id="XnQUVFw6z0J6" outputId="6f9a9ff6-50bd-4699-ec06-259abe79b4a2"
import tensorflow
from tensorflow.python.client import device_lib


def get_available_gpus():
    """Return the names of all GPU devices visible to TensorFlow."""
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']

get_available_gpus()

# + id="iBLGB7i13fNG"
# !mkdir dataset
# !unzip -q "/content/drive/MyDrive/Saarthi Internship Task/task_data.zip" -d "/content/dataset"

# + colab={"base_uri": "https://localhost:8080/"} id="Puab8jln5aYz" outputId="e5781369-32cb-4226-d878-5f31987d4dcb"
# !pip -q install transformers
# !pip -q install datasets
# !pip -q install jiwer

# + id="Hbzach7B5a8w"
import json
import librosa
import pandas as pd
from tqdm import tqdm_notebook
from datasets import Dataset, load_metric
from transformers import Wav2Vec2CTCTokenizer
from transformers import Wav2Vec2FeatureExtractor
from transformers import Wav2Vec2Processor
from transformers import Trainer
from transformers import TrainingArguments

# + id="yx_TbS7_8Gp3"
class Config:
    """This class has all the parameters that we require to set"""
    # Global run settings: seed, audio sampling rate, dataset mapping batch size /
    # workers, and input/output directories.
    globals_ = {
        'seed': 42,
        'sr': 8000,
        'dataset_bs': 4,
        'dataset_num_workers': 2,
        'base_dir': '/content/dataset/task_data',
        'processor_out_dir': '/content/speech',
        'train_model_out_dir': '/content/res',
        'vocab_out_dir': '/content'
    }
    # Pretrained checkpoint to fine-tune.
    model_ = {
        'name': 'facebook/wav2vec2-base'
    }
    # Reserved for training hyper-parameters (currently unused).
    train_params_ = {
    }

# + id="6kEDKN123kH4"
def audio_feature(df):
    """Load every audio file listed in df.path; return (waveforms, durations)."""
    audio = list()
    duration = list()
    for each in tqdm_notebook(df.path):
        speech_array, sampling_rate = librosa.load(f"{Config.globals_['base_dir']}/{each}", sr=Config.globals_['sr'])
        d = librosa.get_duration(y=speech_array, sr=sampling_rate)
        audio.append(speech_array)
        duration.append(d)
    return audio, duration


def get_dataset():
    """Read the CSV, normalize transcriptions, attach audio, and return a
    (train, valid) pair of HuggingFace Datasets (first 9000 rows train, rest valid)."""
    train_data = pd.read_csv(f"{Config.globals_['base_dir']}/train_data.csv")
    # Strip punctuation/apostrophes and lowercase the targets.
    train_data["transcription"] = train_data["transcription"].str.replace("[\’\'\,\.\?]",'').str.lower()
    audio, duration = audio_feature(train_data)
    train_data["data"] = audio
    train_data["duration"] = duration
    # The same random_state is used twice so both samples see the identical shuffle,
    # making the [:9000] / [9000:] split disjoint.
    valid_data = train_data.sample(frac=1, random_state=Config.globals_['seed'])[9000:].reset_index()
    train_data = train_data.sample(frac=1, random_state=Config.globals_['seed'])[:9000].reset_index()
    train_data=train_data.drop(['index', 'path', 'action', 'object', 'location'], axis=1)
    valid_data=valid_data.drop(['index', 'path', 'action', 'object', 'location'], axis=1)
    train_data = Dataset.from_pandas(train_data)
    valid_data = Dataset.from_pandas(valid_data)
    return train_data, valid_data

# + id="GDiZUAQSOSWK"
#train_data = train_data[train_data.duration>1]
#valid_data = valid_data[valid_data.duration>1]

# + id="VPWQe2MJAhXu" colab={"base_uri": "https://localhost:8080/", "height": 117, "referenced_widgets": ["26493f58bb36467cab3c0d840ed53427", "<KEY>", "<KEY>", "8d3d750cda1f49128e9294f861365f27", "f92607be17444ae7945b07ab8dad41b6", "<KEY>", "f4680981152a425ab452c7d46f18bde2", "eb08069b354e40ee9d069e345e0b9a4a"]} outputId="8b5c56d9-175d-4ae4-e77f-9599b75b72b2"
train_data, valid_data = get_dataset()

# + id="ek6yPLKJBbhX"
class Vocab:
    """Builds the CTC character vocabulary from the train/valid transcriptions
    and writes it to vocab.json."""

    def __init__(self, train_data, valid_data):
        self.train_data=train_data
        self.valid_data=valid_data

    def extract_all_chars(self, batch):
        """Collect the set of characters appearing in a batch of transcriptions."""
        all_text = " ".join(batch["transcription"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    def get_vocab(self):
        """Union the train/valid character sets, remap space to '|', add [UNK]/[PAD],
        and dump the mapping to vocab.json."""
        vocab_train = self.train_data.map(self.extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=self.train_data.column_names)
        vocab_test = self.valid_data.map(self.extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=self.valid_data.column_names)
        vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
        vocab_dict = {v: k for k, v in enumerate(vocab_list)}
        # The word delimiter token '|' replaces the plain space character.
        vocab_dict["|"] = vocab_dict[" "]
        del vocab_dict[" "]
        vocab_dict["[UNK]"] = len(vocab_dict)
        vocab_dict["[PAD]"] = len(vocab_dict)
        with open(f"{Config.globals_['vocab_out_dir']}/vocab.json", 'w') as vocab_file:
            json.dump(vocab_dict, vocab_file)

# + colab={"base_uri": "https://localhost:8080/", "height": 115, "referenced_widgets": ["85060b11268746cd84561fe334db0fe5", "<KEY>", "bae191bb368d4800b6dcf0d9b9482ece", "98bbbe3dc0e945da97be847f059b47fd", "83101fb537ea4cf8af725df0aced6bd6", "883d280729344eb5898a6e4f73eeb590", "860a572cfa27496792ac7ef5d62b5221", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "80533f21b76542cdbd73ed0f1f90ad70", "<KEY>", "<KEY>", "38b763a672414520abd93303b486a1b1", "<KEY>"]} id="0oIhtBsj_CR4" outputId="bc1a703e-9f3b-4aad-9717-e7906cef49f1"
Vocab(train_data, valid_data).get_vocab()

# + id="272aJxCyCB8-" colab={"base_uri": "https://localhost:8080/"} outputId="83d72f76-01bd-40cb-a51f-e01406609ffd"
# !mkdir speech
# Tokenizer + feature extractor bundled into a single processor, saved for reuse.
tokenizer = Wav2Vec2CTCTokenizer(f"{Config.globals_['vocab_out_dir']}/vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|")
feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=Config.globals_['sr'], padding_value=0.0, do_normalize=True, return_attention_mask=False)
processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
processor.save_pretrained(Config.globals_['processor_out_dir'])

# + colab={"base_uri": "https://localhost:8080/", "height": 115, "referenced_widgets": ["e2bb65d55d334c9980dffba0ce3ea405", "07b10666eed54a4fb0d7b40815f16dbe", "1e8c49b16dc74e1fba99edff6ef12f9e", "497cb044217745b087ff0c8d5e1f83ac", "ca6a73ffb89443c8afdbd01e45be795e", "<KEY>", "8dbdf2fbc7fe425dbc5e3b5ef3138e98", "<KEY>", "41ce865923b644a895ceac389d21250a", "f8182682359044db84a8549894380535", "34af16530a0d49f09aff0150a21d1443", "b7f272dac93548d6aaa09d14f0ab80d7", "9590e553bb6b40d888725635dfb4e7db", "<KEY>", "3b99461d6f0242169b00699b354a9111", "b702d9b263f24b36a41ea6413e74a66d"]} id="0MXVg3heCB31" outputId="027b0c6f-21ec-444e-ccc2-9c76874532a3"
import numpy as np


def speech_file_to_array_fn(batch):
    """Rename dataset columns to the speech / sampling_rate / target_text layout."""
    batch["speech"] = batch["data"]
    batch["sampling_rate"] = Config.globals_['sr']
    batch["target_text"] = batch["transcription"]
    return batch

train_data = train_data.map(speech_file_to_array_fn, remove_columns=train_data.column_names)
valid_data = valid_data.map(speech_file_to_array_fn, remove_columns=valid_data.column_names)

# + id="c7LY7D3JCB00"
import IPython.display as ipd
import numpy as np
import random


def get_random_example(df):
    """Print and play a randomly chosen example from the dataset.

    NOTE(review): random.randint(0, len(df)) is inclusive on both ends, so it can
    produce len(df) and raise an IndexError — randint(0, len(df) - 1) or
    random.randrange(len(df)) was probably intended.
    """
    rand_int = random.randint(0, len(df))
    print("Target text:", df[rand_int]["target_text"])
    print("Input array shape:", np.asarray(df[rand_int]["speech"]).shape)
    print("Sampling rate:", df[rand_int]["sampling_rate"])
    return ipd.Audio(data=np.asarray(df[rand_int]["speech"]), autoplay=True, rate=Config.globals_['sr'])

# + colab={"base_uri": "https://localhost:8080/", "height": 126} id="JqEyCCYfCBx7" outputId="d90f0396-8f69-442d-c6ca-349db70b9049"
get_random_example(valid_data)

# + colab={"base_uri": "https://localhost:8080/", "height": 437, "referenced_widgets": ["cbdab2a4f9d34463b8e86f5309696e4b", "<KEY>", "77be97ef8c2249b98e87e252b3999cee", "b30c9ca96e7b419186aa7b1da9c026d6", "70a19dc80967422f985cbeda3a1df338", "f7d977857bee409b964ebdbb710b9128", "08600636622940d88c0ad7ebac1ed198", "e00a7829d3494b92851b7ea0b9ab5c7a", "<KEY>", "3747ada1f8804c75819e8e54a15d5d67", "<KEY>", "0686ca3a2fb7479ca5dd52a01677f8e7", "<KEY>", "2b256a7247904c1692277a290f6f18da", "<KEY>", "69c0cd7d998e4d97be3c6153144d5fa0", "<KEY>", "<KEY>", "5bad2c0c3fe44e448467a37a2cfe09d4", "561f5dd2650a44c6a4b7a77213589826", "<KEY>", "<KEY>", "1a288db1fd154feabab9a71d97e221c9", "a63933afea174e6caffe4beab7baea73", "<KEY>", "<KEY>", "39b5cd13fe0f4f6e87314aa77575c760", "<KEY>", "<KEY>", "aff4cabc2e9842a999c0452e7ef31905", "07528e85711f4ef8afdb12ce70c48884", "5e68bacd780c4e5dbebcc6504ffeb9d0"]} id="YXyuCLrtEwVM" outputId="eef9de0f-6604-4037-ec86-72657efb759d"
def prepare_dataset(batch):
    """Turn raw speech into model input_values and transcriptions into label ids."""
    assert (
        len(set(batch["sampling_rate"])) == 1
    ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
    batch["input_values"] = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0]).input_values
    with processor.as_target_processor():
        batch["labels"] = processor(batch["target_text"]).input_ids
    return batch

train_data = train_data.map(prepare_dataset, remove_columns=train_data.column_names, batch_size=Config.globals_['dataset_bs'], num_proc=Config.globals_['dataset_num_workers'], batched=True)
valid_data = valid_data.map(prepare_dataset, remove_columns=valid_data.column_names, batch_size=Config.globals_['dataset_bs'], num_proc=Config.globals_['dataset_num_workers'], batched=True)

# + id="l1AsrxePEwRi"
import torch
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union


@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.Wav2Vec2Processor`)
            The processor used for proccessing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_length_labels (:obj:`int`, `optional`):
            Maximum length of the ``labels`` returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                max_length=self.max_length_labels,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch

data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

# + id="fGq9dHF-5p1J" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["0e1ef6b764af45fc962d94464376be00", "502a0b64eff64c62a6c0c1b437d36c4e", "3a6950c28e6e414a9e878cd0b281d234", "921b6ad3b0ec41a4a73aaf7d27f21c99", "08eda41982924d9383df511ffa957256", "dec6a859dc9c4007a6a4bc6c2c302ab0", "14dd472e28aa4be391cd13db9e120d66", "cf43e5553b1a499088ed5ced110c8239"]} outputId="4958c6ba-f180-4816-d7f2-d1dfed6a43dc"
def compute_metrics(pred):
    """Decode greedy (argmax) predictions and report word error rate (WER)."""
    pred_logits = pred.predictions
    pred_ids = np.argmax(pred_logits, axis=-1)
    pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
    pred_str = processor.batch_decode(pred_ids)
    # we do not want to group tokens when computing the metrics
    label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
    wer = wer_metric.compute(predictions=pred_str, references=label_str)
    return {"wer": wer}

wer_metric = load_metric("wer")

# + colab={"base_uri": "https://localhost:8080/", "height": 237, "referenced_widgets": ["<KEY>", "85059fff80a24105893988bdd9e7374d", "465c4b35a9944d749d158ea280da7f18", "af9371ac163245a9b6e2895ab822adc0", "<KEY>", "27f0f9f2c9b34952a449da9c659d4e5a", "48e90d0c0b8041c492e0dd45a0502193", "d5937e46b1ca4e10abbc84c07b1a8dde", "<KEY>", "<KEY>", "7013eba73cf54997a50f9206eee47de3", "01f2464e914945bfb7929ad7c75bafed", "<KEY>", "<KEY>", "<KEY>", "45feb699ec524c488e8e3560cc253366"]} id="-1D0Lmkt5puj" outputId="c7f02881-cc82-4fe4-919e-d9f10e9640fd"
from transformers import Wav2Vec2ForCTC

model = Wav2Vec2ForCTC.from_pretrained(
    Config.model_['name'],
    gradient_checkpointing=True,
    ctc_loss_reduction="mean",
    pad_token_id=processor.tokenizer.pad_token_id,
    vocab_size=len(processor.tokenizer)
)
model.config.ctc_zero_infinity = True #https://discuss.huggingface.co/t/wav2vec2-how-to-correct-for-nan-in-training-and-validation-loss/6089
# The convolutional feature extractor stays frozen; only the transformer + CTC head train.
model.freeze_feature_extractor()
print(f'Model defined')

# + id="ybpF3enr5prF"
# !mkdir res
training_args = TrainingArguments(
    output_dir=Config.globals_['train_model_out_dir'],
    group_by_length=True,
    per_device_train_batch_size=16,
    gradient_accumulation_steps=2,
    evaluation_strategy="steps",
    num_train_epochs=15,
    fp16=True,
    save_steps=200,
    eval_steps=200,
    logging_steps=200,
    learning_rate=1e-4,
    warmup_steps=400,
    save_total_limit=1,
)

trainer = Trainer(
    model=model,
    data_collator=data_collator,
    args=training_args,
    compute_metrics=compute_metrics,
    train_dataset=train_data,
    eval_dataset=valid_data,
    tokenizer=processor.feature_extractor,
)

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="aRRxuzOJJYwR" outputId="b9df9a36-eb71-4362-80cf-5ed4a47547c4"
trainer.train()

# + id="QS0iXSu28pjj"
src/transformer_speech/Speech.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Visualize and evaluate a trained VGG crowd-counting model on ShanghaiTech part B:
# plot input / ground-truth / predicted density maps for a few samples, then compute
# MAE and MSE over the whole validation set.

# +
import os
import random
import torch
import torchvision.transforms as standard_transforms
import scipy.io as sio
import matplotlib
import pandas as pd
import misc.transforms as own_transforms
import warnings

from torch.autograd import Variable
from torch.utils.data import DataLoader
from PIL import Image, ImageOps
from matplotlib import pyplot as plt
from tqdm import trange, tqdm

from misc.utils import *
from models.CC import CrowdCounter
from config import cfg
import CCAugmentation as cca
from datasets.SHHB.setting import cfg_data
from load_data import CustomDataset

torch.cuda.set_device(0)
torch.backends.cudnn.benchmark = True
warnings.filterwarnings('ignore')

# Per-channel (mean, std) used both to normalize inputs and to de-normalize for display.
mean_std = ([0.452016860247, 0.447249650955, 0.431981861591],[0.23242045939, 0.224925786257, 0.221840232611])
img_transform = standard_transforms.Compose([
    standard_transforms.ToTensor(),
    standard_transforms.Normalize(*mean_std)
])
restore = standard_transforms.Compose([
    own_transforms.DeNormalize(*mean_std),
    standard_transforms.ToPILImage()
])
pil_to_tensor = standard_transforms.ToTensor()

# +
# Candidate checkpoints; only the last assignment takes effect — edit/reorder these
# lines to pick which run to visualize.
model_path = './exp/11-27_01-18_SHHB_VGG_1e-05_[noAug]/all_ep_055_mae_10.52_mse_19.15.pth'
model_path = './exp/11-27_03-14_SHHB_VGG_1e-05_[noAug]/all_ep_059_mae_10.21_mse_18.90.pth'
model_path = './exp/11-27_05-10_SHHB_VGG_1e-05_[noAug]/all_ep_130_mae_9.47_mse_18.55.pth'

net = CrowdCounter(cfg.GPU_ID,cfg.NET)
net.load_state_dict(torch.load(model_path))
net.cuda()
net.eval()

# Load the ShanghaiTech part B test split through the CCAugmentation pipeline.
val_pipeline = cca.Pipeline(
    cca.examples.loading.SHHLoader("/dataset/ShanghaiTech", "test", "B"),
    []
).execute_generate()

val_loader = DataLoader(CustomDataset(val_pipeline), batch_size=cfg_data.VAL_BATCH_SIZE, num_workers=1, drop_last=False)
# Materialize all validation batches up front so they can be revisited cheaply below.
val_img = list(val_loader)

# +
# Show N samples starting at `start`: input image, GT density map, predicted map.
start = 0
N = 3

for vi, data in enumerate(val_img[start:start+N], 0):
    img, gt_map = data
    with torch.no_grad():
        img = Variable(img).cuda()
        pred_map = net.test_forward(img)
    pred_map = pred_map.data.cpu().numpy()
    # Move channels last and flip BGR->RGB for display.
    new_img = img.data.cpu().numpy()
    new_img = np.moveaxis(new_img, 1, 2)
    new_img = np.moveaxis(new_img, 2, 3)
    new_img = np.squeeze(new_img)[:,:,::-1]
    # Density maps are scaled by 1e4 during training; divide it back out to get counts.
    pred_cnt = np.sum(pred_map[0])/10000.0
    gt_count = np.sum(gt_map.data.cpu().numpy())/10000.0
    fg, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(16, 5))
    plt.suptitle(' '.join([
        'count_label:', str(round(gt_count, 3)),
        'count_prediction:', str(round(pred_cnt, 3))
    ]))
    ax0.imshow(np.uint8(new_img))
    ax1.imshow(np.squeeze(gt_map), cmap='jet')
    ax2.imshow(np.squeeze(pred_map), cmap='jet')
    plt.show()

# +
# Full-set evaluation: per-sample absolute and squared count errors.
mae = np.empty(len(val_img))
mse = np.empty(len(val_img))

for vi, data in enumerate(tqdm(val_img), 0):
    img, gt_map = data
    with torch.no_grad():
        img = Variable(img).cuda()
        pred_map = net.test_forward(img)
    pred_map = pred_map.data.cpu().numpy()
    pred_cnt = np.sum(pred_map[0])/10000.0
    gt_count = np.sum(gt_map.data.cpu().numpy())/10000.0
    mae[vi] = np.abs(gt_count-pred_cnt)
    mse[vi] = (gt_count-pred_cnt)**2

print('MAE:', round(mae.mean(),2))
print('MSE:', round(np.sqrt(mse.mean()),2))
# -

#
shb-vgg/exp/11-30_21-39_SHHB_VGG_1e-05_[flip+crop]/code/vis-vgg.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <p> <center> <a href="../Start_Here.ipynb">Home Page</a> </center> </p> # # # <div> # <span style="float: left; width: 33%; text-align: left;"><a href="1.Introduction-to-Distributed-Deep-Learning.ipynb">Previous Notebook</a></span> # <span style="float: left; width: 33%; text-align: center;"> # <a href="1.Introduction-to-Distributed-Deep-Learning.ipynb">1</a> # <a >2</a> # <a href="3.Hands-on-Multi-GPU.ipynb">3</a> # <a href="4.Convergence.ipynb">4</a> # </span> # <span style="float: left; width: 33%; text-align: right;"><a href="3.Hands-on-Multi-GPU.ipynb">Next Notebook</a></span> # </div> # # Introduction to Distributed Deep Learning - Part 2 # # **Contents of this notebook:** # # - [Understanding System Topology](#Understanding-System-Topology) # - [Communication concepts](#Communication-concepts) # - [Intra-Node Communication Topology](#Intra-Node-communication-Topology) # - [Performance variation due to system topology](#Performance-variation-due-to-system-topology) # - [Profiling using DLProf](#Profiling-using-DLProf) # - [NCCL](#NCCL) # - [NCCL_P2P_LEVEL=0 or P2P Disabled](#NCCL_P2P_LEVEL=0-or-P2P-Disabled) # - [NCCL_P2P_LEVEL=1 or P2P via PCIe](#NCCL_P2P_LEVEL=1-or-P2P-via-PCIe) # - [Benchmarking the system topology](#Benchmarking-the-system-topology) # # **By the End of this Notebook you will:** # # - Understand how system topolgy plays a role in Distributed training. 
# - Understand intra-node topology and underlying technologies like P2P and their implication on program performance # # Understanding System Topology # # In our previous notebook when we calculated the Throughput of deep learning training with different parameters , we saw a slight dip when we scaled from 4 to 8 GPUs , let us try to reason it by understanding the underlying the system. # # Before we begin, let us define two important terms: # # * **Latency:** The amount of time it takes to take a unit of data from point A to point B. For example, if 4B of data can be transferred from point A to B in 4 $\mu$s, that is the latency of transfer. # * **Bandwidth:** The amount of data that can be transferred from point A to point B in a unit of time. For example, if the width of the bus is 64KiB and latency of transfer between point A and B is 4 $\mu$s, the bandwidth is 64KiB * (1/4$\mu$s) = 1.6 GiB/s. # # ### Setting up the GPU # # To verify that our system has multiple GPUs in each node, run the command below: # !nvidia-smi # If the output is unclear, you can launch a Terminal session by clicking on `File` $\rightarrow$ `New` $\rightarrow$ `Terminal` or by following the steps as shown: # # ![open_terminal_session](images/open_terminal.png) # # ## Communication concepts # # There are many ways in which GPUs can transfer data between one another , let us look at two of the most used copy operations. Understanding these will help us with the further sections of the notebook when we benchmark and toggle different options available to us. # # #### Host Staging of Copy Operations # # The path taken by the data in both the cases is denoted by the red arrow as follows: # # <center><img src="images/memcpy_host_staging.png"/></center> # # That is, in the above GPU-to-GPU memory copy, the data traverses from GPU 0 the PCIe bus to the CPU, where it is staged in a buffer before being copied to GPU 1. 
This is called "host staging" and it decreases the bandwidth while increasing the latency of the operation. If we eliminate host staging, we can usually improve the performance of our application. # # #### Peer-to-Peer Memory Access # # P2P allows devices to address each other's memory from within device kernels and eliminates host staging by transferring data either through the PCIe switch or through NVLink as denoted by the red arrow below. # # <center><img src="images/memcpy_p2p_overview.png"/></center> # # Peer-to-Peer (P2P) memory access requires GPUs to share a Unified Virtual Address Space (UVA). UVA means that a single address space is used for the host and all modern NVIDIA GPU devices (specifically, those with compute capibility of 2.0 or higher). # # Let us now try to understand the Intra-node topology. # ## Intra-Node communication Topology # # Run the command below to display your node's GPU and NIC communication topology: # !nvidia-smi topo -m # Output of running the command on DGX-1 : # # ![nvidia_smi_topo_output](images/nvidia_smi_topo_output.png) # # Focus one a particular row, say GPU 0. The output states that GPUs 1 through 4 are connected to it via NVLink (in addition to PCIe) and GPUs 5 through 7 are connected to it via PCIe as well as an "SMP" interconnect. We have a dual-socket system and the CPUs in these sockets are connected by an interconnect known as SMP interconnect. # # Thus, GPU 0 to GPU 5 communication happens via not just PCIe, but also over the inter-socket interconnect within the same node. Clearly, this is a longer path than say the one between GPU 0 and GPU 1, which are connected via NVLink directly. # # Even within the GPUs connected via NVLink, we see different annotations such as `NV1` and `NV2` that affect the communication bandwidth and hence the performance. In this section, we will explore the nuances associated with a diverse intra-node GPU communication topology like in the output above. 
Specifically, in our system, the communication topology is as follows: # # ![dgx1_8x_tesla_v100_topo](images/dgx1_8x_tesla_v100_topo.png) # # Qualitatively, the bandwidth and latency vary with the topology as follows: # # <center><img src="images/intra_node_topology_map.png"/></center> # # Host staging implies traversing through the CPU and the travel path taken is one of PHB, NODE, and SYS. In contrast, if the path taken is either NV1, NV2, or PIX, then P2P is available. PXB implies that the GPUs belong to different PCIe hubs and P2P is usually not supported in this case. # # A double NVLink connection provides twice the bandwidth compared to a single NVLink. # # For a pair of 2 GPUs, the peak bidirectional bandwidth are as follows: # * PCIe: Using PIX topology, 15.75GB/s for PCIe Gen 3.0 and 31.5GB/s for PCIe Gen 4.0. # * NVLink: Using NV# topology, 50GB/s per connection. So a double NVLink connection has 100GB/s peak bidirectional bandwidth. # # Let us understand what difference the underlying communication topology can make to the application performance in the following sub-section. # # **Note:** If your command output doesn't show any NVLink connection or if there's no difference in connection type (PIX, PXB, PHB, NODE, SYS, NV#) between any 2 pair of GPUs, then the communication bandwidth and latency will likely be the same between any pair and the following sub-sections will not display any performance difference. # ### Performance variation due to system topology # # So far, we have run the application specifying the number of GPUs to use. To specify which GPU to use, we can supply the `CUDA_VISIBLE_DEVICES` environment variable to the executable to run our code on specific GPUs. If we want to run on only 2 GPUs, namely GPU 0 and GPU 3, we use set the environment variable `CUDA_VISIBLE_DEVICES="0,3"` while executing the command. Let us also include the `NCCL_DEBUG=INFO` variable to understand how the GPUs are connected. 
We will take a closer look into the NCCL library in the upcoming section. # # Let us now run the command with two GPUs and compare the throughput achieved in both cases. # # # **Experiment 1** : Try to find the GPU pair with **highest bandwidth and lowest latency** available as per the table above and replace `0,3` with those GPUs, and then run the command below: # !TF_CPP_MIN_LOG_LEVEL=3 NCCL_DEBUG=INFO CUDA_VISIBLE_DEVICES="0,3" horovodrun -np 2 --mpi-args="--oversubscribe" python3 ../source_code/N2/cnn_fmnist.py --batch-size=512 2> /dev/null # **Experiment 2** : Let us run the command below with the **highest latency and lowest bandwidth** ( in our case GPU `1,7` ). # !TF_CPP_MIN_LOG_LEVEL=3 NCCL_DEBUG=INFO CUDA_VISIBLE_DEVICES="1,7" horovodrun -np 2 --mpi-args="--oversubscribe" python3 ../source_code/N2/cnn_fmnist.py --batch-size=512 2> /dev/null # Now with the results obtained, we can now compare them : # # The scaling efficiency would likely be higher for the set of GPUs having **low latency and high bandwidth**. 
# # Output of running the command on DGX-1 : # # # ```bash # NCCL INFO Setting affinity for GPU 0 to 0fffff00,000fffff # NCCL INFO Trees [0] -1/-1/-1->1->0 [1] -1/-1/-1->1->0 [2] -1/-1/-1->1->0 [3] -1/-1/-1->1->0 # NCCL INFO Setting affinity for GPU 3 to 0fffff00,000fffff # NCCL INFO Channel 00 : 1[b000] -> 0[6000] via P2P/IPC # NCCL INFO Channel 00 : 0[6000] -> 1[b000] via P2P/IPC # NCCL INFO Channel 01 : 1[b000] -> 0[6000] via P2P/IPC # NCCL INFO Channel 01 : 0[6000] -> 1[b000] via P2P/IPC # NCCL INFO Channel 02 : 1[b000] -> 0[6000] via P2P/IPC # NCCL INFO Channel 02 : 0[6000] -> 1[b000] via P2P/IPC # NCCL INFO Channel 03 : 1[b000] -> 0[6000] via P2P/IPC # NCCL INFO Channel 03 : 0[6000] -> 1[b000] via P2P/IPC # NCCL INFO Connected all rings # NCCL INFO Connected all trees # NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/512 # NCCL INFO 4 coll channels, 4 p2p channels, 4 p2p channels per peer # # # Epoch 4/8 # Images/sec: 100702.49 # Epoch 5/8 # Images/sec: 101486.84 # Epoch 6/8 # Images/sec: 101490.28 # Epoch 7/8 # Images/sec: 99128.98 # Epoch 8/8 # Images/sec: 101215.77 # ``` # # Now, run the binary a pair of GPUs that have the lowest available bandwidth. In our case, we use GPU 1 and GPU 7. 
# # # Output of running the command on DGX-1 : # # ```bash # NCCL INFO Setting affinity for GPU 7 to ffff,f00000ff,fff00000 # NCCL INFO Channel 00/02 : 0 1 # NCCL INFO Channel 01/02 : 0 1 # NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 # NCCL INFO Setting affinity for GPU 1 to 0fffff00,000fffff # NCCL INFO Channel 00 : 1[8a000] -> 0[7000] via direct shared memory # NCCL INFO Channel 00 : 0[7000] -> 1[8a000] via direct shared memory # NCCL INFO Channel 01 : 1[8a000] -> 0[7000] via direct shared memory # NCCL INFO Channel 01 : 0[7000] -> 1[8a000] via direct shared memory # NCCL INFO Connected all rings # NCCL INFO Connected all trees # NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/512 # NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer # # Epoch 4/8 # Images/sec: 98996.51 # Epoch 5/8 # Images/sec: 98135.64 # Epoch 6/8 # Images/sec: 97798.09 # Epoch 7/8 # Images/sec: 96672.95 # Epoch 8/8 # Images/sec: 95782.78 # ``` # # Let us now try to understand the time taken for communication using DLProf profiling. # ### Profiling using DLProf # # **Important : If you are new to DLProf , kindly go through the DLProf Introduction notebook [here](2.2.DLProf.ipynb)** # # # The difference is subtle when we directly compare their throughput, but when we profile them using `dlprof` we can notice the difference in average time taken per NCCL call below. # Let us now profile both cases from above and understand the communication time taken. # # **Profiling case 1** : Try to find the GPU pair with highest bandwidth and lowest latency available as per the table above and replace 0,3 with those GPUs, and then run the command below: # # !TF_CPP_MIN_LOG_LEVEL=3 CUDA_VISIBLE_DEVICES="0,3" dlprof --output_path="Profile/N2_1" horovodrun -np 2 --mpi-args="--oversubscribe" python3 ../source_code/N2/cnn_fmnist.py --batch-size=512 # **Profiling case 2** : Let us run the command below with the highest latency and lowest bandwidth. 
# # # !TF_CPP_MIN_LOG_LEVEL=3 CUDA_VISIBLE_DEVICES="1,7" dlprof --output_path="Profile/N2_2" horovodrun -np 2 --mpi-args="--oversubscribe" python3 ../source_code/N2/cnn_fmnist.py --batch-size=512 # Let us now view the profile using `dlprofviewer` # # To run the `dlprofviewer` server , open a new terminal and run the following command. Replace the port `6666` to the port that you want to view the `dlprofviewer` server to run on. # # ```bash # dlprofviewer -b 0.0.0.0 --port 6666 Profile/N2_1/dlprof_dldb.sqlite # ``` # # Let us now open the Operatations table and view the operations summary. # For the DGX-1 cluster ,we get the following results from profiling. # # # Using GPU 0 & 3 # ![p2p_2_gpu_memcpy_nsys](images/0_3.png) # # Using GPU 1 & 7 # ![p2p_2_gpu_memcpy_nsys](images/2_7.png) # # We can notice the difference in average time taken by the NCCL calls has increase significantly in **case 2** , this is a much better respresentation to understand the time taken for the communication calls. # # Let us now try to understand how NCCL works and other options present in NCCL. # # NCCL # # The NVIDIA Collective Communication Library (NCCL) implements multi-GPU and multi-node communication primitives optimized for NVIDIA GPUs and Networking. NCCL provides routines such as `all-gather`, `all-reduce`, `broadcast`, `reduce`, `reduce-scatter` as well as point-to-point send and receive that are optimized to achieve high bandwidth and low latency over PCIe and NVLink high-speed interconnects within a node and over NVIDIA Mellanox Network across nodes. # # The Horovod framework also uses NCCL Collective communications to keep the all the GPUs in sync , we can then toggle P2P levels using Environment variables to manually switch between different communication protocols available. 
The complete list of Environment variables can be found # [here](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/env.html#nccl-p2p-disable) # # Let us now toggle Peer-to-peer levels using the `NCCL_P2P_LEVEL` environment variable. # # ```text # NCCL_P2P_LEVEL # (since 2.3.4) # # The NCCL_P2P_LEVEL variable allows the user to finely control when to use the peer to peer (P2P) transport between GPUs. The level defines the maximum distance between GPUs where NCCL will use the P2P transport. # # Values accepted # LOC or 0 : Never use P2P (always disabled) # # NVL : Use P2P when GPUs are connected through NVLink # # PIX or 1 : Use P2P when GPUs are on the same PCI switch. # # PXB or 2 : Use P2P when GPUs are connected through PCI switches (potentially multiple hops). # # PHB or 3, or 4 : Use P2P when GPUs are on the same NUMA node. Traffic will go through the CPU. # # SYS or 5 : Use P2P betweem NUMA nodes, potentially crossing the SMP interconnect (e.g. QPI/UPI). # ``` # # We have benchmarked for the case where we use NVLink and verified it through `NCCL_DEBUG` environment variable, let us now try two different settings and compare their throughputs. 
# # ### NCCL_P2P_LEVEL=0 or P2P Disabled # !NCCL_P2P_LEVEL=0 TF_CPP_MIN_LOG_LEVEL=3 NCCL_DEBUG=INFO CUDA_VISIBLE_DEVICES="0,3" horovodrun -np 2 --mpi-args="--oversubscribe" python3 ../source_code/N2/cnn_fmnist.py --batch-size=512 2> /dev/null # Output of running the command on DGX-1 : # # ```bash # NCCL INFO NCCL_P2P_LEVEL set by environment to LOC # NCCL INFO Trees [0] -1/-1/-1->1->0 [1] -1/-1/-1->1->0 # NCCL INFO Channel 00/02 : 0 1 # NCCL INFO Setting affinity for GPU 3 to 0fffff00,000fffff # NCCL INFO Channel 01/02 : 0 1 # NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 # NCCL INFO Setting affinity for GPU 0 to 0fffff00,000fffff # NCCL INFO Channel 00 : 1[b000] -> 0[6000] via direct shared memory # NCCL INFO Channel 00 : 0[6000] -> 1[b000] via direct shared memory # NCCL INFO Channel 01 : 1[b000] -> 0[6000] via direct shared memory # NCCL INFO Channel 01 : 0[6000] -> 1[b000] via direct shared memory # NCCL INFO Connected all rings # NCCL INFO Connected all trees # NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/512 # NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer # # Epoch 4/8 # Images/sec: 95033.4 # Epoch 5/8 # Images/sec: 94848.44 # Epoch 6/8 # Images/sec: 94289.97 # ``` # # ### NCCL_P2P_LEVEL=1 or P2P via PCIe # !NCCL_P2P_LEVEL=1 TF_CPP_MIN_LOG_LEVEL=3 NCCL_DEBUG=INFO CUDA_VISIBLE_DEVICES="0,3" horovodrun -np 2 --mpi-args="--oversubscribe" python3 ../source_code/N2/cnn_fmnist.py --batch-size=512 2> /dev/null # Output of running the command on DGX-1 : # # ```bash # NCCL INFO NCCL_P2P_LEVEL set by environment to PIX # NCCL INFO NCCL_P2P_LEVEL set by environment to PIX # NCCL INFO Trees [0] -1/-1/-1->1->0 [1] -1/-1/-1->1->0 [2] -1/-1/-1->1->0 [3] -1/-1/-1->1->0 # NCCL INFO Channel 00/04 : 0 1 # NCCL INFO Channel 01/04 : 0 1 # NCCL INFO Channel 02/04 : 0 1 # NCCL INFO Setting affinity for GPU 3 to 0fffff00,000fffff # NCCL INFO Channel 03/04 : 0 1 # NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 
1/-1/-1->0->-1 # NCCL INFO Setting affinity for GPU 0 to 0fffff00,000fffff # NCCL INFO Channel 00 : 1[b000] -> 0[6000] via P2P/IPC # NCCL INFO Channel 00 : 0[6000] -> 1[b000] via P2P/IPC # NCCL INFO Channel 01 : 1[b000] -> 0[6000] via P2P/IPC # NCCL INFO Channel 01 : 0[6000] -> 1[b000] via P2P/IPC # NCCL INFO Channel 02 : 1[b000] -> 0[6000] via P2P/IPC # NCCL INFO Channel 02 : 0[6000] -> 1[b000] via P2P/IPC # NCCL INFO Channel 03 : 1[b000] -> 0[6000] via P2P/IPC # NCCL INFO Channel 03 : 0[6000] -> 1[b000] via P2P/IPC # NCCL INFO Connected all rings # NCCL INFO Connected all trees # Epoch 4/8 # Images/sec: 96529.63 # Epoch 5/8 # Images/sec: 97288.7 # Epoch 6/8 # Images/sec: 97230.33 # Epoch 7/8 # Images/sec: 97701.72 # Epoch 8/8 # Images/sec: 97075.39 # ``` # We can summarise the results using the following table. # # |GPUs|Condition|Throughput| # |-|-|-| # |0,3|P2P via NVLink|~100000| # |0,3|P2P via PCIe|~97000| # |0,3|P2P Disabled|~95000| # # We now understand the role of communication and hardware configuration for training. In our case, we used a smaller model for quicker runtimes, this decrease in throughput due to communication is more pronounced when the data transfer size increases for larger models that typically require multi-node training, and in those cases, NVLink helps reduce the scaling efficiency gap as we scale further. # ### Benchmarking the system topology # # The above application is not very memory intensive as we mentioned earlier, this can also be verified using `dlprof` that most of the time in GPU is spent on computation. Therefore, to get a quantitative measure of latency and bandwidth impact due to topology, we run a micro-benchmark. # # **The p2pBandwidthLatencyTest micro-benchmark** # # p2pBandwidthLatencyTest is a part of [CUDA Samples GitHub repository](https://github.com/NVIDIA/cuda-samples) available to help CUDA developers. 
# # As the name suggests, this test measures the bandwidth and latency impact of P2P and underlying communication topology. Let's compile the benchmark: # !cd ../source_code/N2/Samples/p2pBandwidthLatencyTest/ && make clean && make # Now, let's run the benchmark: # !cd ../source_code/N2/Samples/p2pBandwidthLatencyTest/ && ./p2pBandwidthLatencyTest # The first part of the benchmark gives device information and P2P access available from each GPU (similar to `nvidia-smi topo -m` command). Next, the benchmark measures the unidirectional and bidirectional bandwidth and latency with P2P disabled and enabled. # # We share partial results obtained on running the command on DGX-1 : : # # ```bash # Bidirectional P2P=Disabled Bandwidth Matrix (GB/s) # D\D 0 1 2 3 4 5 6 7 # 0 783.95 9.56 14.43 14.46 14.47 14.24 14.51 14.43 # # Bidirectional P2P=Enabled Bandwidth Matrix (GB/s) # D\D 0 1 2 3 4 5 6 7 # 0 784.87 48.49 48.49 96.85 96.90 14.25 14.54 14.49 # # P2P=Disabled Latency Matrix (us) # GPU 0 1 2 3 4 5 6 7 # 0 1.78 17.52 16.41 16.43 17.35 16.88 17.34 16.85 # # P2P=Enabled Latency (P2P Writes) Matrix (us) # GPU 0 1 2 3 4 5 6 7 # 0 1.76 1.62 1.61 2.01 2.02 18.44 19.15 19.34 # ``` # # Our system is based on PCIe gen 3.0 with a peak maximum GPU-GPU PCIe banwidth of 15.75 GB/s. Let us analyze and understand these results: # # * GPU 0 and GPU 1/2: Connected by a single NVLink connection. By enabling P2P- # - Bandwidth reaches close to the maximum peak of 50 GB/s. # - Latency decreases by an order of magnitude. # * GPU 0 and GPU 3/4: Connected by a double NVLink connection. By enabling P2P- # - Bandwidth reaches close to the maximum peak of 100 GB/s. # - Latency decreases by an order of magnitude. # * GPU 0 and GPU 5/6/7: Connected by PCIe and SMP interconnect. By enabling P2P- # - Bandwidth is unchanged. # - Latency increases a marginally. 
Correlate these results with the communication topology that can be displayed by the `nvidia-smi topo -m` command and the qualitative table in the previous section.
ai/Distributed_Deep_Learning/English/python/jupyter_notebook/2.1.System-Topology.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Build an analysis table from the CMS DE-SynPUF sample: yearly beneficiary
# summaries (2008-2010) restricted to members who used one of the 100
# most-prescribed drugs, left-joined with inpatient claims and drug events.

import numpy as np
import pandas as pd

# Raw DE-SynPUF sample files, expected in the working directory.
drug_df = pd.read_csv('DE1_0_2008_to_2010_Prescription_Drug_Events_Sample_1.csv')
ben_df_08 = pd.read_csv('DE1_0_2008_Beneficiary_Summary_File_Sample_1.csv')
ben_df_09 = pd.read_csv('DE1_0_2009_Beneficiary_Summary_File_Sample_1.csv')
ben_df_10 = pd.read_csv('DE1_0_2010_Beneficiary_Summary_File_Sample_1.csv')
in_claims_df = pd.read_csv('DE1_0_2008_to_2010_Inpatient_Claims_Sample_1.csv')
out_claims_df = pd.read_csv('DE1_0_2008_to_2010_Outpatient_Claims_Sample_1.csv')

# Tag each yearly beneficiary file with its year, then stack all three.
ben_df_08['YEAR'] = 2008
ben_df_09['YEAR'] = 2009
ben_df_10['YEAR'] = 2010
# pd.concat replaces the deprecated DataFrame.append (removed in pandas 2.x);
# indexes are preserved, exactly as two chained append() calls did.
ben_df = pd.concat([ben_df_08, ben_df_09, ben_df_10])
print(ben_df_08.shape)
print(ben_df_09.shape)
print(ben_df_10.shape)
print(ben_df.shape)

# The 100 most frequently prescribed drug product/service IDs.
top_drug_df = drug_df.groupby(['PROD_SRVC_ID']).size().reset_index(name = 'count').sort_values(by = 'count', ascending = False).head(100)

# Keep only the drug events whose product ID is in the top-100 list.
selected_ids_df = pd.merge(left = drug_df, right = top_drug_df, how = 'inner', left_on = 'PROD_SRVC_ID', right_on = 'PROD_SRVC_ID')
print(selected_ids_df.shape)
print(selected_ids_df.head(10))
print(len(selected_ids_df.DESYNPUF_ID.unique()))
# Beneficiaries associated with at least one top-100 drug event.
selected_ben = selected_ids_df.DESYNPUF_ID.unique()

ben_df.columns
print(ben_df[ben_df['DESYNPUF_ID'].isin(selected_ben)].shape)
print(len(ben_df.columns))

print(in_claims_df.shape)
print(in_claims_df[in_claims_df.columns[:38]].shape)
in_claims_df.columns
out_claims_df['CLM_FROM_DT'].unique()
out_claims_df.fillna(0, inplace = True)

# Trim the claim frame to its first 36 columns and derive the admission
# year from the YYYYMMDD admission date.
in_claims_df = in_claims_df[in_claims_df.columns[:36]]
in_claims_df['IN_YEAR'] = in_claims_df['CLM_ADMSN_DT'].apply(lambda x : str(x)[:4])
in_claims_df['IN_YEAR'] = in_claims_df['IN_YEAR'].astype('int64')
# Inpatient claims belonging to the selected beneficiaries only.
summary_in_claims = in_claims_df[in_claims_df[u'DESYNPUF_ID'].isin(selected_ben)]

# Outpatient-claims variant, kept for reference but currently disabled.
# out_claims_df=out_claims_df[out_claims_df.columns[:28]]
# out_claims_df['OUT_YEAR'] = out_claims_df[u'CLM_FROM_DT'].apply(lambda x : str(x)[:4])
# out_claims_df['OUT_YEAR'] = out_claims_df['OUT_YEAR'].astype('int64')
# summary_out_claims = out_claims_df[out_claims_df[u'DESYNPUF_ID'].isin(selected_ben)]

# Beneficiary-year rows left-joined with the matching inpatient claims.
ben_df_1 = pd.merge(left = ben_df, right = summary_in_claims, how = 'left', left_on = ['DESYNPUF_ID','YEAR'], right_on = ['DESYNPUF_ID','IN_YEAR'])
print(ben_df_1.shape)

# ben_df_2 = pd.merge(left = ben_df_1, right = summary_out_claims, how = 'inner', left_on =['DESYNPUF_ID','YEAR'], right_on = ['DESYNPUF_ID','OUT_YEAR'])
# print(ben_df_2.shape)

# Derive the service year of each drug event from the YYYYMMDD service date.
drug_df['D_YEAR'] = drug_df['SRVC_DT'].apply(lambda x : str(x)[:4])
drug_df['D_YEAR'] = drug_df['D_YEAR'].astype('int64')
drug_df.head(10)

# Finally, left-join the drug events onto the beneficiary/claims table.
ben_df_2 = pd.merge(left = ben_df_1, right = drug_df, how = 'left', left_on = ['DESYNPUF_ID','YEAR'], right_on = ['DESYNPUF_ID','D_YEAR'])
# BUGFIX: report the shape of the merge result just computed (ben_df_2);
# the original re-printed ben_df_1.shape, a copy-paste slip from above.
print(ben_df_2.shape)
Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Analytic PNS3 bounds for any number of strata (nz)
#
# example with 3 strata (3z)

# Resolve imports starting from the JudeasRx folder downward.
import os
import sys
os.chdir('../')
sys.path.insert(0,os.getcwd())
print(os.getcwd())

# +
import pandas as pd
import numpy as np

from MultiBounder_ana import MultiBounder_ana
# -

# To show how to load Experimental and Observational data from a csv file, we first create a csv file using Pandas

# +
# One value per stratum ('a', 'b', 'c'), in this order:
#   o1b0, o1b1 : observational probabilities
#   px1        : probability of x = 1
#   e1b0, e1b1 : experimental probabilities
#   pz         : stratum probability
o1b0 = [.5, .37, .2]
o1b1 = [.33, .62, .5]
px1 = [.62, .71, .7]
e1b0 = [.5, .5, .1]
e1b1 = [.5, .5, .6]
pz = [.2, .3, .5]

# Assemble the table with the stratum name as an ordinary first column,
# which is exactly the layout create_from_file() expects.
strata_table = pd.DataFrame({
    'zname': ['a', 'b', 'c'],
    'o1b0': o1b0,
    'o1b1': o1b1,
    'px1': px1,
    'e1b0': e1b0,
    'e1b1': e1b1,
    'pz': pz
})

# Persist the example input for the loader below.
strata_table.to_csv('jupyter_notebooks/3z_example.csv', index=False)
# -

# Next we plot the PNS3 bounds and both ATE (ATE and backdoor ATE).

alp_y0_y1 = np.array([[.5, -.4], [.2, .1]])
mba = MultiBounder_ana.create_from_file(
    'jupyter_notebooks/3z_example.csv',
    alp_y0_y1=alp_y0_y1,
    only_obs=False,
    exogeneity=False,
    strong_exo=False,
    monotonicity=False)
print("------------------------------------")
mba.print_both_ATE()
mba.plot_bds()
mba.plot_both_ATE()
jupyter_notebooks/analytic_pns3_bds_nz.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Random Forest

# %load_ext autoreload
# %autoreload 2

# +
# Load the required libraries.
import math
from functools import reduce

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor

import eli5
from eli5.sklearn import PermutationImportance
#import shap

#from src.visualization.metrics_summary import cross_val_reg, gam_cross_val_reg, err_hist, pred_hist, plot_residuo, plot_corr_matrix
import os
# -

# Move to the project root so the relative data path resolves.
os.chdir('..')

# Load the profiles with their SQR score.
perfiles_sqr = pd.read_parquet('data/processed/perfiles_sqr_filtrado.parquet')
perfiles_sqr.reset_index(drop=True, inplace=True)  # reset the index

# BMI = weight / height(m)^2 ('peso' presumably in kg, 'altura' in cm — confirm).
perfiles_sqr['IMC'] = perfiles_sqr['peso'] / (perfiles_sqr['altura']/100)**2
# BUGFIX: the category labels must match the values the stratum filters
# below compare against ('Normal', 'Overweight', 'Obese'). The original
# Spanish labels ('Sobrepeso', 'Obesidad') made every 'Overweight'/'Obese'
# filter select an empty DataFrame.
perfiles_sqr['IMC_cat'] = pd.cut(perfiles_sqr['IMC'], bins=[0, 25, 30, 50],
                                 include_lowest=True,
                                 labels=['Normal', 'Overweight', 'Obese'])

# +
# Split the 'presiones' string into 6 single-character pressure variables.
cols = ['PresPos1', 'PresPos2', 'PresPos3', 'PresPos4', 'PresPos5', 'PresPos6']
rows = range(len(perfiles_sqr[['presiones']]))
df_pres_split = pd.DataFrame(columns=cols, index=rows)

for j in range(len(perfiles_sqr[['presiones']])):
    pres_j = perfiles_sqr.loc[j,'presiones']
    # One list element per character of the pressure string.
    pres_j_split = [pres_j[i:i + 1] for i in range(0, len(pres_j), 1)]
    df_pres_split.iloc[j, :] = pres_j_split

# Join the split pressures back onto the profiles.
perfiles_sqr = pd.concat([perfiles_sqr, df_pres_split], axis=1)
perfiles_sqr.head()
# -

### Build one DataFrame per (sex, position, BMI-category) stratum.
datos1 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Male') & (perfiles_sqr["posicion"] == 'Lateral') & (perfiles_sqr["IMC_cat"] == 'Normal')]
datos2 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Male') & (perfiles_sqr["posicion"] == 'Lateral') & (perfiles_sqr["IMC_cat"] == 'Overweight')]
datos3 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Male') & (perfiles_sqr["posicion"] == 'Lateral') & (perfiles_sqr["IMC_cat"] == 'Obese')]
datos4 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Male') & (perfiles_sqr["posicion"] == 'Supine') & (perfiles_sqr["IMC_cat"] == 'Normal')]
datos5 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Male') & (perfiles_sqr["posicion"] == 'Supine') & (perfiles_sqr["IMC_cat"] == 'Overweight')]
datos6 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Male') & (perfiles_sqr["posicion"] == 'Supine') & (perfiles_sqr["IMC_cat"] == 'Obese')]
datos7 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Female') & (perfiles_sqr["posicion"] == 'Lateral') & (perfiles_sqr["IMC_cat"] == 'Normal')]
datos8 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Female') & (perfiles_sqr["posicion"] == 'Lateral') & (perfiles_sqr["IMC_cat"] == 'Overweight')]
datos9 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Female') & (perfiles_sqr["posicion"] == 'Lateral') & (perfiles_sqr["IMC_cat"] == 'Obese')]
datos10 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Female') & (perfiles_sqr["posicion"] == 'Supine') & (perfiles_sqr["IMC_cat"] == 'Normal')]
datos11 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Female') & (perfiles_sqr["posicion"] == 'Supine') & (perfiles_sqr["IMC_cat"] == 'Overweight')]
datos12 = perfiles_sqr[(perfiles_sqr["sexo"] == 'Female') & (perfiles_sqr["posicion"] == 'Supine') & (perfiles_sqr["IMC_cat"] == 'Obese')]

# +
## Design matrices (the 6 pressure columns, reusing `cols`) and SQR targets.
X1 = datos1[cols]
y1 = datos1['sqr']
X2 = datos2[cols]
y2 = datos2['sqr']
X3 = datos3[cols]
y3 = datos3['sqr']
X4 = datos4[cols]
y4 = datos4['sqr']
X5 = datos5[cols]
y5 = datos5['sqr']
X6 = datos6[cols]
y6 = datos6['sqr']
X7 = datos7[cols]
y7 = datos7['sqr']
X8 = datos8[cols]
y8 = datos8['sqr']
X9 = datos9[cols]
y9 = datos9['sqr']
X10 = datos10[cols]
y10 = datos10['sqr']
X11 = datos11[cols]
y11 = datos11['sqr']
X12 = datos12[cols]
y12 = datos12['sqr']
# -

# ### Data1

# +
## Fit the random forest for stratum 1.
# NOTE(review): 'criterion': 'mse' was renamed 'squared_error' and
# 'min_impurity_split' was removed in scikit-learn 1.0 — confirm the
# installed version still accepts these.
params = {'bootstrap': True,
          'criterion': 'mse',
          'max_depth': 10,
          'max_features': 'sqrt',
          'max_leaf_nodes': None,
          'min_impurity_decrease': 0.0,
          'min_impurity_split': None,
          'min_samples_leaf': 1,
          'min_samples_split': 2,
          'min_weight_fraction_leaf': 0.0,
          'n_estimators': 300,
          'n_jobs': -1,
          'oob_score': False,
          'random_state': 42,
          'verbose': 0,
          'warm_start': False}

rfr = RandomForestRegressor(**params)
rf1 = rfr.fit(X1, y1)

# +
#perm = PermutationImportance(rf1, random_state=42, n_iter=10).fit(X1, y1)
#eli5.show_weights(perm, feature_names = X1.columns.tolist())

# +
#rf1.feature_importances_
# -

# Plot the feature importances, least to most important.
sorted_idx = rf1.feature_importances_.argsort()
plt.barh(X1.columns[sorted_idx], rf1.feature_importances_[sorted_idx])
plt.xlabel("Importancia de las variables");

## Improvement: mean SQR per value of the most important pressure tube.
aa = pd.DataFrame(datos1.groupby('PresPos1').agg({'sqr': ['mean']}))
maxim = aa[:].values.max()
# Relative improvement (%) of the best pressure value over the stratum mean.
m1=100*(maxim-datos1['sqr'].mean())/datos1['sqr'].mean()
m1

aa

maxim

# In the Male / lateral position / normal-BMI category, setting tube 1 to
# its optimal pressure value 4 improves the mean SQR by 12.9%.
# # ### Dataset2 # + ## Comenzamos a hacer rf params = {'bootstrap': True, 'criterion': 'mse', 'max_depth': 10, 'max_features': 'sqrt', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 300, 'n_jobs': -1, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False} rfr = RandomForestRegressor(**params) rf2 = rfr.fit(X2, y2) # - rf2.feature_importances_ sorted_idx = rf2.feature_importances_.argsort() plt.barh(X2.columns[sorted_idx], rf2.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") ## Mejora aa = pd.DataFrame(datos2.groupby('PresPos3').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim = aa[:].values.max() #mejora m2=100*(maxim-datos2['sqr'].mean())/datos2['sqr'].mean() m2 aa maxim # En esta categoría, si el tubo 3 se pone al valor óptimo de presión 6, la media del SQR mejora en un 9.15%. # ### Dataset3 # + ## Comenzamos a hacer rf params = {'bootstrap': True, 'criterion': 'mse', 'max_depth': 10, 'max_features': 'sqrt', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 300, 'n_jobs': -1, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False} rfr = RandomForestRegressor(**params) rf3 = rfr.fit(X3, y3) # - rf3.feature_importances_ sorted_idx = rf3.feature_importances_.argsort() plt.barh(X3.columns[sorted_idx], rf3.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") ## Mejora aa = pd.DataFrame(datos3.groupby('PresPos5').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim = aa[:].values.max() #mejora m3=100*(maxim-datos3['sqr'].mean())/datos3['sqr'].mean() m3 aa maxim # En esta categoría, si el tubo 5 se pone al valor óptimo de presión 3, la media del SQR mejora en un 12.47%. 
# ### Dataset4 # + ## Comenzamos a hacer rf params = {'bootstrap': True, 'criterion': 'mse', 'max_depth': 10, 'max_features': 'sqrt', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 300, 'n_jobs': -1, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False} rfr = RandomForestRegressor(**params) rf4 = rfr.fit(X4, y4) # - rf4.feature_importances_ sorted_idx = rf4.feature_importances_.argsort() plt.barh(X4.columns[sorted_idx], rf4.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") # + ## Mejora aa = pd.DataFrame(datos4.groupby('PresPos1').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim1 = aa[:].values.max() aa1 = pd.DataFrame(datos4.groupby('PresPos3').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim2 = aa1[:].values.max() maxim = np.array([maxim1,maxim2]).mean() #mejora m4=100*(maxim1-datos4['sqr'].mean())/datos4['sqr'].mean() m4 m4a=100*(maxim2-datos4['sqr'].mean())/datos4['sqr'].mean() m4a # - aa1 maxim2 aa maxim1 # En esta categoría, si el tubo 1 se pone al valor óptimo de presión 4, la media del SQR mejora en un 23.43%. Si el tubo 3 se pone al valor óptimo 5, la media del SQR mejora en un 21.04%. 
# ### Dataset5 # + ## Comenzamos a hacer rf params = {'bootstrap': True, 'criterion': 'mse', 'max_depth': 10, 'max_features': 'sqrt', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 300, 'n_jobs': -1, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False} rfr = RandomForestRegressor(**params) rf5 = rfr.fit(X5, y5) # - rf5.feature_importances_ sorted_idx = rf5.feature_importances_.argsort() plt.barh(X5.columns[sorted_idx], rf5.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") ## Mejora aa = pd.DataFrame(datos5.groupby('PresPos5').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim = aa[:].values.max() #mejora m5=100*(maxim-datos5['sqr'].mean())/datos5['sqr'].mean() m5 aa maxim # En esta categoría, si el tubo 5 se pone al valor óptimo de presión 1, la media del SQR mejora en un 17.64%. # ### Dataset6 # + ## Comenzamos a hacer rf params = {'bootstrap': True, 'criterion': 'mse', 'max_depth': 10, 'max_features': 'sqrt', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 300, 'n_jobs': -1, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False} rfr = RandomForestRegressor(**params) rf6 = rfr.fit(X6, y6) # - rf6.feature_importances_ sorted_idx = rf6.feature_importances_.argsort() plt.barh(X6.columns[sorted_idx], rf6.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") ## Mejora aa = pd.DataFrame(datos6.groupby('PresPos1').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim = aa[:].values.max() #mejora m6=100*(maxim-datos6['sqr'].mean())/datos6['sqr'].mean() m6 aa maxim # En esta categoría, si el tubo 1 se pone al valor óptimo de presión 2, la media del SQR mejora en un 10.95%. 
# ### Dataset7 # + ## Comenzamos a hacer rf params = {'bootstrap': True, 'criterion': 'mse', 'max_depth': 10, 'max_features': 'sqrt', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 300, 'n_jobs': -1, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False} rfr = RandomForestRegressor(**params) rf7 = rfr.fit(X7, y7) # - rf7.feature_importances_ sorted_idx = rf7.feature_importances_.argsort() plt.barh(X7.columns[sorted_idx], rf7.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") ## Mejora aa = pd.DataFrame(datos7.groupby('PresPos3').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim = aa[:].values.max() #mejora m7=100*(maxim-datos7['sqr'].mean())/datos7['sqr'].mean() m7 aa maxim # En esta categoría, si el tubo 3 se pone al valor óptimo de presión 4, la media del SQR mejora en un 15.35%. # ### Dataset8 # + ## Comenzamos a hacer rf params = {'bootstrap': True, 'criterion': 'mse', 'max_depth': 10, 'max_features': 'sqrt', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 300, 'n_jobs': -1, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False} rfr = RandomForestRegressor(**params) rf8 = rfr.fit(X8, y8) # - rf8.feature_importances_ sorted_idx = rf8.feature_importances_.argsort() plt.barh(X8.columns[sorted_idx], rf8.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") ## Mejora aa = pd.DataFrame(datos8.groupby('PresPos1').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim = aa[:].values.max() #mejora m8=100*(maxim-datos8['sqr'].mean())/datos8['sqr'].mean() m8 aa maxim # En esta categoría, si el tubo 1 se pone al valor óptimo de presión 3, la media del SQR mejora en un 14.62%. 
# ### Dataset9 # + ## Comenzamos a hacer rf params = {'bootstrap': True, 'criterion': 'mse', 'max_depth': 10, 'max_features': 'sqrt', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 300, 'n_jobs': -1, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False} rfr = RandomForestRegressor(**params) rf9 = rfr.fit(X9, y9) # - rf9.feature_importances_ sorted_idx = rf9.feature_importances_.argsort() plt.barh(X9.columns[sorted_idx], rf9.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") # + ## Mejora aa1 = pd.DataFrame(datos9.groupby('PresPos5').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim1 = aa1[:].values.max() aa2 = pd.DataFrame(datos9.groupby('PresPos1').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim2 = aa2[:].values.max() maxim = np.array([maxim1,maxim2]).mean() #mejora m9=100*(maxim-datos9['sqr'].mean())/datos9['sqr'].mean() m9 # - aa2 maxim2 aa1 maxim1 # En esta categoría, si el tubo 5 se pone al valor óptimo de presión 3, la media del SQR mejora en un 13.79%. Si el tubo 1 se pone al valor óptimo de presión 1, la media del SQR mejora en un 5.34%. 
# ### Dataset10 # + ## Comenzamos a hacer rf params = {'bootstrap': True, 'criterion': 'mse', 'max_depth': 10, 'max_features': 'sqrt', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 300, 'n_jobs': -1, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False} rfr = RandomForestRegressor(**params) rf10 = rfr.fit(X10, y10) # - rf10.feature_importances_ sorted_idx = rf10.feature_importances_.argsort() plt.barh(X10.columns[sorted_idx], rf10.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") ## Mejora aa = pd.DataFrame(datos10.groupby('PresPos5').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim = aa[:].values.max() #mejora m10=100*(maxim-datos10['sqr'].mean())/datos10['sqr'].mean() m10 aa maxim # En esta categoría, si el tubo 5 se pone al valor óptimo de presión 1, la media del SQR mejora en un 11.58%. 
# ### Dataset11 # + ## Comenzamos a hacer rf params = {'bootstrap': True, 'criterion': 'mse', 'max_depth': 10, 'max_features': 'sqrt', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 300, 'n_jobs': -1, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False} rfr = RandomForestRegressor(**params) rf11 = rfr.fit(X11, y11) # - rf11.feature_importances_ sorted_idx = rf11.feature_importances_.argsort() plt.barh(X11.columns[sorted_idx], rf11.feature_importances_[sorted_idx]) plt.xlabel("Importancia de las variables"); ## Mejora aa = pd.DataFrame(datos11.groupby('PresPos4').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim = aa[:].values.max() #mejora m11=100*(maxim-datos11['sqr'].mean())/datos11['sqr'].mean() m11 aa maxim # En esta categoría, si el tubo 4 se pone al valor óptimo de presión 4, la media del SQR mejora en un 5.37%. 
# ### Dataset12 # + ## Comenzamos a hacer rf params = {'bootstrap': True, 'criterion': 'mse', 'max_depth': 10, 'max_features': 'sqrt', 'max_leaf_nodes': None, 'min_impurity_decrease': 0.0, 'min_impurity_split': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_estimators': 300, 'n_jobs': -1, 'oob_score': False, 'random_state': 42, 'verbose': 0, 'warm_start': False} rfr = RandomForestRegressor(**params) rf12 = rfr.fit(X12, y12) # - rf12.feature_importances_ sorted_idx = rf12.feature_importances_.argsort() plt.barh(X12.columns[sorted_idx], rf12.feature_importances_[sorted_idx]) plt.xlabel("Random Forest Feature Importance") ## Mejora aa = pd.DataFrame(datos12.groupby('PresPos5').agg({'sqr': ['mean']})) #aa.reset_index(drop=True, inplace = True) maxim = aa[:].values.max() #mejora m12=100*(maxim-datos12['sqr'].mean())/datos12['sqr'].mean() m12 aa maxim # En esta categoría, si el tubo 5 se pone al valor óptimo de presión 3, la media del SQR mejora en un 7.35%.
notebooks/RF_categorias.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Homework 7
# ## Due Date: Wednesday, October 25th at 11:59 PM

# # Problem 1: Linked List Class
# Write a linked list class called `LinkedList`. Remember, a singly linked list is made up of nodes each of which contain a value and a pointer. The first node is called the "head node".
#
# Here are the required methods:
# * `__init__(self, head)` where `head` is the value of the head node. You could make the head node an attribute.
# * `__len__(self)`: Returns the number of elements in the linked list.
# * `__getitem__(self, index)` returns the value of the node corresponding to `index`. Include checks to make sure that `index` is not out of range and that the user is not trying to index an empty list.
# * `__repr__(self)` returns `LinkedList(head_node)`.
# * `insert_front(self, element)` inserts a new node with value `element` at the beginning of the list.
# * `insert_back(self, element)` inserts a new node with value `element` at the end of the list.
#
# Note: An alternative implementation is to create a `Node` class. You are not required to make a `Node` class but you may if you prefer that implementation. Please don't steal that implementation from the online forums. I've seen those too.
# + # The Node class for nodes in the LinkedList class Node: def __init__(self, val): self.val = val self.next = None def __repr__(self): return "Node({})".format(self.val) # The LinkedList class class LinkedList: def __init__(self, head): self.head_node = Node(head) self.size = 1 def __len__(self): return self.size def __getitem__(self, index): if index >= self.size or index < 0: raise IndexError('LinkedList Index Out of Bound.') p = self.head_node idx = 0 while idx != index: idx += 1 p = p.next return p.val def __repr__(self): return "LinkedList({})".format(self.head_node.val) def insert_front(self, element): new_head = Node(element) new_head.next = self.head_node self.head_node = new_head self.size += 1 def insert_back(self, element): p = self.head_node while p.next != None: p = p.next p.next = Node(element) self.size += 1 # + my_linked_list = LinkedList(1) my_linked_list.insert_front(0) my_linked_list.insert_back(2) my_linked_list.insert_front(-1) my_linked_list.insert_back(100) print('Length of my_linked_list: {}'.format(len(my_linked_list))) for v in my_linked_list: print(v) # - repr(my_linked_list) # + ll = LinkedList(5) ll.insert_back(6) ll.insert_back(7) ll_from_repr = eval(repr(ll)) # Make another LinkedList with eval(repr(ll)) ll_from_repr.insert_front(4) ll_from_repr.insert_back(6) print('Length of ll_from_repr: {}'.format(len(ll_from_repr))) # - # # Problem 2: Binary Tree Class # A binary search tree is a binary tree with the invariant that for any particular node the left child is smaller and the right child is larger. Create the class `BinaryTree` with the following specifications: # # `__init__(self)`: Constructor takes no additional arguments # # `insert(self, val)`: This method will insert `val` into the tree # # (Optional) `remove(self, val)`: This will remove `val` from the tree. # 1. If the node to be deleted has no children then just remove it. # 2. If the node to be deleted has only one child, remove the node and replace it with its child. 
# 3. If the node to be deleted has two children, replace the node to be deleted with the maximum value in the left subtree. Finally, delete the node with the maximum value in the left-subtree. # # `getValues(self. depth)`: Return a list of the entire row of nodes at the specified depth with `None` at the index if there is no value in the tree. The length of the list should therefore be $2^{\text{depth}}$. # Here is a sample output: # # ```python # bt = BinaryTree() # arr = [20, 10, 17, 14, 3, 0] # for i in arr: # bt.insert(i) # # print("Height of binary tree is {}.\n".format(len(bt))) # for i in range(len(bt)): # print("Level {0} values: {1}".format(i, bt.getValues(i))) # ``` # # ``` # Height of binary tree is 4. # # Level 0 values: [20] # Level 1 values: [10, None] # Level 2 values: [3, 17, None, None] # Level 3 values: [0, None, 14, None, None, None, None, None] # ``` # # Note that you do not need to format your output in this way. Nor are you required to implement a `__len__` method to compute the height of the tree. I did this because it was convenient for illustration purposes. This example is simply meant to show you some output at each level of the tree. # ## Note of Reference # **I referred to Chapter 3 - 12 Binary Search Tree of the book "Introduction to Algorithms 3-ED". 
**
# http://ressources.unisciel.fr/algoprog/s00aaroot/aa00module1/res/%5BCormen-AL2011%5DIntroduction_To_Algorithms-A3.pdf

# +
import warnings

# The BinaryNode class for nodes in the BinaryTree
class BinaryNode:
    """A BST node holding a value plus parent (`p`), `left` and `right` links."""

    def __init__(self, val):
        self.val = val
        self.p = None
        self.left = None
        self.right = None

    def __repr__(self):
        return "BinaryNode({})".format(self.val)

    def count_child(self): # count the number of children of this node
        if self.left == None and self.right == None:
            return 0
        elif self.left != None and self.right != None:
            return 2
        else:
            return 1


# The BinaryTree class
class BinaryTree:
    """Binary search tree with insert/remove/getValues, following CLRS ch. 12.

    `getValues(depth)` temporarily grafts sentinel nodes (value 'None') into
    the tree to pad missing children, then removes them again with
    `clearNoneNodes`.
    """

    def __init__(self):
        self.root = None

    def __repr__(self):
        return "BinaryTree()"

    # The height of the BinaryTree
    def __len__(self):
        return self.maxDepth(self.root)

    # The height of the BinaryTree (recursive: 1 + deeper of the two subtrees)
    def maxDepth(self, root):
        if root == None:
            return 0
        else:
            return max(self.maxDepth(root.left), self.maxDepth(root.right))+1

    # Insert
    def insert(self, val):
        bi_node = BinaryNode(val) # create a new BinaryNode for the value to be inserted
        if self.root == None: # if the tree is empty, we just need to insert it at root
            self.root = bi_node
            return
        current_node = self.root # walk thru the tree to find the right position to insert
        while current_node != None:
            current_p = current_node
            if val > current_node.val:
                current_node = current_node.right
            else:
                current_node = current_node.left
        if val > current_p.val:
            current_p.right = bi_node # is a right child
        else:
            current_p.left = bi_node # is a left child
        bi_node.p = current_p # set parent

    # Print out nodes sorted ascendingly (in-order traversal)
    def inOrderWalk(self, node):
        if node != None:
            self.inOrderWalk(node.left)
            print(node.val)
            self.inOrderWalk(node.right)

    # Delete the nodes with 'None' as value (the sentinels added by getValuesNode)
    def clearNoneNodes(self, node):
        if node != None:
            if node.val == 'None':
                # A sentinel is always a child, so node.p is set here.
                if node == node.p.right:
                    node.p.right = None
                else:
                    node.p.left = None
            self.clearNoneNodes(node.left)
            self.clearNoneNodes(node.right)

    # GetValues: calling getValuesNode(self.root, 0, depth, values)
    def getValues(self, depth):
        """Return the row of values at `depth`, left to right, with 'None' padding."""
        values = []
        self.getValuesNode(self.root, 0, depth, values)
        # Undo the sentinel padding inserted during the traversal.
        self.clearNoneNodes(self.root)
        return values

    # GetValues from the subtree rooted at node, store in values
    def getValuesNode(self, node, current_depth, depth, values):
        if node != None:
            if current_depth == depth:
                values.append(node.val)
            else:
                # Pad missing children with sentinel nodes so every slot of
                # the requested level is represented.
                if node.left == None:
                    none_node = BinaryNode('None')
                    none_node.p = node
                    node.left = none_node
                if node.right == None:
                    none_node = BinaryNode('None')
                    none_node.p = node
                    node.right = none_node
                self.getValuesNode(node.left, current_depth+1, depth, values)
                self.getValuesNode(node.right, current_depth+1, depth, values)

    # Return the right-most node from the subtree rooted at node
    def tree_max(self, node):
        while node.right != None:
            node = node.right
        return node

    # Return the predecessor of a certain node (Not Used in the HW)
    def tree_predecessor(self, node):
        if node.left != None:
            return self.tree_max(node.left)
        parent = node.p
        while parent != None and node == parent.left:
            node = parent
            parent = parent.p
        return parent

    # Replace the subtree rooted at u with the subtree rooted at v
    def transplant(self, u, v):
        if u.p == None:
            self.root = v
        elif u == u.p.left:
            u.p.left = v
        else:
            u.p.right = v
        if v != None:
            v.p = u.p

    # Search for the value=key thru the subtree rooted at node
    def search(self, node, key):
        while node != None and key != node.val:
            if key > node.val:
                node = node.right
            else:
                node = node.left
        return node

    # Remove
    def remove(self, val):
        """Remove the node holding `val`; two-child case is replaced by the
        maximum of the left subtree (CLRS TREE-DELETE, mirrored)."""
        rm_node = self.search(self.root, val)
        if rm_node == None: # invalid remove node
            warnings.warn('The value to be removed does not has a node associated.')
            return
        if rm_node.left == None:
            self.transplant(rm_node, rm_node.right)
        elif rm_node.right == None:
            self.transplant(rm_node, rm_node.left)
        else:
            left_max = self.tree_max(rm_node.left)
            if left_max.p != rm_node:
                self.transplant(left_max, left_max.left)
                left_max.left = rm_node.left
                left_max.left.p = left_max
            self.transplant(rm_node, left_max)
            left_max.right = rm_node.right
            left_max.right.p = left_max
# -

tree1 = BinaryTree()
arr1 = [20, 10, 17, 14, 3, 0]
for a1 in arr1:
    tree1.insert(a1)
tree1

tree1.root

# +
print('Height of tree1: ', len(tree1))
for i in range(len(tree1)):
    print('Level %d values: ' % i, tree1.getValues(i))
tree1.inOrderWalk(tree1.root)

# +
print('Remove 17')
tree1.remove(17)
print('Height of tree1: ', len(tree1))
for i in range(len(tree1)):
    print('Level %d values: ' % i, tree1.getValues(i))
tree1.inOrderWalk(tree1.root)

# +
print('Remove 0')
tree1.remove(0)
print('Height of tree1: ', len(tree1))
for i in range(len(tree1)):
    print('Level %d values: ' % i, tree1.getValues(i))
tree1.inOrderWalk(tree1.root)

# +
print('Remove 10')
tree1.remove(10)
print('Height of tree1: ', len(tree1))
for i in range(len(tree1)):
    print('Level %d values: ' % i, tree1.getValues(i))
tree1.inOrderWalk(tree1.root)

# +
print('Remove 20')
tree1.remove(20)
print('Height of tree1: ', len(tree1))
for i in range(len(tree1)):
    print('Level %d values: ' % i, tree1.getValues(i))
tree1.inOrderWalk(tree1.root)

# +
tree2 = BinaryTree()
arr2 = [13, 7, 19, 17, 3, 29, 5, 31, 2, 11]
for a2 in arr2:
    tree2.insert(a2)

print('Height of tree2: ', len(tree2))
for i in range(len(tree2)):
    print('Level %d values: ' % i, tree2.getValues(i))
tree2.inOrderWalk(tree2.root)

# +
print('Remove 13')
tree2.remove(13)
print('Height of tree2: ', len(tree2))
for i in range(len(tree2)):
    print('Level %d values: ' % i, tree2.getValues(i))
tree2.inOrderWalk(tree2.root)
# -

# # Problem 3: Peer Evaluations
# Evaluate the members of your group for Milestone 1. Please follow the instructions in the provided survey. The survey can be found here: [Milestone 1 Peer Evaluation](https://harvard.az1.qualtrics.com/jfe/form/SV_0JnuXbE5QjLCrKB).

# # Problem 4: Course Evaluation
# Please take the [Course Evaluation](https://docs.google.com/forms/d/e/1FAIpQLSdDyrtf_aByU4xNeLMSmDrFCJ2OLDrK1Q7ZoeTd2Whf_cdRrw/viewform?usp=sf_link).
Homework/HW7/HW7-final.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# # !pip install sqlalchemy
# # !pip install pymysql
# -

from sqlalchemy import create_engine
import pymysql
import pandas as pd

# NOTE(review): credentials are hard-coded in the connection URL; move them to
# an environment variable or a config file before sharing this notebook.
db_connection_str = 'mysql+pymysql://root:tianshan@localhost/demo'
db_connection = create_engine(db_connection_str)

# +
# Aggregate CVaR per (Business Level 2, Business Sub Level 3, Risk Type, Date)
# and pre-create the empty alert/difference columns that are filled in below.
# query = "select * from demo.bankdata"
query = "SELECT `Business Level 2`, `Business Sub Level 3`, `Risk Type`,`Date`, sum(CVaR) as `CVaR Sum`, null as 'Difference in %', null as 'Difference in figure', null as 'Alert' FROM demo.bankdata group by `Business Level 2`, `Business Sub Level 3`, `Risk Type`, `Date` order by `Business Level 2`, `Business Sub Level 3`, `Risk Type`, `Date`;"

# pd.set_option('display.max_rows', df.shape[0]+1)
df = pd.read_sql(query, con=db_connection)
df

# +
# Compare each row with the previous one; because the query sorts by the three
# group columns and then Date, consecutive rows of the same group are adjacent.
# Flag a WARNING when both the relative and the absolute CVaR jump are large.
# pd.Series(df.iloc[1, 0:3] == df.iloc[2, 0:3]).any()
for index, row in df.iterrows():
    if index > 0:
        if pd.Series(df.iloc[index, 0:3] == df.iloc[index-1, 0:3]).all():
            diff_figure = abs(df.iloc[index, 4] - df.iloc[index-1, 4])
            diff_prec = abs(((df.iloc[index, 4] - df.iloc[index-1, 4]) / df.iloc[index, 4]) * 100.0)
            df.loc[index, 'Difference in %'] = diff_prec
            df.loc[index, 'Difference in figure'] = diff_figure
            # Fix: the original condition was
            #     (abs(pct) > 10 and abs(fig)) > 100000
            # — the misplaced parenthesis compared 100000 against the result of
            # the `and` chain (either False or the figure). It happened to
            # evaluate the same by short-circuit luck, but the intent is two
            # explicit threshold checks, written out below.
            if abs(df.loc[index, 'Difference in %']) > 10 and abs(df.loc[index, 'Difference in figure']) > 100000:
                df.loc[index, 'Alert'] = 'WARNING'
df
# -

df.loc[df['Alert'] == 'WARNING']

# Release the SQLAlchemy connection pool.
db_connection.dispose()
casestudy/case3/scripts/bankdboperates3.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.7.1
#     language: julia
#     name: julia-1.7
# ---

# +
# 2-D wavenumber grids: returns (Kx, Ky), both 2Nx × 2Ny arrays, with
# Kx varying along the first dimension and Ky along the second.
# `kgrid` is assumed to be defined in an earlier part of the course notes —
# TODO confirm it is in scope when this notebook runs.
kgrid2d(Nx, Ny=Nx) = (
    [ kx for kx in kgrid(Nx), ky in 1:2Ny ],
    [ ky for kx in 1:2Nx, ky in kgrid(Ny) ] )

# +
# Fix: the original line read `N = 64,` — in Julia the trailing comma
# continues the expression onto the next line (tuple syntax), so N never
# received the plain integer 64. It must be a simple assignment.
N = 64
f = (x, y) -> exp(-3(cos(x)sin(y))) - exp(-3(sin(x)cos(y)))
F̂ = triginterp2d(f, N)
Kx, Ky = kgrid2d(N)
# Solve L̂ Û = F̂ in Fourier space with symbol (kx² + ky²)²; the zero mode is
# patched (L̂[1] = 1, Û[1] = 0) to avoid dividing by zero there.
L̂ = (Kx.^2 + Ky.^2).^2
L̂[1] = 1
Û = F̂ ./ L̂
Û[1] = 0
U = real.(ifft(Û) * (2N)^2)
x = xgrid(N)
contourf(x, x, U, size = (300,300), colorbar=false)
# -

# Convergence study: max-norm interpolation error as a function of N.
# NOTE(review): `NN` and `M` (and `trigerr`, `xgrid`, `triginterp2d`) are not
# defined in this view — presumably set earlier in the notes; verify.
err(N) = trigerr(f, triginterp2d(f, N), M)
plot(NN, err.(NN), lw=2, ms=4, m=:o, label = "error",
     yscale = :log10, size = (300, 250),
     xlabel = L"N", ylabel = L"\Vert f - I_N f \Vert_\infty")
α = asinh(1 / sqrt(10))
plot!(NN[5:end], 2 * exp.( - α * NN[5:end]), c=:black, lw=2, ls=:dash,
      label = L"\exp(-\alpha N)")
nb/SpectralMethods2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID' os.environ['CUDA_VISIBLE_DEVICES']='1' # + import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import random import torchvision.datasets as datasets import torchvision.transforms as transforms import torchvision import torch.optim as optim from torchvision import datasets, transforms from torch.autograd import Variable from torchvision.utils import save_image # + class Flatten(nn.Module): def forward(self, input): return input.view(input.size(0), -1) class UnFlatten(nn.Module): def forward(self, input, size=100): return input.view(input.size(0), size, 1, 1) # - class VAE(nn.Module): def __init__(self, x_dim, h_dim1, h_dim2, z_dim): super(VAE, self).__init__() # encoder part self.fc1 = nn.Linear(x_dim, h_dim1) self.fc2 = nn.Linear(h_dim1, h_dim2) self.fc31 = nn.Linear(h_dim2, z_dim) self.fc32 = nn.Linear(h_dim2, z_dim) # decoder part self.fc4 = nn.Linear(z_dim, h_dim2) self.fc5 = nn.Linear(h_dim2, h_dim1) self.fc6 = nn.Linear(h_dim1, x_dim) def encoder(self, x): h = F.relu(self.fc1(x)) h = F.relu(self.fc2(h)) return self.fc31(h), self.fc32(h) # mu, log_var def sampling(self, mu, log_var): std = torch.exp(0.5*log_var) eps = torch.randn_like(std) return eps.mul(std).add_(mu) # return z sample def decoder(self, z): h = F.relu(self.fc4(z)) h = F.relu(self.fc5(h)) return F.sigmoid(self.fc6(h)) def forward(self, x): mu, log_var = self.encoder(x.view(-1, 784)) z = self.sampling(mu, log_var) return self.decoder(z), mu, log_var # + train_dataset = datasets.MNIST(root='./mnist_data/', train=True, transform=transforms.ToTensor(), download=True) test_dataset = datasets.MNIST(root='./mnist_data/', train=False, transform=transforms.ToTensor(), download=False) train_loader = 
torch.utils.data.DataLoader(dataset=train_dataset, batch_size=128, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=128, shuffle=False) # - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") vae = VAE(x_dim=784, h_dim1= 512, h_dim2=256, z_dim=2) if torch.cuda.is_available(): vae.cuda() optimizer = optim.Adam(vae.parameters()) def loss_function(recon_x, x, mu, log_var): BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum') KLD = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp()) return BCE + 10 * KLD def train(epoch): vae.train() train_loss = 0 for batch_idx, (data, _) in enumerate(train_loader): data = data.cuda() optimizer.zero_grad() recon_batch, mu, log_var = vae(data) loss = loss_function(recon_batch, data, mu, log_var) loss.backward() train_loss += loss.item() optimizer.step() if batch_idx % 100 == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item() / len(data))) print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(train_loader.dataset))) def test(): vae.eval() test_loss= 0 with torch.no_grad(): for data, _ in test_loader: data = data.cuda() recon, mu, log_var = vae(data) # sum up batch loss test_loss += loss_function(recon, data, mu, log_var).item() test_loss /= len(test_loader.dataset) print('====> Test set loss: {:.4f}'.format(test_loss)) # + from tqdm.autonotebook import tqdm for epoch in tqdm(range(1, 51)): train(epoch) test() # + with torch.no_grad(): # z = torch.randn(64, 2).cuda() x = np.linspace(-1, 1, 21) z = [] for i in x: for j in x: z.append([j, -i]) z = torch.tensor(z).cuda() # print (z.shape) sample = vae.decoder(z.float()).cuda() save_image(sample.view(21*21, 1, 28, 28), './samples/sample_1' + '.png', nrow=21) # - z.dtype
AI502-TA/HW4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # (DPOCFC)= # # 3.1 Definición de problemas de optimización, conjuntos y funciones convexas # ```{admonition} Notas para contenedor de docker: # # Comando de docker para ejecución de la nota de forma local: # # nota: cambiar `<ruta a mi directorio>` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker y `<versión imagen de docker>` por la versión más actualizada que se presenta en la documentación. # # `docker run --rm -v <ruta a mi directorio>:/datos --name jupyterlab_optimizacion -p 8888:8888 -d palmoreck/jupyterlab_optimizacion:<versión imagen de docker>` # # password para jupyterlab: `<PASSWORD>` # # Detener el contenedor de docker: # # `docker stop jupyterlab_optimizacion` # # Documentación de la imagen de docker `palmoreck/jupyterlab_optimizacion:<versión imagen de docker>` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/optimizacion). # # ``` # --- # Nota generada a partir de [liga1](https://www.dropbox.com/s/qb3swgkpaps7yba/4.1.Introduccion_optimizacion_convexa.pdf?dl=0), [liga2](https://www.dropbox.com/s/6isby5h1e5f2yzs/4.2.Problemas_de_optimizacion_convexa.pdf?dl=0), [liga3](https://www.dropbox.com/s/ko86cce1olbtsbk/4.3.1.Teoria_de_convexidad_Conjuntos_convexos.pdf?dl=0), [liga4](https://www.dropbox.com/s/mmd1uzvwhdwsyiu/4.3.2.Teoria_de_convexidad_Funciones_convexas.pdf?dl=0), [liga5](https://drive.google.com/file/d/1xtkxPCx05Xg4Dj7JZoQ-LusBDrtYUqOF/view), [liga6](https://drive.google.com/file/d/16-_PvWNaO0Zc9x04-SRsxCRdn5fxebf2/view). 
# ```{admonition} Al final de esta nota la comunidad lectora: # :class: tip # # * Conocerá la definición de un problema de optimización, algunos ejemplos, definiciones y resultados que serán utilizados en los métodos para resolver problemas de optimización con énfasis en funciones convexas. # # * Tendrá una lista ejemplo de funciones convexas utilizadas en aplicaciones. # # ``` # ## ¿Problemas de optimización numérica? # Una gran cantidad de aplicaciones plantean problemas de optimización. Tenemos problemas básicos que se presentan en cursos iniciales de cálculo: # # *Una caja con base y tapa cuadradas debe tener un volumen de $100 cm^3$. Encuentre las dimensiones de la caja que minimicen la cantidad de material.* # # Y tenemos más especializados que encontramos en áreas como Estadística, Ingeniería, Finanzas o Aprendizaje de Máquina, *Machine Learning*: # # * Ajustar un modelo de regresión lineal a un conjunto de datos. # # * Buscar la mejor forma de invertir un capital en un conjunto de activos. # # * Elección del ancho y largo de un dispositivo en un circuito electrónico. # # * Ajustar un modelo que clasifique un conjunto de datos. # En general un problema de optimización matemática o numérica tiene la forma: # # $$\displaystyle \min f_o(x)$$ # $$\text{sujeto a:} f_i(x) \leq b_i, i=1,\dots, m$$ # # donde: $x=(x_1,x_2,\dots, x_n)^T$ es la **variable de optimización del problema**, la función $f_o: \mathbb{R}^{n} \rightarrow \mathbb{R}$ es la **función objetivo**, las funciones $f_i: \mathbb{R}^n \rightarrow \mathbb{R}, i=1,\dots,m$ son las **funciones de restricción** (aquí se colocan únicamente desigualdades pero pueden ser sólo igualdades o bien una combinación de ellas) y las constantes $b_1,b_2,\dots, b_m$ son los **límites o cotas de las restricciones**. # Un vector $x^* \in \mathbb{R}^n$ es nombrado **óptimo** o solución del problema anterior si tiene el valor más pequeño de entre todos los vectores $x \in \mathbb{R}^n$ que satisfacen las restricciones. 
Por ejemplo, si $z \in \mathbb{R}^n$ satisface $f_1(z) \leq b_1, f_2(z) \leq b_2, \dots, f_m(z) \leq b_m$ y $x^*$ es óptimo entonces $f_o(z) \geq f_o(x^*)$. # ```{margin} # # A grandes rasgos dos problemas de optimización son equivalentes si con la solución de uno de ellos se obtiene la solución del otro y viceversa. # # ``` # ```{admonition} Comentarios # # * Se consideran funciones objetivo $f_o: \mathbb{R}^n \rightarrow \mathbb{R}$, sin embargo, hay formulaciones que utilizan $f_o: \mathbb{R}^n \rightarrow \mathbb{R}^q$. Tales formulaciones pueden hallarlas en la optimización multicriterio, multiobjetivo, vectorial o también nombrada Pareto, ver [Multi objective optimization](https://en.wikipedia.org/wiki/Multi-objective_optimization). # # * El problema de optimización definido utiliza una forma de minimización y no de maximización. Típicamente en la literatura por convención se consideran problemas de este tipo. Además minimizar $f_o$ y maximizar $-f_o$ son **problemas de optimización equivalentes**. # # ``` # ### Ejemplo # $$\displaystyle \min_{x \in \mathbb{R}^n} ||x||_2$$ # $$\text{sujeto a:} Ax \leq b$$ # # # con $A \in \mathbb{R}^{m \times n}, b \in \mathbb{R}^m$. En este problema buscamos el vector $x$ que es solución del problema $Ax \leq b$ con **mínima norma Euclidiana**. La función objetivo es $f_o(x)=||x||_2$, las funciones de restricción son las desigualdades lineales $f_i(x) = a_i^Tx \leq b_i$ con $a_i$ $i$-ésimo renglón de $A$ y $b_i$ $i$-ésima componente de $b$, $\forall i=1,\dots,m$. # # ```{admonition} Comentario # # Un problema similar (sólo modificando desigualdad por igualdad) lo encontramos al resolver un sistema de ecuaciones lineales $Ax=b$ *underdetermined* en el que $m < n$ y se busca el vector $x$ con mínima norma Euclidiana que satisfaga tal sistema. Este sistema puede tener infinitas soluciones o ninguna solución. 
# # ``` # ### Ejemplo # Encuentra el punto en la gráfica de $y=x^2$ que es más cercano al punto $P=(1,0)$ bajo la norma Euclidiana. # Deseamos minimizar la cantidad $||(1,0)-(x,y)||_2$. Además $y = y(x)$ por lo que definiendo la función objetivo $f_o(x) = ||(1,0)-(x,x^2)||_2=||(1-x,-x^2)||_2=\sqrt{(1-x)^2+x^4}$, el problema de optimización (sin restricciones) es: # # $$\displaystyle \min_{x \in \text{dom}f_o}\sqrt{(1-x)^2+x^4}$$ # ## Optimización numérica en ciencia de datos # La ciencia de datos apunta al desarrollo de técnicas y se apoya de aplicaciones de *machine learning* para la extracción de conocimiento útil tomando como fuente de información las grandes cantidades de datos. Algunas de las aplicaciones son: # # * Clasificación de documentos o textos: detección de *spam*. # # * [Procesamiento de lenguaje natural](https://en.wikipedia.org/wiki/Natural_language_processing): [named-entity recognition](https://en.wikipedia.org/wiki/Named-entity_recognition). # # * [Reconocimiento de voz](https://en.wikipedia.org/wiki/Speech_recognition). # # * [Visión por computadora](https://en.wikipedia.org/wiki/Computer_vision): reconocimiento de rostros o imágenes. # # * Detección de fraude. # # * [Reconocimiento de patrones](https://en.wikipedia.org/wiki/Pattern_recognition). # # * Diagnóstico médico. # # * [Sistemas de recomendación](https://en.wikipedia.org/wiki/Recommender_system). # Las aplicaciones anteriores involucran problemas como son: # # * Clasificación. # # * Regresión. # # * *Ranking*. # # * *Clustering*. # # * Reducción de la dimensionalidad. # ### Optimización numérica y *machine learning* # En cada una de las aplicaciones o problemas anteriores se utilizan **funciones de pérdida** que guían el proceso de aprendizaje. Tal proceso involucra **optimización de parámetros** de la función de pérdida. 
Por ejemplo, si la función de pérdida en un problema de regresión es una pérdida cuadrática $\mathcal{L}(y,\hat{y}) = (\hat{y}-y)^2$ con $\hat{y} = \hat{\beta}_0 + \hat{\beta_1}x$, entonces el vector de parámetros a optimizar (aprender) es $\beta= \left[ \begin{array}{c} \beta_0\\ \beta_1 \end{array} \right]$. # ```{sidebar} Un poco de historia... # # La IA o Inteligencia Artificial es una rama de la Ciencia de la Computación que atrajo un gran interés en 1950. # # Colloquially, the term artificial intelligence is often used to describe machines (or computers) that mimic “cognitive” functions that humans associate with the human mind, such as learning and problem solving ([<NAME>, <NAME>, 1995](https://en.wikipedia.org/wiki/Artificial_Intelligence:_A_Modern_Approach)) # ``` # *Machine learning* no sólo se apoya de la optimización pues es un área de Inteligencia Artificial que utiliza técnicas estadísticas para el diseño de sistemas capaces de aplicaciones como las escritas anteriormente, de modo que hoy en día tenemos *statistical machine learning*. No obstante, uno de los **pilares** de *machine learning* o *statistical machine learning* es la optimización. # *Machine learning* o *statistical machine learning* se apoya de las formulaciones y algoritmos en optimización. Sin embargo, también ha contribuido a ésta área desarrollando nuevos enfoques en los métodos o algoritmos para el tratamiento de grandes cantidades de datos o *big data* y estableciendo retos significativos no presentes en problemas clásicos de optimización. 
De hecho, al revisar literatura que intersecta estas dos disciplinas encontramos comunidades científicas que desarrollan o utilizan métodos o algoritmos exactos (ver [Exact algorithm](https://en.wikipedia.org/wiki/Exact_algorithm)) y otras que utilizan métodos de optimización estocástica (ver [Stochastic optimization](https://en.wikipedia.org/wiki/Stochastic_optimization) y [Stochastic approximation](https://en.wikipedia.org/wiki/Stochastic_approximation)) basados en métodos o algoritmos aproximados (ver [Approximation algorithm](https://en.wikipedia.org/wiki/Approximation_algorithm)). Hoy en día es común encontrar estudios que hacen referencia a **modelos o métodos de aprendizaje**. # ```{admonition} Observación # :class: tip # # Como ejemplo de lo anterior considérese la técnica de [**regularización**](https://en.wikipedia.org/wiki/Regularization_(mathematics)) que en *machine learning* se utiliza para encontrar soluciones que generalicen y provean una explicación no compleja del fenómeno en estudio. # # La regularización sigue el principio de la navaja de Occam, ver [Occam's razor](https://en.wikipedia.org/wiki/Occam%27s_razor): para cualquier conjunto de observaciones en general se prefieren explicaciones simples a explicaciones más complicadas. Aunque la técnica de regularización es conocida en optimización, han sido varias las aplicaciones de *machine learning* las que la han posicionado como clave. # # ``` # ### Del *small scale* al *large scale machine learning* # ```{sidebar} Un poco de historia... # # Un ejemplo de esto se observa en métodos de optimización desarrollados en la década de los $50$'s. 
Mientras que métodos tradicionales en optimización basados en el cálculo del gradiente y la Hessiana de una función son efectivos para problemas de aprendizaje *small-scale* (en los que utilizamos un enfoque en ***batch*** o por lote), en el contexto del aprendizaje *large-scale*, el **método de gradiente estocástico** se posicionó en el centro de discusiones a inicios del siglo XXI. # # El método de gradiente estocástico fue propuesto por <NAME> Monro en 1951, es un **algoritmo estocástico**. Ver [Stochastic gradient descent](https://en.wikipedia.org/wiki/Stochastic_gradient_descent). # # ``` # El inicio del siglo XXI estuvo marcado, entre otros temas, por un incremento significativo en la generación de información. Esto puede contrastarse con el desarrollo de los procesadores de las máquinas, el cual tuvo un menor avance en el incremento del *performance* al del siglo XX. Asimismo, las mejoras en dispositivos de almacenamiento o *storage* abarató costos de almacenamiento y mejoras en sistemas de *networking* permitieron la transmisión de la información más eficiente. En este contexto, los modelos y métodos de *statistical machine learning* se vieron limitados por el tiempo de cómputo y no por el tamaño de muestra. La conclusión de esto fue una inclinación en la comunidad científica por el diseño o uso de métodos o modelos para procesar grandes cantidades de datos usando recursos computacionales comparativamente menores. # ## ¿Optimización numérica convexa? # Aplicaciones de *machine learning* conducen al planteamiento de problemas de optimización convexa y no convexa. Por ejemplo en la aplicación de clasificación de textos, en donde se desea asignar un texto a clases definidas de acuerdo a su contenido (determinar si un documento de texto es sobre un tema), puede formularse un problema convexo a partir de una **función de pérdida convexa**. # ```{sidebar} Un poco de historia... 
# # Los tipos de redes neuronales profundas, *deep neural networks*, que han sido mayormente usadas a inicios del siglo XXI son las mismas que las que eran populares en los años $90$'s. El éxito de éstos tipos y su uso primordialmente se debe a la disponibilidad de *larger datasets* y mayores recursos computacionales. # # ``` # Como ejemplos de aplicaciones en la **optimización no convexa** están el reconocimiento de voz y reconocimiento de imágenes. El uso de [redes neuronales](https://en.wikipedia.org/wiki/Artificial_neural_network) [profundas](https://en.wikipedia.org/wiki/Deep_learning) ha tenido muy buen desempeño en tales aplicaciones haciendo uso de cómputo en la GPU, ver [ImageNet Classification with Deep Convolutional Neural Networks](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf), [2012: A Breakthrough Year for Deep Learning](https://medium.com/limitlessai/2012-a-breakthrough-year-for-deep-learning-2a31a6796e73). En este caso se utilizan **funciones objetivo no lineales y no convexas**. # ```{admonition} Comentarios # # * Desde los $40$'s se han desarrollado algoritmos para resolver problemas de optimización, se han analizado sus propiedades y se han desarrollado buenas implementaciones de software. Sin embargo, una clase de problemas de optimización en los que encontramos métodos **efectivos** son los convexos. # # * Métodos para optimización no convexa utilizan parte de la teoría de convexidad desarrollada en optimización convexa. Además, un buen número de problemas de aprendizaje utilizan funciones de pérdida convexas. 
# # ``` # (PESTOPT)= # ## Problema estándar de optimización # En lo que continúa se considera $f_0 = f_o$ (el subíndice "0" y el subíndice "o" son iguales) # ```{admonition} Definición # # Un problema estándar de optimización es: # # $$\displaystyle \min f_o(x)$$ # # $$\text{sujeto a:}$$ # # $$f_i(x) \leq 0, \quad \forall i=1,\dots,m$$ # # $$h_i(x) = 0, \quad \forall i=1,\dots,p$$ # # con $x=(x_1,x_2,\dots, x_n)^T$ es la **variable de optimización del problema**, $f_o: \mathbb{R}^n \rightarrow \mathbb{R}$ es la **función objetivo**, $f_i: \mathbb{R}^n \rightarrow \mathbb{R}$ $\forall i=1,\dots,m$ son las **restricciones de desigualdad**, $h_i: \mathbb{R}^n \rightarrow \mathbb{R}$, $\forall i=1,\dots,p$ son las **restricciones de igualdad**. # # ``` # ## Dominio del problema de optimización y puntos factibles # ```{admonition} Definiciones # # * El conjunto de puntos para los que la función objetivo y las funciones de restricción $f_i, h_i$ están definidas se nombra **dominio del problema de optimización**, esto es: # # $$\mathcal{D} = \bigcap_{i=0}^m\text{dom}f_i \cap \bigcap_{i=1}^p\text{dom}h_i.$$ # # # * Un punto $x \in \mathcal{D}$ se nombra **factible** si satisface las restricciones de igualdad y desigualdad. El conjunto de puntos factibles se nombra **conjunto de factibilidad**. # # * El {ref}`problema estándar de optimización <PESTOPT>` se nombra **problema de optimización factible** si existe **al menos un punto factible**, si no entonces es infactible. # # ``` # ## Valor óptimo del problema de optimización # ```{margin} # # Se asumen todos los puntos en el dominio del problema de optimización $\mathcal{D}$. # # ``` # ```{admonition} Definición # # El valor óptimo del problema se denota como $p^*$. En notación matemática es: # # $$p^* = \inf\{f_o(x) | f_i(x) \leq 0, \forall i=1,\dots,m, h_i(x) = 0 \forall i=1,\dots,p\}$$ # # ``` # ```{admonition} Comentarios # # * Si el problema es **infactible** entonces $p^* = \infty$. 
# # * Si $\exists x_k$ factible tal que $f_o(x_k) \rightarrow -\infty$ para $k \rightarrow \infty$ entonces $p^*=-\infty$ y se nombra **problema de optimización no acotado por debajo**. # # ``` # (POPTPROBOPT)= # ## Punto óptimo del problema de optimización # ```{margin} # # Se asumen todos los puntos en el dominio del problema de optimización $\mathcal{D}$. # # ``` # ```{admonition} Definición # # $x^*$ es **punto óptimo** si es factible y $f_o(x^*) = p^*$. # # El conjunto de óptimos se nombra **conjunto óptimo** y se denota: # # $$X_{\text{opt}} = \{x | f_i(x) \leq 0 \forall i=1,\dots,m, h_i(x) =0 \forall i=1,\dots,p, f_o(x) = p^*\}$$ # # ``` # ```{admonition} Comentarios # # * La propiedad de un punto óptimo $x^*$ es que si $z$ satisface las restricciones $f_i(z) \leq 0$ $\forall i=1,...,m$, $h_i(z)=0$ $\forall i=1,..,p$ se tiene: $f_o(x^*) \leq f_o(z)$. Es **óptimo estricto** si $z$ satisface las restricciones y $f_o(x^*) < f_o(z)$. # # * Si existe un punto óptimo se dice que el **valor óptimo se alcanza** y por tanto el problema de optimización tiene solución, es **soluble o *solvable***. # # * Si $X_{\text{opt}} = \emptyset$ se dice que el valor óptimo no se alcanza. Obsérvese que para problemas no acotados nunca se alcanza el valor óptimo. # # * Si $x$ es factible y $f_o(x) \leq p^* + \epsilon$ con $\epsilon >0$, $x$ se nombra **$\epsilon$-subóptimo** y el conjunto de puntos $\epsilon$-subóptimos se nombra **conjunto $\epsilon$-subóptimo**. # # ``` # ## Óptimo local # ```{margin} # # Se asumen todos los puntos en el dominio del problema de optimización $\mathcal{D}$. 
# # ``` # ```{admonition} Definición # # Un punto factible $x$ se nombra **óptimo local** si $\exists R > 0$ tal que: # # $$f_o(x) = \inf \{f_o(z) | f_i(z) \leq 0 \forall i=1,\dots,m, h_i(z) = 0 \forall i=1,\dots, p, ||z-x||_2 \leq R\}.$$ # # Así, $x$ resuelve: # # $$\displaystyle \min f_o(z)$$ # # $$\text{sujeto a:}$$ # # $$f_i(z) \leq 0, \forall i =1,\dots,m$$ # # $$h_i(z) =0, \forall i=1,\dots,p$$ # # $$||z-x||_2 \leq R$$ # # ``` # ```{admonition} Observación # :class: tip # # La palabra **óptimo** se utiliza para **óptimo global**, esto es, no consideramos la última restricción $||z-x||_2 \leq R$ en el problema de optimización y exploramos en todo el $\text{dom}f$. # # ``` # <img src="https://dl.dropboxusercontent.com/s/xyprhh7erbb6icb/min-max-points-example.png?dl=0" heigth="700" width="700"> # # ```{admonition} Observación # :class: tip # # Es común referirse al conjunto de mínimos y máximos como puntos extremos de una función. # # ``` # ## Restricciones activas, no activas y redundantes # ```{margin} # # Se asumen todos los puntos en el dominio del problema de optimización $\mathcal{D}$. # # ``` # ```{admonition} Definición # # Si $x$ es factible y $f_i(x)=0$ entonces la restricción de desigualdad $f_i(x) \leq 0$ se nombra **restricción activa en $x$**. Se nombra **inactiva en $x$** si $f_i(x) <0$ para alguna $i=1,\dots ,m$. # ``` # ```{admonition} Comentarios # # * Las restricciones de igualdad, $h_i(x)$, siempre son activas en el conjunto factible con $i=1,\dots ,p$. # # * Una restricción se nombra **restricción redundante** si al quitarla el conjunto factible no se modifica. # # (PROBOPTCONVEST)= # ## Problemas de optimización convexa en su forma estándar o canónica # ```{margin} # # Se asumen todos los puntos en el dominio del problema de optimización $\mathcal{D}$. # # ``` # ```{margin} # # Recuerda que una función afín es de la forma $h(x) = Ax+b$ con $A \in \mathbb{R}^{p \times n}$ y $b \in \mathbb{R}^p$. 
En la definición $h_i(x) = a_i^Tx-b_i$ con $a_i \in \mathbb{R}^n$, $b_i \in \mathbb{R}$ $\forall i=1,\dots,p$ y geométricamente $h_i(x)$ es un **hiperplano** en $\mathbb{R}^n$. # # ``` # ```{admonition} Definición # # Se define un problema de optimización convexa en su forma estándar o canónica como: # # $$\displaystyle \min f_o(x)$$ # # $$\text{sujeto a:}$$ # # $$f_i(x) \leq 0 , i=1,\dots,m$$ # # $$h_i(x)=0, i=1,\dots,p$$ # # donde: $f_i$ son **convexas** $\forall i=0,1,\dots,m$ y $h_i$ **es afín** $\forall i =1,\dots,p$. # # # ``` # ```{margin} # # Un conjunto $\alpha$-subnivel es de la forma $\{x \in \text{dom}f | f(x) \leq \alpha\}$. Un conjunto subnivel contiene las curvas de nivel de $f$, ver [Level set](https://en.wikipedia.org/wiki/Level_set): # # <img src="https://dl.dropboxusercontent.com/s/0woqoj8foo5eco9/level_set_of_func.png?dl=0" heigth="300" width="300"> # # ``` # ```{admonition} Comentarios # # * El conjunto de factibilidad de un problema de optimización convexa es un conjunto convexo. Esto se sigue pues es una intersección finita de conjuntos convexos: intersección entre las $x$'s que satisfacen $f_i(x) \leq 0$, $i=1,\dots ,m$, que se nombra **conjunto subnivel**, y las $x$'s que están en un hiperplano, esto es, que satisfacen $h_i(x) = 0$, $i=1,\dots ,p$. # # # * Si en el problema anterior se tiene que **maximizar** una $f_o$ función objetivo **cóncava** y se tienen misma forma estándar: $f_i$ convexa, $h_i$ afín entonces también se nombra al problema como **problema de optimización convexa**. Todos los resultados, conclusiones y algoritmos desarrollados para los problemas de minimización son aplicables para maximización. En este caso se puede resolver un problema de maximización al minimizar la función objetivo $-f_o$ que es convexa. # # ``` # ## Función convexa # ```{admonition} Definición # # Sea $f:\mathbb{R}^n \rightarrow \mathbb{R}$ una función con el conjunto $\text{dom}f$ convexo. 
$f$ se nombra convexa (en su $\text{dom}f$) si $\forall x,y \in \text{dom}f$ y $\theta \in [0,1]$ se cumple: # # $$f(\theta x + (1-\theta) y) \leq \theta f(x) + (1-\theta)f(y).$$ # # Si la desigualdad se cumple de forma estricta $\forall x \neq y$ $f$ se nombra **estrictamente convexa**. # # ``` # ```{admonition} Observaciones # :class: tip # # * La convexidad de $f$ se define para $\text{dom}f$ aunque para casos en particular se detalla el conjunto en el que $f$ es convexa. # # * La desigualdad que define a funciones convexas se nombra [**desigualdad de Jensen**](https://en.wikipedia.org/wiki/Jensen%27s_inequality). # # ``` # ### Propiedades # Entre las propiedades que tiene una función convexa se encuentran las siguientes: # # * Si $f$ es convexa el conjunto subnivel es un conjunto convexo. # # * $\text{dom}f$ es convexo $\therefore$ $\theta x + (1-\theta)y \in \text{dom}f$ # # # * $f$ es **cóncava** si $-f$ es convexa y **estrictamente cóncava** si $-f$ es estrictamente convexa. Otra forma de definir concavidad es con una desigualdad del tipo: # # $$f(\theta x + (1-\theta) y) \geq \theta f(x) + (1-\theta)f(y).$$ # # y mismas definiciones para $x,y, \theta$ que en la definición de convexidad. # # * Si $f$ es convexa, geométricamente el segmento de línea que se forma con los puntos $(x,f(x)), (y,f(y))$ está por encima o es igual a $f(\theta x + (1-\theta)y) \forall \theta \in [0,1]$ y $\forall x,y \in \text{dom}f$: # # <img src="https://dl.dropboxusercontent.com/s/fdcx1k150nfwykv/draw_convexity_for_functions.png?dl=0" heigth="300" width="300"> # # ## Conjuntos convexos # ### Línea y segmentos de línea # ```{admonition} Definición # # Sean $x_1, x_2 \in \mathbb{R}^n$ con $x_1 \neq x_2$. Entonces el punto: # # $$y = \theta x_1 + (1-\theta)x_2$$ # # con $\theta \in \mathbb{R}$ se encuentra en la línea que pasa por $x_1$ y $x_2$. 
$\theta$ se le nombra parámetro y si $\theta \in [0,1]$ tenemos un segmento de línea: # # <img src="https://dl.dropboxusercontent.com/s/dldljf5igy8xt9d/segmento_linea.png?dl=0" heigth="200" width="200"> # # ``` # ```{admonition} Comentarios # # * $y = \theta x_1 + (1-\theta)x_2 = x_2 + \theta(x_1 -x_2)$ y esta última igualdad se interpreta como "$y$ es la suma del punto base $x_2$ y la dirección $x_1-x_2$ escalada por $\theta$". # # * Si $\theta=0$ entonces $y=x_2$. Si $\theta \in [0,1]$ entonces $y$ se "mueve" en la dirección $x_1-x_2$ hacia $x_1$ y si $\theta>1$ entonces $y$ se encuentra en la línea "más allá" de $x_1$: # # <img src="https://dl.dropboxusercontent.com/s/nbahrio7p1mj4hs/segmento_linea_2.png?dl=0" heigth="350" width="350"> # # # El punto entre $x_1$ y $x_2$ tiene $\theta=\frac{1}{2}$. # ``` # ### Conjunto convexo # ```{admonition} Definición # # Un conjunto $\mathcal{C}$ es convexo si el segmento de línea entre cualquier par de puntos de $\mathcal{C}$ está completamente contenida en $\mathcal{C}$. Esto se escribe matemáticamente como: # # $$\theta x_1 + (1-\theta) x_2 \in \mathcal{C} \quad \forall \theta \in [0,1], \forall x_1, x_2 \in \mathcal{C}.$$ # # ``` # Ejemplos gráficos de conjuntos convexos: # <img src="https://dl.dropboxusercontent.com/s/gj54ism1lqojot6/ej_conj_convexos.png?dl=0" heigth="400" width="400"> # Ejemplos gráficos de conjuntos no convexos: # <img src="https://dl.dropboxusercontent.com/s/k37zh5v3iq3kx04/ej_conj_no_convexos.png?dl=0" heigth="350" width="350"> # ```{admonition} Comentarios # # # * El punto $\displaystyle \sum_{i=1}^k \theta_i x_i$ con $\displaystyle \sum_{i=1}^k \theta_i=1$, $\theta_i \geq 0 \forall i=1,\dots,k$ se nombra **combinación convexa** de los puntos $x_1, x_2, \dots, x_k$. Una combinación convexa de los puntos $x_1, \dots, x_k$ puede pensarse como una mezcla o promedio ponderado de los puntos, con $\theta_i$ la fracción $\theta_i$ de $x_i$ en la mezcla. 
# # * Un conjunto es convexo si y sólo si contiene cualquier combinación convexa de sus puntos. # # * El conjunto óptimo y los conjuntos $\epsilon$-subóptimos son convexos. Ver definiciones de conjunto óptimo y $\epsilon$-subóptimos en {ref}`punto óptimo del problema de optimización<POPTPROBOPT>`. # # ``` # ## Ejemplos de funciones convexas y cóncavas # * Una función afín es convexa y cóncava en todo su dominio: $f(x) = Ax+b$ con $A \in \mathbb{R}^{m \times n}, b \in \mathbb{R}^m$, $\text{dom}f = \mathbb{R}^n$. # # ```{admonition} Observación # :class: tip # # Por tanto las funciones lineales también son convexas y cóncavas. # # ``` # ```{margin} # # Recuérdese que los conjuntos de matrices que se utilizan para definir a matrices simétricas semidefinidas positivas y simétricas definidas positivas son $\mathbb{S}_{+}^n$ y $\mathbb{S}_{++}^n$ respectivamente ($\mathbb{S}$ es el conjunto de matrices simétricas). # # ``` # * Funciones cuadráticas: $f: \mathbb{R}^n \rightarrow \mathbb{R}$, $f(x) = \frac{1}{2} x^TPx + q^Tx + r$ son convexas en su dominio $\mathbb{R}^n$ si $P \in \mathbb{S}_+^n, q \in \mathbb{R}^n, r \in \mathbb{R}$ con $\mathbb{S}_+^n$ conjunto de **matrices simétricas positivas semidefinidas**. # # ```{admonition} Observación # :class: tip # # Observa que por este punto la norma $2$ o Euclidiana es una función convexa en $\mathbb{R}^n$. # # ``` # ```{margin} # # Recuérdese que el producto $x^T Ax$ con $A$ simétrica se le nombra forma cuadrática y es un número en $\mathbb{R}$. # # ``` # ```{admonition} Definición # # $x^TPx$ con $P \in \mathbb{S}^{n}_+$ se nombra forma cuadrática semidefinida positiva. # # ``` # ```{admonition} Comentario # # La función $f(x) = \frac{1}{2} x^TPx + q^Tx + r$ es estrictamente convexa si y sólo si $P \in \mathbb{S}_{++}^n$. $f$ es cóncava si y sólo si $P \in -\mathbb{S}_+^n$. 
# # ``` # * Exponenciales: $f: \mathbb{R} \rightarrow \mathbb{R}$, $f(x) = e^{ax}$ para cualquier $a \in \mathbb{R}$ es convexa en su dominio $\mathbb{R}$. # # * Potencias: $f: \mathbb{R} \rightarrow \mathbb{R}$, $f(x)=x^a$: # # * Si $a \geq 1$ o $a \leq 0$ entonces $f$ es convexa en $\mathbb{R}_{++}$ (números reales positivos). # * Si $0 \leq a \leq 1$ entonces $f$ es cóncava en $\mathbb{R}_{++}$. # * Potencias del valor absoluto: $f: \mathbb{R} \rightarrow \mathbb{R}$, $f(x)=|x|^p$ con $p \geq 1$ es convexa en $\mathbb{R}$. # # * Logaritmo: $f: \mathbb{R} \rightarrow \mathbb{R}$, $f(x) = \log(x)$ es cóncava en su dominio: $\mathbb{R}_{++}$. # # * Entropía negativa: $f(x) = \begin{cases} # x\log(x) &\text{ si } x > 0 ,\\ # 0 &\text{ si } x = 0 # \end{cases}$ es estrictamente convexa en su dominio $\mathbb{R}_+$. # # * Normas: cualquier norma es convexa en su dominio. # # * Función máximo: $f: \mathbb{R}^{n} \rightarrow \mathbb{R}$, $f(x) = \max\{x_1,\dots,x_n\}$ es convexa. # # * Función log-sum-exp: $f: \mathbb{R}^{n} \rightarrow \mathbb{R}$, $f(x)=\log\left(\displaystyle \sum_{i=1}^ne^{x_i}\right)$ es convexa en su dominio $\mathbb{R}^n$. # # * La media geométrica: $f: \mathbb{R}^{n} \rightarrow \mathbb{R}$, $f(x) = \left(\displaystyle \prod_{i=1}^n x_i \right)^\frac{1}{n}$ es cóncava en su dominio $\mathbb{R}_{++}^n$. # # * Función log-determinante: $f: \mathbb{S}^{n} \rightarrow \mathbb{R}^n$, $f(x) = \log(\det(X))$ es cóncava en su dominio $\mathbb{S}_{++}^n$. # (RESUT)= # ## Resultados útiles # ```{margin} # # Se sugiere revisar {ref}`definición de función, continuidad y derivada <FCD>` y {ref}`condición de un problema y estabilidad de un algoritmo <CPEA>` como recordatorio de definiciones. En particular las **definiciones de primera y segunda derivada, gradiente y Hessiana** para la primer nota y la **definición de número de condición de una matriz** para la segunda. 
# ``` # ### Sobre funciones convexas/cóncavas # # * Sea $f: \mathbb{R}^n \rightarrow \mathbb{R}$ diferenciable entonces $f$ es convexa si y sólo si $\text{dom}f$ es un conjunto convexo y se cumple: # # $$f(y) \geq f(x) + \nabla f(x)^T(y-x) \forall x,y \in \text{dom}f.$$ # # Si se cumple de forma estricta la desigualdad $f$ se nombra estrictamente convexa. También si su $\text{dom}f$ es convexo y se tiene la desigualdad en la otra dirección "$\leq$" entonces $f$ es cóncava. # # Geométricamente este resultado se ve como sigue para $\nabla f(x) \neq 0$: # # <img src="https://dl.dropboxusercontent.com/s/e581e22xeejdwu0/convexidad_con_hiperplano_de_soporte.png?dl=0" heigth="350" width="350"> # # # y el hiperplano $f(x) + \nabla f(x)^T(y-x)$ se nombra **hiperplano de soporte para la función $f$ en el punto $(x,f(x))$**. Obsérvese que si $\nabla f(x)=0$ se tiene $f(y) \geq f(x) \forall y \in \text{dom}f$ y por lo tanto $x$ es un mínimo global de $f$. # * Una función es convexa si y sólo si es convexa al restringirla a cualquier línea que intersecte su dominio, esto es, si $g(t) = f(x + tv)$ es convexa $\forall x,v \in \mathbb{R}^n$, $\forall t \in \mathbb{R}$ talque $x + tv \in \text{dom}f$ # * Sea $f: \mathbb{R}^n \rightarrow \mathbb{R}$ tal que $f \in \mathcal{C}^2(\text{dom}f)$. Entonces $f$ es convexa en $\text{dom}f$ si y sólo si $\text{dom}f$ es convexo y $\nabla^2f(x) \in \mathbb{S}^n_+$ en $\text{dom}f$. Si $\nabla^2f(x) \in \mathbb{S}^n_{++}$ en $\text{dom}f$ y $\text{dom}f$ es convexo entonces $f$ es estrictamente convexa en $\text{dom}f$. # ```{admonition} Comentario # # Para una función: $f: \mathbb{R} \rightarrow \mathbb{R}$, la hipótesis del enunciado anterior ($\nabla^2 f(x) \in \mathbb{S}^n_{++}$ en $\text{dom}f$) es que la segunda derivada sea positiva. El recíproco no es verdadero, para ver esto considérese $f(x)=x^4$ la cual es estrictamente convexa en $\text{dom}f$ pero su segunda derivada en $0$ no es positiva. 
# # ``` # (SPOPT)= # ### Sobre problemas de optimización # # Para **problemas de optimización sin restricciones**: # # # * **Condición necesaria de primer orden:** si $f_o$ es diferenciable y $x^*$ es óptimo entonces $\nabla f_o(x^*) = 0$. # # * **Condición necesaria de segundo orden:** si $f_o \in \mathcal{C}^2(\text{domf})$ y $x^*$ es mínimo local entonces $\nabla^2 f_o(x^*) \in \mathbb{S}^n_{+}$ # # * **Condición suficiente de segundo orden:** si $f_o \in \mathcal{C}^2(\text{domf})$, $\nabla f_o(x)=0$ y $\nabla^2f_o(x) \in \mathbb{S}^n_{++}$ entonces $x$ es mínimo local estricto. # # # ```{admonition} Comentario # # Las condiciones anteriores se les conoce con el nombre de **condiciones de optimalidad** para problemas de optimización sin restricciones. # # ``` # ### Sobre problemas de optimización convexa # # # * Una propiedad fundamental de un óptimo local en un problema de optimización convexa es que también es un óptimo global. Si la función es estrictamente convexa entonces el conjunto óptimo contiene a lo más un punto. # # * Si $f_o$ es diferenciable y $X$ es el conjunto de factibilidad entonces $x$ es óptimo si y sólo si $x \in X$ y $\nabla f_o(x)^T(y-x) \geq 0$ $\forall y \in X$. Si se considera como conjunto de factibilidad $X = \text{dom}f_o$ (que es un problema sin restricciones) la propiedad se reduce a la **condición necesaria y suficiente de primer orden**: $x$ es óptimo si y sólo si $\nabla f_o(x) = 0$. # # Geométricamente el resultado anterior se visualiza para $\nabla f_o(x) \neq 0$ y $-\nabla f_o(x)$ apuntando hacia la dirección dibujada: # # <img src="https://dl.dropboxusercontent.com/s/0tmpivvo5ob4oox/optimo_convexidad_con_hiperplano_de_soporte.png?dl=0" heigth="550" width="550"> # # ```{admonition} Comentario # # Por los resultados anteriores los métodos de optimización buscan resolver la **ecuación no lineal** $\nabla f_o(x)=0$ para aproximar en general mínimos locales. 
Dependiendo del número de soluciones de la ecuación $\nabla f_o(x)=0$ se tienen situaciones distintas. Por ejemplo, si no tiene solución entonces el/los óptimos no se alcanza(n) pues el problema puede no ser acotado por debajo o si existe el óptimo éste puede no alcanzarse. Por otro lado, si la ecuación tiene múltiples soluciones entonces cada solución es un mínimo de $f_o$. # ``` # (SPCRITICOS)= # ### Sobre puntos críticos # ```{admonition} Definición # # Puntos $x \in \text{intdom}f$ en los que $\nabla f(x) = 0$ o en los que $\nabla f$ no existe, se les nombra **puntos críticos o estacionarios** de $f$. # # ``` # * No todo punto crítico es un extremo de $f$. # # * La Hessiana de $f$ nos ayuda a caracterizar los puntos críticos en mínimos o máximos locales. Si $x \in \mathbb{R}^n$ es punto crítico: # # * Y además $\nabla^2f(x) \in \mathbb{S}_{++}$ entonces $x$ es mínimo local. # * Y además $\nabla^2f(x) \in -\mathbb{S}_{++}$ entonces $x$ es máximo local. # * Y además $\nabla^2f(x)$ es indefinida entonces $x$ se nombra punto silla o [*saddle point*](https://en.wikipedia.org/wiki/Saddle_point). # # * Si $x \in \mathbb{R}^n$ es punto crítico y $\nabla^2f(x) \in \mathbb{S}_{+}$ no podemos concluir si es máximo o mínimo local (análogo si $\nabla^2f(x) \in -\mathbb{S}_{+}$). # # # ```{admonition} Definición # # Una matriz es indefinida si tiene eigenvalores tanto positivos como negativos. # # ``` # ## Función fuertemente convexa # ```{admonition} Definición # # Una función $f:\mathbb{R}^n \rightarrow \mathbb{R}$ tal que $f \in \mathcal{C}^2(\text{dom}f)$ se nombra **fuertemente convexa** en el conjunto convexo $\mathcal{S} \neq \emptyset$ si existe $m>0$ tal que $\nabla^2 f(x) - mI$ es simétrica semidefinida positiva $\forall x \in \mathcal{S}$. # # ``` # ```{admonition} Comentario # # Es equivalente escribir que una función $f$ es fuertemente convexa en un conjunto $\mathcal{S}$ que escribir $\nabla^2 f(x)$ es definida positiva para toda $x \in \mathcal{S}$. 
# # ``` # (RESFFUERTCON)= # ### Algunos resultados que es posible probar para funciones fuertemente convexas # Si una función es fuertemente convexa se puede probar que: # # * El conjunto óptimo contiene a lo más un punto. # # * $f(y) \geq f(x) + \nabla f(x)^T(y-x) + \frac{m}{2}||y-x||_2^2 \forall x,y \in \mathcal{S}$, $m > 0$. Por esto si $f$ es fuertemente convexa en $\mathcal{S}$ entonces es estrictamente convexa en $\mathcal{S}$. También esta desigualdad indica que la diferencia entre la función de $y$, $f(y)$, y la función lineal en $y$ $f(x) + \nabla f(x)^T(y-x)$ (Taylor a primer orden) está acotada por debajo por una cantidad cuadrática. # # * Existe una cota superior para el **número de condición** bajo la norma 2 de la Hessiana de $f$, esto es: $\text{cond}(\nabla ^2 f(x))= \frac{\lambda_\text{max}(\nabla^2 f(x))}{\lambda_\text{min}(\nabla^2 f(x))} \leq K$ con $K>0$, $\forall x \in \mathcal{S}$. # # * La propiedad que una función sea fuertemente convexa garantiza que el número de condición de la Hessiana de $f$ es una buena medida del desempeño de los algoritmos de optimización convexa sin restricciones (se revisará más adelante). # # ```{admonition} Observación # :class: tip # # Si $f$ es fuertemente convexa en $\mathcal{S}$ entonces es estrictamente convexa en $\mathcal{S}$ pero no viceversa, considérese por ejemplo $f(x)=x^4$ la cual es estrictamente convexa en todo su dominio pero no es fuertemente convexa en todo su dominio pues su segunda derivada se anula en $x=0$. # # ``` # **Preguntas de comprensión.** # # 0)Revisar el siguiente video: [<NAME>'s talk at NIPS](https://www.youtube.com/watch?v=Qi1Yry33TQE) de la plática de [<NAME>](https://twitter.com/alirahimi0) y la respuesta de [<NAME>](https://twitter.com/ylecun): [My take on Ali Rahimi's "Test of Time" award talk at NIPS](https://www2.isye.gatech.edu/~tzhao80/Yann_Response.pdf). # # 1)Detalla qué es un problema de optimización matemática y describe sus elementos. 
# # 2)¿Qué forma tiene un problema estándar de optimización? # # 3)¿Por qué se consideran problemas de minimización en la forma estándar y no los de maximización? # # 4)¿Qué propiedad cumple un punto que es óptimo para un problema de minimización? # # 5)¿Qué propiedad debe satisfacer una función para que se le llame convexa? # # 6)¿Qué forma tiene un problema convexo estándar con igualdades y desigualdades? # # 7)¿Qué es un conjunto convexo? # # 8)Da ejemplos de conjuntos convexos. # # 9)¿Qué es una combinación convexa? # # 10)Escribe equivalencias para definir funciones convexas. # # 11)¿Qué es una función cóncava? # # 12)Escribe ejemplos de funciones convexas. # # 13)¿Qué es una función estrictamente convexa? # # 14)Escribe resultados útiles respecto a problemas de optimización, optimización convexos y puntos críticos. # # 15)¿Qué es una función fuertemente convexa? # **Referencias:** # # 1. <NAME>, <NAME>, Convex Optimization, Cambridge University Press, 2009. #
libro_optimizacion/temas/3.optimizacion_convexa/3.1/Definicion_de_problema_optimizacion_conjuntos_y_funciones_convexas.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Computing heatmaps for saliency methods

# +
import tensorflow as tf
from tensorflow import keras

import matplotlib
import matplotlib.pyplot as plt
plt.rc('image', cmap='Purples')

import numpy as np
from keras.utils import np_utils
from keras.backend.tensorflow_backend import set_session, clear_session

from scripts.analyzers import run_interpretation_methods
from scripts.models import create_model_llr, train_model

import pickle as pkl
import warnings
import os
# -

# Silence TF's INFO/WARNING logging; keep only errors.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

# Record the library versions the experiment was run with (reproducibility).
print(tf.__version__)
print(keras.__version__)
print(np.__version__)
print(matplotlib.__version__)

warnings.filterwarnings("ignore")

# Directory where the computed heatmaps and accuracies are persisted.
result_dir = '../results/saliency_methods'

# ## File path

file_path = '../data/data_vary_signal_exact_2021-04-27-21-29-44_pattern_type_5.pkl'

# + [markdown] pycharm={"name": "#%% md\n"}
# ## Load data
# -

def test_saved_data(data_path):
    """Load and return the pickled experiment data stored at ``data_path``."""
    with open(data_path, 'rb') as f:
        data = pkl.load(f)
    return data

# Signal/distractor weight combinations evaluated below.
# NOTE(review): these must match the keys present in the pickled data file;
# the commented-out alternative belongs to a different data generation run.
keys = ['0.00_0.50_0.50', '0.02_0.49_0.49', '0.04_0.48_0.48',
        '0.06_0.47_0.47', '0.08_0.46_0.46']
# keys = ['0.00_0.50_0.50', '0.04_0.48_0.48', '0.08_0.46_0.46', '0.12_0.44_0.44', '0.16_0.42_0.42']

# (method name, method-specific kwargs) pairs forwarded to the analyzers.
methods_params = [('gradient', {}),
                  ('deep_taylor', {}),
                  ('lrp.z', {}),
                  ('lrp.alpha_beta', {'alpha' : 2, 'beta' : 1}),
                  ('pattern.net', {}),
                  ('pattern.attribution', {}),
                  ('input_t_gradient', {})]
methods = [method[0] for method in methods_params]
print(methods)

# + pycharm={"name": "#%%\n"}
params = {'input_dim' : 64,
          'output_dim' : 2,
          'regularizer' : None,
          'epochs' : 200,
          'runs' : 100,
          'save_data' : True}
# -

data = test_saved_data(data_path=file_path)

# + [markdown] pycharm={"name": "#%% md\n"}
# ## Define helper functions

# + pycharm={"name": "#%%\n"}
def generate_empty_results_dict():
    """Return the skeleton dict the experiment fills in below."""
    return {'results': dict(), 'method_names': list()}
# -

def dump_results(output_dir : str, results: dict, suffix: str) -> None:
    """Pickle ``results`` to ``<output_dir>/results_<suffix>.pkl``."""
    output_path = os.path.join(output_dir, f'results_{suffix}.pkl')
    print(f'Output path: {output_path}')
    with open(output_path, 'wb') as f:
        pkl.dump(results, f)

# + [markdown] pycharm={"name": "#%% md\n"}
# ## 100 runs for all five parameter combinations

# + pycharm={"name": "#%%\n"}
results = generate_empty_results_dict()
results['method_names'] = methods

# + pycharm={"name": "#%%\n"}
acc_dict = dict()
for weights, data_list in data.items():
    print(f'Weight: {weights}')
    results_per_weight = list()
    acc_per_weight = list()
    val_acc_per_weight = list()
    for data_run in data_list:
        # Drop the previous run's graph/session so models do not accumulate.
        clear_session()
        output = dict()
        data_train = data_run['train']
        data_val = data_run['val']
        X_train = data_train['x']
        y_train_bin = data_train['y']
        y_train = np_utils.to_categorical(y_train_bin, num_classes = 2)
        X_val = data_val['x']
        y_val_bin = data_val['y']
        y_val = np_utils.to_categorical(y_val_bin, num_classes = 2)
        model = create_model_llr(output_dim = params['output_dim'],
                                 activation = 'softmax',
                                 regularizer = params['regularizer'],
                                 input_dim = params['input_dim'])
        model_trained, acc, val_acc = train_model(model, X_train, y_train,
                                                  X_val, y_val,
                                                  epochs = params['epochs'],
                                                  verbose = False)
        model_weights = model_trained.get_weights()
        # One heatmap per saliency method, computed on the validation data.
        heatmaps = run_interpretation_methods(model_trained,
                                              methods = methods_params,
                                              data = X_val,
                                              X_train_blob = X_train,
                                              normalize = False)
        output['model'] = model_weights # TODO write function to load model + weights
        output['explanations'] = heatmaps
        results_per_weight += [output]
        # Keep only the final-epoch train/validation accuracy of each run.
        acc_per_weight += [acc[-1]]
        val_acc_per_weight += [val_acc[-1]]
    results['results'][weights] = results_per_weight
    acc_dict[weights] = {'acc' : acc_per_weight, 'val_acc' : val_acc_per_weight}

# + pycharm={"name": "#%%\n"}
print(len(acc_dict['0.00_0.50_0.50']['acc']))

# + pycharm={"name": "#%%\n"}
# Mean final *validation* accuracy over all runs, per weight combination.
for key in keys:
    print(f'Final accuracy for {key}: {np.mean(acc_dict[key]["val_acc"]):.2f}')
# -

def extract_pattern_type(data_path: str) -> str:
    """Return the pattern-type id encoded in the data file name.

    Example: '../data/..._pattern_type_5.pkl' -> '5'.

    Works on the basename with the extension stripped, so the result no
    longer depends on how many '.' characters the directory part of the
    path contains (the previous ``data_path.split('.')[2]`` only worked
    for paths starting with exactly '../').
    """
    stem = os.path.splitext(os.path.basename(data_path))[0]
    return stem.split('pattern_type_')[-1]

# + pycharm={"name": "#%%\n"}
if params['save_data']:
    pattern_type = f'pattern_type_{extract_pattern_type(data_path=file_path)}'
    dump_results(output_dir = result_dir,
                 results = results,
                 suffix = f'heatmapping_methods_{pattern_type}')
    dump_results(output_dir = result_dir,
                 results = acc_dict,
                 suffix = f'accuracies_{pattern_type}')
saliency_methods/compute_explanations_heatmapping_methods.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Milestone Project 1: Walkthrough Steps Workbook
#
# Below is the set of steps for the Tic Tac Toe Milestone Project game,
# with a reference implementation for every step.
#
# Notes:
# * input() always returns a string; wrap it in int() when a number is needed.
# * clear_output() only works inside Jupyter; when the IPython import is not
#   available we fall back to printing 100 blank lines, which scrolls the
#   previous board up out of view.

# +
import random

try:
    from IPython.display import clear_output
except ImportError:  # running outside Jupyter/IPython
    def clear_output():
        print('\n' * 100)
# -

# **Step 1: Print the board.** The board is a list where indices 1-9 match
# the keys of a number pad (index 0 is unused), giving a 3x3 representation.


def display_board(board):
    """Clear the screen and draw the current 3x3 board."""
    clear_output()
    print(board[7] + '|' + board[8] + '|' + board[9])
    print('-+-+-')
    print(board[4] + '|' + board[5] + '|' + board[6])
    print('-+-+-')
    print(board[1] + '|' + board[2] + '|' + board[3])


# **Step 2: Take player input and assign the markers.**


def player_input():
    """Ask Player 1 for 'X' or 'O' until a valid answer is given.

    Returns (player1_marker, player2_marker).
    """
    marker = ''
    while marker not in ('X', 'O'):
        marker = input("Player 1: Do you want to be 'X' or 'O'? ").upper()
    return ('X', 'O') if marker == 'X' else ('O', 'X')


# **Step 3: Assign a marker to a board position.**


def place_marker(board, marker, position):
    """Write `marker` ('X' or 'O') into board slot `position` (1-9)."""
    board[position] = marker


# **Step 4: Check whether a mark has won.**


def win_check(board, mark):
    """Return True if `mark` fills any full row, column or diagonal."""
    winning_lines = (
        (7, 8, 9), (4, 5, 6), (1, 2, 3),  # rows
        (7, 4, 1), (8, 5, 2), (9, 6, 3),  # columns
        (7, 5, 3), (9, 5, 1),             # diagonals
    )
    return any(board[a] == board[b] == board[c] == mark
               for a, b, c in winning_lines)


# **Step 5: Randomly decide which player goes first.**


def choose_first():
    """Flip a coin and return 'Player 1' or 'Player 2'."""
    return 'Player 1' if random.randint(0, 1) == 0 else 'Player 2'


# **Step 6: Check whether a space on the board is freely available.**


def space_check(board, position):
    """Return True if slot `position` is still empty (holds a space)."""
    return board[position] == ' '


# **Step 7: Check whether the board is full.**


def full_board_check(board):
    """Return True when no free position remains, False otherwise."""
    return all(not space_check(board, i) for i in range(1, 10))


# **Step 8: Ask the player for a free position (1-9) and return it.**


def player_choice(board):
    """Prompt for a position 1-9 until a free one is entered; return it."""
    position = 0
    while position not in range(1, 10) or not space_check(board, position):
        try:
            position = int(input('Choose your next position (1-9): '))
        except ValueError:  # non-numeric input: ask again
            position = 0
    return position


# **Step 9: Ask whether the player wants to play again.**


def replay():
    """Return True if the player wants another game."""
    return input('Do you want to play again? Enter Yes or No: ').lower().startswith('y')


# **Step 10: Run the game!** Guarded by __main__ so importing this module
# (e.g. for testing the functions above) does not start an interactive game.

if __name__ == '__main__':
    print('Welcome to Tic Tac Toe!')
    while True:
        # Set the game up here: empty board, markers, who starts.
        the_board = [' '] * 10
        player1_marker, player2_marker = player_input()
        turn = choose_first()
        print(turn + ' will go first.')
        game_on = input('Are you ready to play? Enter Yes or No: ').lower().startswith('y')
        while game_on:
            marker = player1_marker if turn == 'Player 1' else player2_marker
            display_board(the_board)
            position = player_choice(the_board)
            place_marker(the_board, marker, position)
            if win_check(the_board, marker):
                display_board(the_board)
                print(turn + ' has won!')
                game_on = False
            elif full_board_check(the_board):
                display_board(the_board)
                print('The game is a draw!')
                game_on = False
            else:
                turn = 'Player 2' if turn == 'Player 1' else 'Player 1'
        if not replay():
            break

# ## Good Job!
04-Milestone Project - 1/.ipynb_checkpoints/02-Milestone Project 1 - Walkthrough Steps Workbook-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.12 64-bit (''section-04-research-and-development-Cw-Rvs9o-py3.7'':
#       poetry)'
#     name: python3
# ---

# # Machine Learning Pipeline - Model Training
#
# In this notebook, we pick up the transformed datasets and the selected
# variables that we saved in the previous notebooks.

# # Reproducibility: Setting the seed
#
# With the aim to ensure reproducibility between runs of the same notebook,
# but also between the research and production environment, for each step
# that includes some element of randomness, it is extremely important that
# we **set the seed**.

# +
# to handle datasets
import pandas as pd
import numpy as np

# for plotting
import matplotlib.pyplot as plt

# to save the model
import joblib

# to build the model
from sklearn.linear_model import Lasso

# to evaluate the model
from sklearn.metrics import mean_squared_error, r2_score

# to visualise all the columns in the dataframe
pd.pandas.set_option('display.max_columns', None)

# +
# load the train and test set with the engineered variables
# we built and saved these datasets in a previous notebook (step 2)
X_train = pd.read_csv('xtrain.csv')
X_test = pd.read_csv('xtest.csv')

X_train.head()

# +
# load the target (remember that the target is log transformed)
y_train = pd.read_csv('ytrain.csv')
y_test = pd.read_csv('ytest.csv')

y_train.head()

# +
# load the pre-selected features
# ==============================
# we selected the features in the previous notebook (step 3)

features = pd.read_csv('selected_features.csv')
features = features['0'].to_list()

# display final feature set
features

# +
# reduce the train and test set to the selected features
X_train = X_train[features]
X_test = X_test[features]
# -

# ### Regularised linear regression: Lasso
#
# Remember to set the seed.

# +
# set up the model
# random_state fixes the seed so the fit is reproducible
lin_model = Lasso(alpha=0.001, random_state=0)

# train the model
lin_model.fit(X_train, y_train)

# +
# evaluate the model:
# ====================
# the target (SalePrice) was log transformed in the feature-engineering
# notebook (step 2), so to report the true performance we map both the
# target and the predictions back to original prices with np.exp before
# computing the mean squared error, its root, and r2.

# make predictions for train set
pred = lin_model.predict(X_train)

# determine mse, rmse and r2
print('train mse: {}'.format(int(
    mean_squared_error(np.exp(y_train), np.exp(pred)))))
print('train rmse: {}'.format(int(
    mean_squared_error(np.exp(y_train), np.exp(pred), squared=False))))
print('train r2: {}'.format(
    r2_score(np.exp(y_train), np.exp(pred))))
print()

# make predictions for test set
pred = lin_model.predict(X_test)

# determine mse, rmse and r2
print('test mse: {}'.format(int(
    mean_squared_error(np.exp(y_test), np.exp(pred)))))
print('test rmse: {}'.format(int(
    mean_squared_error(np.exp(y_test), np.exp(pred), squared=False))))
print('test r2: {}'.format(
    r2_score(np.exp(y_test), np.exp(pred))))
print()

# a robust "typical" price for context; note that .median() is computed,
# so the label now says Median (the previous label said "Average").
# Selecting the SalePrice column yields a scalar, avoiding int() on a Series.
print('Median house price: ', int(np.exp(y_train['SalePrice']).median()))
# -

# let's evaluate our predictions respect to the real sale price
plt.scatter(y_test, lin_model.predict(X_test))
plt.xlabel('True House Price')
plt.ylabel('Predicted House Price')
plt.title('Evaluation of Lasso Predictions')

# We can see that our model is doing a pretty good job at estimating house prices.

# +
# align the test target's index with the predictions Series so that the
# element-wise subtraction below matches rows correctly
# (reset_index must be in-place, otherwise the result is discarded)
y_test.reset_index(drop=True, inplace=True)

preds = pd.Series(lin_model.predict(X_test))
preds

# +
# let's evaluate the distribution of the errors:
# they should be fairly normally distributed
errors = y_test['SalePrice'] - preds
errors.hist(bins=30)
plt.show()
# -

# The distribution of the errors follows quite closely a gaussian
# distribution. That suggests that our model is doing a good job as well.

# ### Feature importance

# +
# Finally, just for fun, let's look at the feature importance.
# coef_.ravel() flattens the coefficient array regardless of whether the
# target was passed as a 1- or 2-dimensional structure.
importance = pd.Series(np.abs(lin_model.coef_.ravel()))
importance.index = features
importance.sort_values(inplace=True, ascending=False)
importance.plot.bar(figsize=(18, 6))
plt.ylabel('Lasso Coefficients')
plt.title('Feature Importance')
# -

# ## Save the Model

# +
# we are happy with our model, so we save it to be able
# to score new data
joblib.dump(lin_model, 'linear_regression.joblib')
# -

# # Additional Resources
#
#
# ## Feature Engineering
#
# - [Feature Engineering for Machine Learning](https://www.udemy.com/course/feature-engineering-for-machine-learning/?referralCode=A855148E05283015CF06) - Online Course
# - [Packt Feature Engineering Cookbook](https://www.packtpub.com/data/python-feature-engineering-cookbook) - Book
# - [Feature Engineering for Machine Learning: A comprehensive Overview](https://trainindata.medium.com/feature-engineering-for-machine-learning-a-comprehensive-overview-a7ad04c896f8) - Article
# - [Practical Code Implementations of Feature Engineering for Machine Learning with Python](https://towardsdatascience.com/practical-code-implementations-of-feature-engineering-for-machine-learning-with-python-f13b953d4bcd) - Article
#
# ## Feature Selection
#
# - [Feature Selection for Machine Learning](https://www.udemy.com/course/feature-selection-for-machine-learning/?referralCode=186501DF5D93F48C4F71) - Online Course
# - [Feature Selection for Machine Learning: A comprehensive Overview](https://trainindata.medium.com/feature-selection-for-machine-learning-a-comprehensive-overview-bd571db5dd2d) - Article
#
# ## Machine Learning
#
# - [Best Resources to Learn Machine Learning](https://trainindata.medium.com/find-out-the-best-resources-to-learn-machine-learning-cd560beec2b7) - Article
# - [Machine Learning with Imbalanced Data](https://www.udemy.com/course/machine-learning-with-imbalanced-data/?referralCode=F30537642DA57D19ED83) - Online Course
section-04-research-and-development/04-machine-learning-pipeline-model-training.ipynb