code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import logging import os logging.basicConfig(level=logging.DEBUG) import graphcat.notebook import imagecat.notebook # + graph = graphcat.DynamicGraph() monitor = graphcat.PerformanceMonitor(graph) imagecat.add_task(graph, "/foreground", imagecat.operator.color.fill, values=[0, 0, 0], res=[256, 128]) imagecat.add_task(graph, "/background", imagecat.operator.color.fill, values=[1, 0.5, 0], res=[2560, 1440]) imagecat.add_task(graph, "/mask", imagecat.operator.render.text, res=[256, 128], string="Imagecat!") imagecat.add_task(graph, "/comp", imagecat.operator.transform.composite, pivot=("1w", "1h"), orientation=0, position=("1w", "1h"), order=0) imagecat.set_links(graph, "/foreground", ("/comp", "foreground")) imagecat.set_links(graph, "/background", ("/comp", "background")) imagecat.set_links(graph, "/mask", ("/comp", "mask")) graphcat.notebook.display(graph) imagecat.notebook.display(graph.output("/foreground")) imagecat.notebook.display(graph.output("/background")) imagecat.notebook.display(graph.output("/mask")) imagecat.notebook.display(graph.output("/comp")) # - sorted(monitor.tasks.items(), key=lambda item: item[1], reverse=True)
notebooks/composite.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from kmod import gan_ume_opt as gt # %load_ext autoreload # %autoreload 2 # %matplotlib inline # #%config InlineBackend.figure_format = 'svg' # #%config InlineBackend.figure_format = 'pdf' import kmod import kgof import kgof.goftest as gof # submodules from kmod import data, density, kernel, util from kmod import mctest as mct import matplotlib import matplotlib.pyplot as plt import autograd.numpy as np import scipy.stats as stats import utils, torch, time, os, pickle import numpy as np import torch.nn as nn import torch.optim as optim from torch.autograd import Variable from torch.utils.data import DataLoader from torchvision import datasets, transforms from PIL import Image # - from kmod.torch_models import Generator as DCGAN torch.backends.cudnn.enabled = True data_dir = '../problems/celeba/img_align_celeba' test_img_list = [] with open('../problems/celeba/test_list.txt') as f: for line in f: test_img_list.append(line.rstrip('\n')) # + def open_images(paths, size=64, resize=False): img_data = [] for path in paths: im = Image.open(path) if resize: im = im.resize((size, size)) im = np.array(im) img_data.append(im) return np.array(img_data) def normalize(images, mean, std): """normalize ndarray images of shape N x H x W x C""" return (images - mean) / std # - # ## Experiments on noise space optimization (under construction) batch_size = 1000 #number of images we want to generate z_dim = 100 #dimention of noise, this is fixed to 100 so don't change model_dir = '../problems/celeba/models/' gpu_id = 2 gpu_mode = True gt.set_gpu_mode(gpu_mode) gt.set_gpu_id(gpu_id) gp = DCGAN().cuda(gpu_id) gp.eval() gq = DCGAN().cuda(gpu_id) gq.eval() gp.load('{}/GAN_G_smile_unif.pkl'.format(model_dir)) gq.load('{}/GAN_G_nosmile_unif.pkl'.format(model_dir)) # + J 
= 5 from kmod.ex import celeba as clba gen_data_smile = clba.load_feature_array('gen_smile') gen_data_nonsmile = clba.load_feature_array('gen_nonsmile') ref = clba.load_feature_array('ref_smile') datap = data.Data(gen_data_smile[:batch_size]) dataq = data.Data(gen_data_nonsmile[:batch_size]) datar = data.Data(ref[:batch_size]) from kmod import torch_models model = torch_models.load_inception_v3(pretrained=True, gpu_id=gpu_id) Zp0 = Zq0 = np.random.uniform(-1, 1, (J, z_dim)) XYZ = np.vstack((datap.data(), dataq.data(), datar.data())) med = util.meddistance(XYZ, subsample=1000) med = med.reshape([1,]) # - with util.ContextTimer() as t: # Z_opt, gw_opt, opt_result = gt.optimize_3sample_criterion(datap, dataq, datar, gp, gq, Zp0, Zq0, gwidth0=med2) Zp_opt, Zq_opt, gw_opt = gt.run_optimize_3sample_criterion(datap, dataq, datar, gp, gq, model.pool3, Zp0, Zq0, med) print(t.secs / 60.) sample_p_opt = gp(Zp_opt.view(-1, gp.z_size, 1, 1)).cpu().data.numpy().transpose(0, 2, 3, 1) sample_q_opt = gq(Zq_opt.view(-1, gp.z_size, 1, 1)).cpu().data.numpy().transpose(0, 2, 3, 1) grid_size = 5 plt.figure(figsize=(8, 8)) for i in range(sample_p_opt.shape[0]): img = np.clip(sample_p_opt[i], 0, 1) plt.subplot(grid_size, grid_size, i+1) plt.imshow(img) plt.figure(figsize=(8, 8)) for i in range(sample_q_opt.shape[0]): img = np.clip(sample_q_opt[i], 0, 1) plt.subplot(grid_size, grid_size, i+1) plt.imshow(img) print(np.sum((Zp0 - Zp_opt.cpu().data.numpy())**2)**0.5) # ## Experiments on discrete optimization # ### Using real images as samples for the three-sampel UME test, we examine test locations given by maximizing the power criterion. 
# loading data path data_dir = '../problems/celeba/img_align_celeba' test_img_list = [] with open('../problems/celeba/test_list.txt') as f: for line in f: test_img_list.append(line.rstrip('\n')) smile_img_list = [] with open('../problems/celeba/test_smile.txt') as f: for line in f: smile_img_list.append(line.rstrip('\n')) non_smile_img_list = [filename for filename in test_img_list if filename not in smile_img_list] # loading image data paths = ['{}/{}'.format(data_dir, filename) for filename in smile_img_list] smile_img_data = open_images(paths, 224, resize=True) smile_img_data = smile_img_data / 255 paths = ['{}/{}'.format(data_dir, filename) for filename in non_smile_img_list] non_smile_img_data = open_images(paths, 224, resize=True) non_smile_img_data = non_smile_img_data / 255 n1 = smile_img_data.shape[0] n2 = non_smile_img_data.shape[0] tr_data = np.vstack([smile_img_data[:int(n1/2)], non_smile_img_data[:int(n2/2)]]) te_data = np.vstack([smile_img_data[int(n1/2):], non_smile_img_data[int(n2/2):]]) # creating training and test data mean = np.mean(tr_data, axis=(0, 1, 2)) std = np.std(tr_data, axis=(0, 1, 2)) print(mean, std) smile_img_data_norm = normalize(smile_img_data, mean, std) smile_tr_data = smile_img_data_norm[:int(n1/2)] smile_te_data = smile_img_data_norm[int(n1/2):] non_smile_img_data_norm = normalize(non_smile_img_data, mean, std) non_smile_tr_data = non_smile_img_data_norm[:int(n2/2)] non_smile_te_data = non_smile_img_data_norm[int(n2/2):] del(non_smile_img_data) del(smile_img_data) # loading a feature extractor import pretrainedmodels model_name = 'resnet18' model = pretrainedmodels.__dict__[model_name]().cuda(2) model.eval() # + # Obtain samples for P, Q, R sample_size = 500 half_n1 = int(n1 / 2) half_n2 = int(n2 / 2) subsample_idx_p = util.subsample_ind(half_n1, sample_size) datap = smile_tr_data[subsample_idx_p].reshape(sample_size, -1) datap = data.Data(datap) subsample_idx_q = util.subsample_ind(half_n2, sample_size) dataq = 
non_smile_tr_data[subsample_idx_q].reshape(sample_size, -1) dataq = data.Data(dataq) rest_tr_data = np.vstack( [ np.delete(smile_tr_data, subsample_idx_p, axis=0), np.delete(non_smile_tr_data, subsample_idx_q, axis=0) ] ) n = rest_tr_data.shape[0] datar = util.subsample_rows(rest_tr_data.reshape(n, -1), sample_size) datar = data.Data(datar) del(rest_tr_data) # - te_data_norm = normalize(te_data, mean, std) num_candidates = 500 locs = util.subsample_rows(te_data_norm, num_candidates) #locs = smile_img_data_norm[-num_candidates:] locs = locs.reshape((locs.shape[0], -1)) XYZ = np.vstack((datap.data(), dataq.data(), datar.data())) med2 = util.meddistance(XYZ, subsample=1000)**2 J = 10 with util.ContextTimer() as t: p_best_locs = gt.opt_greedy_3sample_criterion(datap, dataq, datar, model.features, locs, med2, J, maximize=False) with util.ContextTimer() as t: q_best_locs = gt.opt_greedy_3sample_criterion(datap, dataq, datar, model.features, locs, med2, J, maximize=True) grid_size = 4 images = locs.reshape((num_candidates, 224, 224, 3)) * std + mean for i in range(len(p_best_locs)): idx = p_best_locs[i] img = images[idx] plt.subplot(grid_size, grid_size, i+1) plt.imshow(img) grid_size = 4 for i in range(len(q_best_locs)): idx = q_best_locs[i] img = images[idx] plt.subplot(grid_size, grid_size, i+1) plt.imshow(img)
ipynb/demo_gan_opt.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: kit_kernel # language: python # name: kit_kernel # --- # # Python + SQL Tech Session # # Today we'll be covering: # 1. Connecting to the database from python # 1. Using templated SQL in python # 1. getting data into and out of postgres efficiently # 1. Advanced SQL # - CTEs (WITH clauses) # - window functions # - indices / check plan # - temp tables # # ### Some initial setup # Downloading the materials we'll need: # 1. SSH to the class server # 1. Make sure you're in your home directory: `cd ~` # 1. Download the notebook: `wget https://raw.githubusercontent.com/dssg/mlforpublicpolicylab/master/techhelp/python_sql_tech_session.ipynb` # 1. Download the sql template example: `wget https://raw.githubusercontent.com/dssg/mlforpublicpolicylab/master/techhelp/tech_session_template.sql` # 1. Take a look at the sql template: `less tech_session_template.sql` (Type `q` to exit) # # Install some packages in your group virtualenv (only one person should need to do this): # 1. SSH to the class server (if you're not already there) # 1. Activate virtualenv: `source /data/groups/{your_group}/dssg_env/bin/activate` # 1. Install pandas and matplotlib: `pip install pandas matplotlib` # 1. Install pyscopg2 and sqlalchemy (to connect to postgres): `pip install psycopg2-binary sqlalchemy` # 1. Install ohio (tool for moving data to/from postgres): `pip install ohio` # 1. Install PyYAML (to read YAML format): `pip install PyYAML` # # Create a secrets file: # 1. SSH to the class server (if you're not already there) # 1. Make sure you're in your home directory: `cd ~` # 1. Create the secrets file: `touch secrets.yaml` # 1. Restrict access to the file: `chmod 600 secrets.yaml` # 1. Edit the file: `nano secrets.yaml` # 1. 
Fill it in with contents (remember, your password can be found in your `.pgpass` file): # ``` # db: # host: mlpolicylab.db.dssg.io # port: 5432 # dbname: db_donorschoose_example # user: {your_andrewid} # password: {<PASSWORD>} # ``` # # Start up your jupyter server (detailed instructions [here](https://github.com/dssg/mlforpublicpolicylab/blob/master/techhelp/jupyter_setup.md)): # 1. SSH to the class server (if you're not already there) # 1. Start a screen session: `screen` # 1. Choose a port (if you haven't already): `ss -lntu` (pick a port between 1024 and 65535 that is NOT on that list) # 1. Make sure you're in your home directory: `cd ~` # 1. Activate virtualenv: `source /data/groups/{your_group}/dssg_env/bin/activate` # 1. Start your server: `jupyter notebook --port {port_from_above} --no-browser` (make note of the token here) # 1. ON YOUR LOCAL MACHINE, create an SSH tunnel: `ssh -N -L localhost:8888:localhost:{YOUR_PORT} {<EMAIL>_ANDREW_ID}@ml<EMAIL>` (or [using PuTTY on windows](https://docs.bitnami.com/bch/faq/get-started/access-ssh-tunnel/)) # 1. ON YOUR LOCAL MACHINE, open a browser and navigate to: `http://localhost:8888/` # 1. Fill in the token from the jupyter server # 1. Open this notebook # 1. **Be sure to choose your group kernel from the "Kernel" menu** # # ## Import packages # + import matplotlib.pyplot as plt import pandas as pd from sqlalchemy import create_engine import yaml import ohio.ext.pandas # - # ## TOPIC 1: Connect to the database from python # + with open('secrets.yaml', 'r') as f: secrets = yaml.safe_load(f) db_params = secrets['db'] engine = create_engine('postgres://{user}:{password}@{host}:{port}/{dbname}'.format( host=db_params['host'], port=db_params['port'], dbname=db_params['dbname'], user=db_params['user'], password=<PASSWORD>['password'] )) # - # We're connected to a database with data from the DonorsChoose organization. 
It has a few useful tables: # - `public.projects` -- general information about projects # - `public.resources` -- detailed information about requested resources # - `public.essays` -- project titles and descriptions # - `public.donations` -- separate record for each donation to a project # # There's also a `sketch` schema you can use to create tables in # ### Simple select statement with sqlalchemy engine # + sql = "SELECT projectid, schoolid, resource_type FROM public.projects LIMIT 3" result_set = engine.execute(sql) for rec in result_set: print(rec) # - # ### Pandas will give a little cleaner output # + sql = "SELECT projectid, schoolid, resource_type FROM public.projects LIMIT 3" pd.read_sql(sql, engine) # - # ## Simple Table Manipulation with sqlalchemy (we'll do something more efficient below) # # Let's create a little table to track your stocks of halloween candy (fill in your andrew id below) andrew_id = # FILL IN YOUR andrew_id HERE! candy_table = '{}_candy'.format(andrew_id) table_schema = 'sketch' # Execute an appropriate CREATE statement # + create_sql = '''CREATE TABLE IF NOT EXISTS {}.{} ( candy_type varchar NULL, amount int, units varchar );'''.format(table_schema, candy_table) engine.execute(create_sql) # - # **IMPORTANT NOTE**: Statements that modify the state of the database will not be physically reflected until we tell the connection to commit these changes. If you went into DBeaver now, you still wouldn't see this new table! 
engine.execute("COMMIT") # Now let's insert a few records (again note that we have to **commit** for the records to show up): # + insert_sql = '''INSERT INTO {}.{} (candy_type, amount, units) VALUES(%s, %s, %s); '''.format(table_schema, candy_table) records_to_insert = [('snickers', 10, 'bars'), ('candy corn', 5, 'bags'), ('peanut butter cups', 15, 'cups')] for record in records_to_insert: engine.execute(insert_sql, record) engine.execute("COMMIT") # - # Let's look at the results: # + sql = "SELECT * FROM {}.{}".format(table_schema, candy_table) pd.read_sql(sql, engine) # - # Clean up: drop the table and commit: # + drop_sql = "DROP TABLE {}.{}".format(table_schema, candy_table) engine.execute(drop_sql) engine.execute("COMMIT") # - # ## TOPIC 2: Using Templated SQL # # Templating SQL statements and filling them in dynamically with python can be very helpful as you're transforming data for your projects, for instance, creating features, labels, and matrices for different temporal validation splits in your data. # # We've actually been doing a little bit of this already (e.g., filling in table names and insert values above), but let's look at a couple of examples in more detail with the donors choose data. Suppose we wanted to look at the sets of projects posted on a few given days: # + sql_template = """ SELECT projectid, resource_type, poverty_level, date_posted FROM public.projects WHERE date_posted = '{}'::DATE """ results = [] for dt in ['2014-05-01', '2014-04-15', '2014-04-01']: sql = sql_template.format(dt) results.append(pd.read_sql(sql, engine)) # - # Do some quick checks: # 1. How many result sets did we get back? # 1. Look at the first few results of one of the sets, are they all on the right date? # 1. How many projects were posted on each of these days? 
# + # Number of result sets # + # First few records of one set # + # Number of projects on each date # - # #### Some simple data visualization # # We won't go into detail here, but just to provide a quick example. See the matplot (or seaborn) documentation for more plot types and examples. # + ix = 0 df = results[ix].groupby('resource_type')['projectid'].count().reset_index() dt = results[ix]['date_posted'].max() fig, ax = plt.subplots() ax.bar('resource_type', 'projectid', data=df) ax.set_title('Counts by resource type for %s' % dt) ax.set_ylabel('Number of Projects') plt.show() # - # ### Templated SQL stored in a file # # If your queries get long or complex, you might want to move them out to separate files to keep your code a bit cleaner. We've provided an example to work with in `tech_session_template.sql` -- let's read that in here. # # Note that here we're just making use of basic python templating here, but if you want to use more complex logic in your templates, check out packages like [Jinja2](https://jinja.palletsprojects.com/en/2.11.x/) # + # Read the template file with open('tech_session_template.sql', 'r') as f: sql_template = f.read() # Look at the contents: print(sql_template) # - # **Looks like we'll need a few parameters:** # - table_schema # - table_name # - state_list # - start_dt # - end_dt # # Notice as well that we've explicitly encoded all of these columns by hand, but you might want to think about how you might construct the sets of columns for one-hot encoded categoricals programmatically from the data, as well as the other types of features we've discussed (like aggregations in different time windows)... # + table_schema = 'public' table_name = 'projects' state_list = ['CA', 'NY', 'PA'] start_dt = '2014-03-14' end_dt = '2014-04-30' sql = sql_template.format( table_schema=table_schema, table_name=table_name, state_list=state_list, start_dt=start_dt, end_dt=end_dt ) # Let's take a look... 
print(sql) # - # **Looks like the square brackets in that state list will generate an error!** # # Let's try formatting it before doing the templating: # + def list_to_string(l, dtype='string'): if dtype=='string': return ','.join(["'%s'" % elm for elm in l]) else: return ','.join(["%s" % elm for elm in l]) state_list = list_to_string(['CA', 'NY', 'PA']) print(state_list) # + sql = sql_template.format( table_schema=table_schema, table_name=table_name, state_list=state_list, start_dt=start_dt, end_dt=end_dt ) # Let's take a look... print(sql) # - # **Looks better!** Let's try running it now... # + df = pd.read_sql(sql, engine) df.head(10) # - # ## TOPIC 3: Getting data into and out of postgres efficiently # # At the command line, one very efficient way of getting data into postgres is to stream it to a `COPY` statement on `STDIN`, this might look something like: # ``` # # cat my_file.csv | psql -h mlpolicylab.db.dssg.io {group_database} -c "COPY {schema}.{table} FROM STDIN CSV HEADER" # ``` # (more details in the [postgres documentation](https://www.postgresql.org/docs/11/sql-copy.html)) # # Similarly, you can use the `\copy` command from within `psql` itself -- you can find [documentation here](https://www.postgresql.org/docs/11/app-psql.html) (seach for "\copy"). # # For today, we'll focus on a package called `ohio` that provides efficient tools for moving data between postgres and python. `ohio` provides interfaces for both `pandas` dataframes and `numpy` arrays, but we'll focus on the `pandas` tools here, which are provided via `import ohio.ext.pandas` (see the [docs for the numpy examples](https://github.com/dssg/ohio#extensions-for-numpy)) # # Note that `ohio` is dramatically more efficient than the built-in `df.to_sql()` (see the benchmarking graph below). 
The pandas function tries to be agnostic about SQL flavor by inserting data row-by-row, while `ohio` uses postgres-specific copy functionality to move the data much more quickly (and with lower memory overhead as well): # # ![ohio benchmarking](https://raw.githubusercontent.com/dssg/ohio/0.5.0/doc/img/profile-copy-from-dataframe-to-databas-1555458507.svg?sanitize=true) # # Let's try it out by re-creating our halloween candy table. andrew_id = # FILL IN YOUR andrew_id HERE! candy_table = '{}_candy'.format(andrew_id) table_schema = 'sketch' # + create_sql = '''CREATE TABLE IF NOT EXISTS {}.{} ( candy_type varchar NULL, amount int, units varchar );'''.format(table_schema, candy_table) engine.execute(create_sql) engine.execute("COMMIT") # - # ### Inserting data with df.pg_copy_to() # + df = pd.DataFrame({ 'candy_type': ['snickers', 'cookies', 'candy apples', 'peanut butter cups', 'candy corn'], 'amount': [1,1,2,3,5], 'units': ['bars', 'cookies', 'apples', 'cups', 'bags'] }) # The ohio package adds a `pg_copy_to` method to your dataframes... df.pg_copy_to(candy_table, engine, schema=table_schema, index=False, if_exists='append') # - # ### Reading data with pd.DataFrame.pg_copy_from() # # We can read the data from the table we just created using `pg_copy_from`: # + result_df = pd.DataFrame.pg_copy_from(candy_table, engine, schema=table_schema) result_df # - # Note that `pg_copy_from` can accept a query as well: # + sql = """ SELECT CASE WHEN candy_type IN ('snickers', 'cookies', 'peanut butter cups') THEN 'has chocolate' ELSE 'non-chocolate' END AS chocolate_flag, SUM(amount) AS total_number FROM {}.{} GROUP BY 1 """.format(table_schema, candy_table) result_df = pd.DataFrame.pg_copy_from(sql, engine) result_df # - # ## TOPIC 4: Advanced SQL # # Finally for today, we want to talk about a few more advanced SQL functions that will likely be helpful as you're starting to prepare your features and training/test matrices. 
We **strongly encourage** you to do as much of that data manipulation as you can in the database, as postgres is well-optimized for this sort of work. The functions here should help make that work a bit easier as well. # # The idea here is to give you an overview of some of the things that are possible that you might want to explore further. You can find a more in-depth [tutorial here](https://dssg.github.io/hitchhikers-guide/curriculum/2_data_exploration_and_analysis/advanced_sql/), with links out to additional documentation as well. # ### CTEs (WITH clauses) # # Common table expressions (CTEs), also known as WITH clauses, are a better alternative to subqueries both in terms of code readability as well as (in some cases) performance improvements. They can allow you to break up a complex query into consituent parts, making the logic of your code a little easier to follow. # # By way of example, suppose we wanted to calculate the fraction of different types of projects (based on their requested type of resource) that were fully funded in MD in January 2013. Here's how we might do that with CTEs: # + sql = """ WITH md_projects AS ( SELECT * FROM public.projects WHERE school_state='MD' AND date_posted BETWEEN '2013-01-01'::DATE AND '2013-01-31'::DATE ) , total_donations AS ( SELECT p.projectid, COALESCE(SUM(d.donation_total), 0) AS total_amount FROM md_projects p LEFT JOIN public.donations d USING(projectid) GROUP BY 1 ) , fully_funded AS ( SELECT p.*, td.total_amount, CASE WHEN td.total_amount > p.total_price_excluding_optional_support THEN 1 ELSE 0 END AS funded_flag FROM md_projects p LEFT JOIN total_donations td USING(projectid) ) SELECT resource_type, COUNT(*) AS num_projects, AVG(funded_flag) AS frac_funded FROM fully_funded GROUP BY 1 ORDER BY 3 DESC """ pd.read_sql(sql, engine) # + ### HANDS-ON: For all the MD projects posted in January 2013 that received any donations ### what is the average fraction of donations coming from teachers by resource type? 
### (note: the donations table has a boolean `is_teacher_acct` column that will be useful) sql = """ """ pd.read_sql(sql, engine) # - # ### Analytic (Window) Functions # # Postgres provides powerful functionality for calculating complex metrics such as within-group aggregates, running averages, etc., called "window functions" (because they operate over a defined window of the data relative to a given row): # - They are similar to aggregate functions, but instead of operating on groups of rows to produce a single row, they act on rows related to the current row to produce the same amount of rows. # - There are several window functions like `row_number`, `rank`, `ntile`, `lag`, `lead`, `first_value`, `last_value`, `nth_value`. # - And you can use any aggregation functions: `sum`, `count`, `avg`, `json_agg`, `array_agg`, etc # # Supposed we want to answer a couple questions: # - What fraction of all projects in MD are posted by each schoolid? # - What is the most recently posted project for each school in MD? # - Calculate a running average of the total ask amount of the 4 most recent projects at a given school (say, `schoolid='ff2695b8b7f3ade678358f6e5c621c1e'`) # + ## HANDS-ON: Try answering those questions with SELECT, GROUP BY, HAVING, AND WHERE alone # - # Now let's look at how we'd answer these questions with window functions... # # **Fraction of projects by school** # # Here, we'll group by schools but calculate the number of projects across all schools in MD using: # # `SUM(COUNT(*)) OVER ()` # # In that statement, `COUNT(*)` is the number of projects at the given school, then we're summing that count across all the aggregated rows with `SUM(.) OVER ()`. There, the `OVER ()` indicates the window across which to take the sum -- in this case, an empty window (that is, `()`) indicates using all records in the table. 
# + result_df = pd.read_sql(""" SELECT schoolid, COUNT(*) AS num_projects, 1.000*COUNT(*)/SUM(COUNT(*)) OVER () AS frac_at_school FROM public.projects WHERE school_state = 'MD' GROUP BY 1 ORDER BY 3 DESC """, engine) result_df.head() # - # **Most recent project by school** # # Here, we'll use `row_number` to rank the projects (without ties) within school and by posting date. Note that the window here, `(PARTITION BY schoolid ORDER BY date_posted DESC)` means: within each school id, calculate a row number ordered by the posting date in descending order (so the most recent project by a given school will have `rn=1`, the second most recent will have `rn=2`, and so on). # # We do this row number calculation in a CTE, allowing us to pick out the most recent project for each school simply by looking for those with `rn=1` in a subsequent step: # + result_df = pd.read_sql(""" WITH school_rns AS ( SELECT *, row_number() OVER (PARTITION BY schoolid ORDER BY date_posted DESC) AS rn FROM public.projects WHERE school_state = 'MD' ) SELECT * FROM school_rns WHERE rn=1 ; """, engine) result_df.head() # - # **Running average of ask from last four projects** # # Here, we use postgres's functionality to restrict a window to certain rows relative to the given row. 
Our window is: # ``` # (PARTITION BY schoolid ORDER BY date_posted ASC ROWS BETWEEN 3 PRECEDING AND CURRENT ROW) # ``` # That is, # - `PARTITION BY schoolid`: Do the calculation among records at the same school # - `ORDER BY date_posted ASC`: Order the records by posting date (earliest first) # - `ROWS BETWEEN 3 PRECEDING AND CURRENT ROW`: Given this ordering, calculate the average across the four most recent rows (including the current row) # + result_df = pd.read_sql(""" SELECT date_posted, projectid, schoolid, total_price_excluding_optional_support AS current_ask, AVG(total_price_excluding_optional_support) OVER ( PARTITION BY schoolid ORDER BY date_posted ASC ROWS BETWEEN 3 PRECEDING AND CURRENT ROW ) AS running_avg_ask FROM public.projects WHERE schoolid = 'ff2695b8b7f3ade678358f6e5c621c1e' ORDER BY date_posted DESC ; """, engine) result_df.head(10) # - # **Days since last project was posted** # # We can use the `lag()` window function to get the date of the most recent previously-posted project (see also `last_value` for more flexibility): # + result_df = pd.read_sql(""" SELECT date_posted, projectid, schoolid, total_price_excluding_optional_support AS current_ask, date_posted::DATE - (lag(date_posted) OVER (PARTITION BY schoolid ORDER BY date_posted ASC))::DATE AS days_since_last_proj FROM public.projects WHERE schoolid = 'ff2695b8b7f3ade678358f6e5c621c1e' ORDER BY date_posted DESC ; """, engine) result_df.head(5) # - # What happens when we hit the end of the series? result_df.tail(5) # Notice the `NaN` (will be `NULL` in postgres) for the first record that doesn't have any previously-posted project, so you'd have to think about how you wanted to handle these edge cases in your feature development. # ### Indices / Checking the Query Plan # # Indices are particularly critical to the performance of postgres queries, especially as the data gets larger. 
You should think about adding indices to tables based on columns that will frequently be used for joins or filtering rows with `WHERE` clauses. # # A useful tool for understanding how the database will treat a given query is checking the query plan by using the `EXPLAIN` keyword before a `SELECT` statement: # Eliminate column width truncating pd.set_option('display.max_colwidth', None) pd.read_sql(""" EXPLAIN SELECT * FROM public.projects WHERE projectid = '32943bb1063267de6ed19fc0ceb4b9a7' """, engine) # Notice that picking out a specific project is making use of the index via `Index Scan`. # # By contrast, if we select projects for a given school: pd.read_sql(""" EXPLAIN SELECT * FROM public.projects WHERE schoolid = 'ff2695b8b7f3ade678358f6e5c621c1e' """, engine) # Here, `Seq Scan` tells us that postgres has to scan the entire table to find the right projects, which can be very expensive (especially with joins!). Also note how much higher the overall estimated cost is for this query in the first row here than for the query above. # # Likewise for joins, compare the two query plans below: pd.read_sql(""" EXPLAIN SELECT * FROM public.projects JOIN public.donations USING(projectid) """, engine) # + ## NOTE: Please don't actually run this query without the select!!! pd.read_sql(""" EXPLAIN SELECT * FROM public.projects p JOIN public.donations d ON d.donation_timestamp > p.date_posted """, engine) # - # **CREATING INDICES** # # When you need to create indices as you build tables for your project, you can use this syntax: # # ``` # CREATE INDEX ON {schema}.{table}({column}); # ``` # # Note that you can also specify a list of columns. 
If the given column (or set of columns) is a unique key for the table, you can get additional gains by declaring it as a primary key instead of simply creating an index: # # ``` # ALTER TABLE {schema}.{table} ADD PRIMARY KEY ({column}); # ``` # # You can also find a little more documentation of postgres indices [here](https://www.postgresqltutorial.com/postgresql-indexes/postgresql-create-index/) # ### Temporary Tables # # Breaking up complex queries with CTEs can make your code much more readable and may provide some performance gains, but further gains can often be realized by creating and indexing temporary tables. # # Let's rework one of the CTE examples from above using temporary tables: For all the MD projects posted in January 2013 that received any donations what is the average fraction of donations coming from teachers by resource type? # + andrew_id = # FILL IN YOUR andrew_id HERE! # Temporary table and index for projects posted by MD schools in Jan 2013 engine.execute(""" CREATE LOCAL TEMPORARY TABLE tmp_{}_md_projects ON COMMIT PRESERVE ROWS AS SELECT * FROM public.projects WHERE school_state='MD' AND date_posted BETWEEN '2013-01-01'::DATE AND '2013-01-31'::DATE ; """.format(andrew_id)) engine.execute("""CREATE INDEX ON tmp_{}_md_projects(projectid);""".format(andrew_id)) engine.execute("COMMIT;") # Temporary table and index for donations by teachers engine.execute(""" CREATE LOCAL TEMPORARY TABLE tmp_{}_teacher_donations ON COMMIT PRESERVE ROWS AS SELECT d.projectid, SUM(CASE WHEN is_teacher_acct THEN d.donation_total ELSE 0 END)/SUM(d.donation_total) AS teacher_frac FROM tmp_{}_md_projects p JOIN public.donations d USING(projectid) GROUP BY 1 ; """.format(andrew_id, andrew_id)) engine.execute("""CREATE INDEX ON tmp_{}_teacher_donations(projectid);""".format(andrew_id)) engine.execute("COMMIT;") # Join these two temporary tables to get our result pd.read_sql(""" SELECT p.resource_type, AVG(td.teacher_frac) AS avg_teacher_frac FROM tmp_{}_md_projects p 
JOIN tmp_{}_teacher_donations td USING(projectid) GROUP BY 1 ORDER BY 2 DESC """.format(andrew_id, andrew_id), engine) # - # ## Clean Up # # drop the candy table and commit; dispose of the sqlalchemy engine # + drop_sql = "DROP TABLE {}.{}".format(table_schema, candy_table) engine.execute(drop_sql) engine.execute("COMMIT") engine.execute("DROP TABLE IF EXISTS tmp_{}_md_projects".format(andrew_id)) engine.execute("COMMIT") engine.execute("DROP TABLE IF EXISTS tmp_{}_teacher_donations".format(andrew_id)) engine.execute("COMMIT") engine.dispose()
techhelp/python_sql_tech_session.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: in
#     language: python
#     name: in
# ---

# ULMFiT text classification on the IITP+ Movie Reviews (Hindi) dataset:
# fine-tune a language model pretrained on Hindi Wikipedia, then train a
# classifier head on top of the fine-tuned encoder.

from fastai.text import *
import numpy as np
from sklearn.model_selection import train_test_split
import pickle
import sentencepiece as spm
import re
import pdb
import random  # explicit: random_seed() uses it (was only in scope via the fastai star import)

import fastai, torch
fastai.__version__ , torch.__version__

torch.cuda.set_device(0)


def random_seed(seed_value, use_cuda):
    """Seed numpy, torch and the stdlib RNG (plus CUDA when use_cuda) for reproducibility."""
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    random.seed(seed_value)
    if use_cuda:
        torch.cuda.manual_seed(seed_value)
        torch.cuda.manual_seed_all(seed_value)
        # deterministic cuDNN kernels trade a little speed for repeatable runs
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False


random_seed(42, True)

path = Path('./')

# !pwd

# The CSVs have no header row: column 0 is the label, column 1 the review text.
df_train = pd.read_csv(path/'../../classification_public_datasets/iitp+-movie-reviews/hi/hi-train.csv', header=None)
df_train.head()

df_valid = pd.read_csv(path/'../../classification_public_datasets/iitp+-movie-reviews/hi/hi-valid.csv', header=None)
df_valid.head()

df_test = pd.read_csv(path/'../../classification_public_datasets/iitp+-movie-reviews/hi/hi-test.csv', header=None)
df_test.head()

df_train.shape, df_valid.shape, df_test.shape

# Inspect rows with missing text before deciding whether to drop them.
df_train[df_train[1].isnull()]

# +
# df_train = df_train.dropna()
# -

df_test[df_test[1].isnull()]

# +
# df_test = df_test.dropna()
# -

label_cols = [0]


class HindiTokenizer(BaseTokenizer):
    """fastai tokenizer backed by the SentencePiece model trained on Hindi Wikipedia."""

    def __init__(self, lang:str):
        self.lang = lang
        self.sp = spm.SentencePieceProcessor()
        self.sp.Load(str('./../../models/hindi/tokenizer/Created from Hindi Wikipedia Articles - 172k/hindi_lm_large.model'))

    def tokenizer(self, t:str) -> List[str]:
        return self.sp.EncodeAsPieces(t)


sp = spm.SentencePieceProcessor()
sp.Load('./../../models/hindi/tokenizer/Created from Hindi Wikipedia Articles - 172k/hindi_lm_large.model')

# 30,000 is the vocab size that we chose in sentencepiece
itos = [sp.IdToPiece(int(i)) for i in range(30000)]
itos[:10]

hindi_vocab = Vocab(itos)

tokenizer = Tokenizer(tok_func=HindiTokenizer, lang='hi')
tokenizer.special_cases

data_lm = TextLMDataBunch.from_df(path=path, train_df=df_train, valid_df=df_valid, test_df=df_test,
                                  tokenizer=tokenizer, vocab=hindi_vocab, bs=32)
data_lm.show_batch()

learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3)

# Loading the pretrained language model on hindi wikipedia
learn.load('../../../models/hindi/lm/ULMFiT - Trained on Hindi Wikipedia Articles - 172k/model')

# +
# Fine tuning the pretrained LM on the current dataset
# -

learn.lr_find()
learn.recorder.plot()

learn.freeze()
learn.fit_one_cycle(1, 1e-2, moms=(0.8,0.7))

learn.save('fit_head', with_opt=True)
learn.load('fit_head', with_opt=True);

learn.unfreeze()
learn.fit_one_cycle(3, 1e-3, moms=(0.8,0.7))

learn.save('fine_tuned', with_opt=True)
learn.load('fine_tuned', with_opt=True);

# Sanity check: generate a few words from the fine-tuned LM.
learn.predict('▁इस ▁लाइन ▁के ▁चलने ▁से ▁दक्षिणी ',n_words=10)

learn.save_encoder('fine_tuned_enc')

# Classifier stage: reuse the fine-tuned encoder.
data_clas = TextClasDataBunch.from_df(path=path, train_df=df_train, valid_df=df_valid, test_df=df_test,
                                      tokenizer=tokenizer, vocab=hindi_vocab)
data_clas.show_batch()

learn = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)
learn.load_encoder('fine_tuned_enc')
learn.freeze()

learn.lr_find()
learn.recorder.plot()

learn.loss_func.func

mcc = MatthewsCorreff()  # fastai v1's spelling of the Matthews correlation metric
learn.metrics = [mcc, accuracy]

# Gradual unfreezing, as in the ULMFiT paper.
learn.fit_one_cycle(1, 1e-1, moms=(0.8,0.7))
learn.save('first-full')
learn.load('first-full');

learn.freeze_to(-2)
learn.fit_one_cycle(5, slice(1e-2/(2.6**4),1e-2), moms=(0.8,0.7))
learn.save('second-full')
learn.load('second-full')

learn.unfreeze()
learn.fit_one_cycle(1, slice(1e-3/(2.6**4),1e-3), moms=(0.8,0.7))
learn.save('final')

# +
from sklearn.metrics import accuracy_score, matthews_corrcoef

# One column per class holds the predicted probability for that class.
df_dict = {'query': list(df_test[1]),
           'actual_label': list(df_test[0]),
           'predicted_label': ['']*df_test.shape[0]}
all_nodes = list(set(df_train[0]))
for node in all_nodes:
    df_dict[node] = ['']*df_test.shape[0]

# Inverse of the class-to-index mapping, to translate predictions back to labels.
i2c = {value: key for key, value in learn.data.c2i.items()}

df_result = pd.DataFrame(df_dict)
for index, row in df_result.iterrows():
    pred = learn.predict(data_clas.test_ds[index])
    # Write through .at: mutating the `row` Series yielded by iterrows()
    # does not reliably propagate back into the DataFrame.
    for node in all_nodes:
        df_result.at[index, node] = pred[2][learn.data.c2i[node]].item()
    df_result.at[index, 'predicted_label'] = i2c[pred[0].data.item()]
df_result.head()
# -

accuracy_score(df_result['actual_label'], df_result['predicted_label'])

matthews_corrcoef(df_result['actual_label'], df_result['predicted_label'])

df_result.to_csv('iitp+movie_result.csv', index=False)
classification-benchmarks/Hindi_Classification_Model_IITP+Movie.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # <center> Matplotlib <center>

# <img src = 'https://github.com/saeed-saffari/alzahra-workshop-spr2021/blob/main/lecture/PIC/Matplotlib.png?raw=true'
#      width = "650"
#      >

# <b> You can find a lot of example [HERE](https://matplotlib.org/gallery/index.html).

# ## Installation
#
# - Conda install matplotlib
# - pip install matplotlib
# - pip install --upgrade matplotlib

# ## Import

import numpy as np
import matplotlib.pyplot as plt

# ## Specification

# %matplotlib inline

# +
x = np.linspace(-10, 10, 50)
y = x ** 2

plt.figure(figsize=(15, 8))

# Colour, line style/width and marker decoration are all keyword arguments.
plt.plot(
    x, y,
    c='r', ls='-', lw=3,
    marker='o', markersize=13, markeredgecolor='b', markerfacecolor='m',
    label='$y = x^2$',
)
plt.legend(fontsize=15, loc='upper center')
plt.grid(c='k', ls='--', alpha=0.5)

# Shared font settings for the axis labels.
times_font = dict(fontsize=15, fontweight=2, fontname='Times New Roman')
plt.xlabel('X range', **times_font)
plt.ylabel('Y range', **times_font)
plt.title('Function $Y=x^2$\nin range (-10, 10)',
          fontsize=20, fontweight=2, fontname='Times New Roman', c='darkblue')

# Other handy calls, kept for reference:
#plt.text(-5, 60, 'First text\nin figure!', fontsize=15, fontweight=2, fontname='Times New Roman')
#plt.ylim(top=80, bottom=20)
#plt.ylim((20, 80))
#plt.xlim(left=0, right=8)
#plt.xlim((0, 8))

plt.axhline(40, c='k', ls='--', lw=2)
plt.axvline(0, c='g', ls='--', lw=2)

#plt.xticks()

#plt.savefig('first_plot.jpg', dpi=700)

plt.show()

# +
x = np.linspace(-10, 10, 50)
y = x ** 2

plt.figure(figsize=(15, 8))

# Several curves on one Axes; each gets its own legend entry.
plt.plot(x, 10 * y, c='r', ls='-', lw=3, label='$y = x^2$')
plt.plot(x, x ** 3, c='b', ls='--', lw=2, label='$y = x^3$')
plt.plot(x, 200 * np.sin(x), c='g', ls='-.', lw=2, label='$y =sin(x)$')

plt.grid()
plt.legend(fontsize=15)
plt.show()

# +
plt.figure(figsize=(15, 8))

# A 2x3 grid of subplots (positions 4 and 6 intentionally left empty).
plt.subplot(2, 3, 1)
plt.plot(x, x**2)
plt.grid()

plt.subplot(2, 3, 2)
plt.plot(x, x**3)

plt.subplot(2, 3, 3)
plt.plot(x, np.sin(x))

plt.subplot(2, 3, 5)
plt.plot(x, np.tan(x))

plt.show()

# +
# Object-oriented API: one Axes object.
fig, ax = plt.subplots(figsize=(15, 8))

ax.plot(x, x**2, label='one')
ax.plot(x, 50 * np.sin(x), label='two')

ax.grid()
ax.legend()
ax.set_xlabel('x range')
ax.set_ylabel('y range')
ax.set_title('compare')

plt.show()

# +
# Two stacked Axes sharing the x axis.
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(15, 8), sharex=True)

ax1.plot(x, x**2, label='one')
ax2.plot(x, 50 * np.sin(x), label='two')

ax1.grid()
ax2.grid()

ax2.set_xlabel('x range')
ax2.set_ylabel('y range')  # fixed typo: was 'y raange'
ax1.set_ylabel('y range')  # fixed typo: was 'y raange'

ax1.set_title('compare')

plt.show()
# -

# $$ y_1 = 20 + x $$
# $$ y_2 = 100 - 2 \times x $$

# +
x = np.linspace(0, 100, 100)
y_1 = 20 + x       # supply curve
y_2 = 100 - 2 * x  # demand curve

plt.figure(figsize=(15, 8))

plt.plot(x, y_1, c='r', lw=2, label='Supply')
plt.plot(x, y_2, c='g', lw=2, label='Demand')
plt.plot(x, y_2 - 20, 'b', lw=2, ls='-.', label='Demand after taxation')

plt.xlabel('quantity')
plt.ylabel('price')
plt.title('Demand and Supply plot')

plt.xlim(left=0, right=110)
plt.ylim(bottom=0)

# Equilibrium where supply meets demand: x = 80/3, p = 80/3 + 20.
plt.axhline(80/3 + 20, c='k', ls='--', lw=1.5)
plt.axvline(80/3, c='k', ls='--', lw=1.5)
# or use sympy package

plt.legend(fontsize=15)
plt.grid()

# Shade between the curves from x = 20 up to the equilibrium point.
plt.fill_between(x, y_2, y_1, where=(y_2 > y_1) & (x >= 20), color='darkorange', alpha=0.7)

plt.show()
# -

# ## Bar

# +
plt.figure(figsize=(15, 8))

plt.bar([1, 3, 5, 7], [5, 8, 2, 9], label='one')
plt.bar([2, 4, 6, 8], [4, 6, 1, 8], label='two')

plt.legend(fontsize=15, loc='upper left')
#plt.grid()
plt.show()
# -

# ## Scatter

# +
x = np.linspace(0, 10, 100)
e = np.random.rand(100) * 100       # additive noise
y = x + e
scale = np.random.rand(100) * 1000  # per-point marker sizes
color = np.random.random(100)       # per-point colours

plt.figure(figsize=(15, 8))
plt.scatter(x, y, s=scale, c=color)
plt.colorbar().set_label('color bar', fontsize=15)
plt.grid()
plt.show()

# +
z = np.arange(100)
d = np.arange(100) * 2
m = np.arange(100)
a = np.random.randn(100)
b = z + a * 20

plt.figure(figsize=(15, 8))
plt.scatter(z, b, c=m, s=d)
plt.colorbar()
plt.grid()
plt.show()
# -

# ## Histogram

# <img src = https://wikimedia.org/api/rest_v1/media/math/render/svg/00cb9b2c9b866378626bcfa45c86a6de2f2b2e40 >

# ![image.png](attachment:image.png)

# +
# Compare a normal sample against the analytic standard-normal pdf.
data = np.random.normal(0, 1, 100000)

x = np.linspace(-4, 4, 100)
y = 1 / np.sqrt(2*np.pi) * np.exp(-x**2 / 2)

plt.figure(figsize=(15, 8))
plt.plot(x, y, lw=4, c='k')
plt.hist(data, 100, density=1)
plt.show()
# -

# ## Style

plt.style.available

import numpy as np
import matplotlib.pyplot as plt

# +
x = np.linspace(-10, 10, 50)
y = x ** 2

#plt.style.use('seaborn')
#plt.xkcd()

# to return to the default style
import matplotlib as mpl
mpl.rcParams.update(mpl.rcParamsDefault)

plt.figure(figsize=(15, 8))
plt.plot(x, y, c='r', ls='-', lw=3,
         marker='o', markersize=13, markeredgecolor='b', markerfacecolor='m',
         label='$y = x^2$')
plt.legend(fontsize=15, loc='upper center')
plt.grid(c='k', ls='--', alpha=0.5)
plt.xlabel('X range', fontsize=15, fontweight=2, fontname='Times New Roman')
plt.ylabel('Y range', fontsize=15, fontweight=2, fontname='Times New Roman')
plt.title('Function $Y=x^2$\nin range (-10, 10)', fontsize=20, fontweight=2, fontname='Times New Roman', c='darkblue')

#plt.text(-5, 60, 'First text\nin figure!', fontsize=15, fontweight=2, fontname='Times New Roman')
#plt.ylim(top=80, bottom=20)
#plt.ylim((20, 80))
#plt.xlim(left=0, right=8)
#plt.xlim((0, 8))

plt.axhline(40, c='k', ls='--', lw=2)
plt.axvline(0, c='g', ls='--', lw=2)

#plt.xticks()

#plt.savefig('first_plot.jpg', dpi=700)

plt.show()
# -

# # See [seaborn](https://seaborn.pydata.org) library

# <img src = "https://github.com/saeed-saffari/alzahra-workshop-spr2021/blob/main/lecture/PIC/Seaborn.png?raw=true" >

# # <center> Exercise 3 <center>

# ## Q4: data

GDP_2017 = [4867, 3666, 2666, 2595, 336, 342, 195, 20, 445, 204, 312, 680, 834]
Life_exp = [84, 81, 81, 82, 83, 83, 70, 64, 76, 82, 77, 83, 81]
Freedom = [96, 95, 95, 90, 96, 51, 27, 24, 17, 84, 64, 18, 99]
Lecture/.ipynb_checkpoints/4. Matplotlib-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # [Bag of Words](https://scikit-learn.org/stable/modules/feature_extraction.html#the-bag-of-words-representation)

# ### Import libraries

# +
import pandas as pd

from sklearn.feature_extraction.text import CountVectorizer
# -

# # [Count Vectorizer](https://scikit-learn.org/stable/modules/feature_extraction.html#common-vectorizer-usage)
#
# - `CountVectorizer` builds a matrix of shape `(n_documents, n_tokens)`.
# - Each entry of this matrix is the frequency (number of occurrences) of a word in the document.
# - Every term found by the analyzer during fit is assigned a unique integer index that
#   corresponds to a column of the resulting matrix.
# - Its input is a list of strings; each string is one document.
# - **Words that were not seen in the training corpus are completely ignored in later
#   calls to the transform method.**


def show_doc_term_matrix(dense_matrix, feature_names):
    """Label a dense document-term matrix: rows named 'doc_i', columns the vocabulary tokens."""
    rows = ["doc_{}".format(i) for i in range(dense_matrix.shape[0])]
    print("Rows: {}".format(rows))
    print("Cols: {}".format(list(feature_names)))
    return pd.DataFrame(dense_matrix, columns=feature_names, index=rows)


vectorizer = CountVectorizer()

corpus = [
    'This is the first document.',
    'This is the second second document.',
    'And the third one.',
    'Is this the first document?',
]

X = vectorizer.fit_transform(corpus)
X

# ### Get the matrix representation of the CountVectorizer

matrix_representation = X.toarray()
matrix_representation

# ### Get the feature (token) names

# NOTE: get_feature_names() is deprecated in scikit-learn >= 1.0 and removed
# in 1.2 in favour of get_feature_names_out().
cols = vectorizer.get_feature_names()
show_doc_term_matrix(matrix_representation, cols)

# # What happens if a document contains new words that did not appear in training?
#
# `CountVectorizer` ignores words that are not part of the vocabulary
# (built from the training data).

new_doc = ["these words did not appear before"]
X_test = vectorizer.transform(new_doc)
X_test.toarray()

# # [TF-IDF](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn-feature-extraction-text-tfidfvectorizer)

from sklearn.feature_extraction.text import TfidfVectorizer

tfidf_vectorizer = TfidfVectorizer()

corpus = [
    'This is the first document.',
    'This is the second second document.',
    'And the third one.',
    'Is this the first document?',
]

X = tfidf_vectorizer.fit_transform(corpus)
X

# ### Get the matrix representation of the TfidfVectorizer

matrix_representation = X.toarray()
matrix_representation

# ### Get the feature (token) names

# BUG FIX: this section previously read the vocabulary from `vectorizer`
# (the CountVectorizer) instead of `tfidf_vectorizer`; it only worked by
# coincidence because both vectorizers were fitted on the same corpus.
cols = tfidf_vectorizer.get_feature_names()
show_doc_term_matrix(matrix_representation, cols)
ejemplos_de_clase/NLP preprocessing/bag_of_words.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="ISubpr_SSsiM" # ##### Copyright 2019 The TensorFlow Authors. # # + colab={} colab_type="code" id="3jTMb1dySr3V" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="6DWfyNThSziV" # # tf.function # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/alpha/tutorials/eager/tf_function"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/eager/tf_function.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/eager/tf_function.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + [markdown] colab_type="text" id="J122XQYG7W6w" # # In TensorFlow 2.0 eager execution is turned on by default. 
This gets you a very # intuitive and flexible user interface (running one-off operations is much easier # and faster) but this can come at the expense of performance and deployability. # # To get peak performance and to make your model deployable anywhere, we provide # `tf.function` as the tool you can use to make graphs out of your programs. # + colab={} colab_type="code" id="otIdN1TS8N7S" from __future__ import absolute_import, division, print_function # !pip install tf-nightly-2.0-preview import tensorflow as tf # + colab={} colab_type="code" id="SbtT1-Wm70F2" # A function is like an op @tf.function def add(a, b): return a + b add(tf.ones([2, 2]), tf.ones([2, 2])) # [[2., 2.], [2., 2.]] # + [markdown] colab_type="text" id="bfFQfPGy73oe" # A `tf.function` you define is just like a core TensorFlow operation: you can execute it eagerly, you can use it in a graph, it has gradients, etc. # + colab={} colab_type="code" id="uP-zUelB8DbX" # Functions have gradients @tf.function def add(a, b): return a + b v = tf.Variable(1.0) with tf.GradientTape() as tape: result = add(v, 1.0) tape.gradient(result, v) # + colab={} colab_type="code" id="l5qRjdbBVdU6" # You can use functions inside functions @tf.function def dense_layer(x, w, b): return add(tf.matmul(x, w), b) dense_layer(tf.ones([3, 2]), tf.ones([2, 2]), tf.ones([2])) # + [markdown] colab_type="text" id="uZ4Do2AV80cO" # # Polymorphism # # `tf.function` tries to be as generic as a Python function. You can call Python functions with all sorts of signatures, and Python will usually do something reasonable. `tf.function` does this type of polymorphism for you even though the underlying TensorFlow graphs it generates are specific to the particular types in its signature. # # You can call a function with arguments of different types to see what is happening. 
# + colab={} colab_type="code" id="kojmJrgq8U9v" # Functions are polymorphic @tf.function def add(a): return a + a print("add 1", add(1)) print("add 1.1", add(1.1)) print("add string tensor", add(tf.constant("a"))) c = add.get_concrete_function(tf.TensorSpec(shape=None, dtype=tf.string)) c(a=tf.constant("a")) # aa # + colab={} colab_type="code" id="FRp7IjWFWECa" # Functions can be faster than eager code, for graphs with many small ops import timeit conv_layer = tf.keras.layers.Conv2D(100, 3) @tf.function def conv_fn(image): return conv_layer(image) image = tf.zeros([1, 200, 200, 100]) # warm up conv_layer(image); conv_fn(image) print("Eager conv:", timeit.timeit(lambda: conv_layer(image), number=10)) print("Function conv:", timeit.timeit(lambda: conv_fn(image), number=10)) print("Note how there's not much difference in performance for convolutions") lstm_cell = tf.keras.layers.LSTMCell(10) @tf.function def lstm_fn(input, state): return lstm_cell(input, state) input = tf.zeros([10, 10]) state = [tf.zeros([10, 10])] * 2 # warm up lstm_cell(input, state); lstm_fn(input, state) print("eager lstm:", timeit.timeit(lambda: lstm_cell(input, state), number=10)) print("function lstm:", timeit.timeit(lambda: lstm_fn(input, state), number=10)) # + [markdown] colab_type="text" id="tRdlnCfV_UTn" # ## State in `tf.function` # # A very appealing property of functions as the programming model, over a general dataflow graph, is that functions can give the runtime more information about what was the intended behavior of the code. # # For example, when writing code which has multiple reads and writes to the same variables, a dataflow graph might not naturally encode the originally intended order of operations. In `tf.function`, however, because we're converting code which was traced from Python, we know the intended execution order. 
# # This means there's no need to add manual control dependencies; `tf.function` is smart enough to add the minimal set of necessary and sufficient control dependencies for your code to run correctly. # + colab={} colab_type="code" id="SASm0ss8erVX" # Automatic control dependencies a = tf.Variable(1.0) b = tf.Variable(2.0) @tf.function def f(x, y): a.assign(y * b) b.assign_add(x * a) return a + b f(1.0, 2.0) # 10.0 # + [markdown] colab_type="text" id="lPr_6mK_AQWL" # ## Variables # # We can use the same idea of leveraging the intended execution order of the code to make variable creation and utilization very easy in `tf.function`. There is one very important caveat, though, which is that with variables it's possible to write code which behaves different when called eagerly multiple times and when its output tensor is evaluated multiple times. # # Here is a simple example: # ```python # @tf.function # def f(x): # v = tf.Variable(1.0) # v.assign_add(x) # return v # # f(1.) # Note: BROKEN, will throw exception # ``` # # If you run this with eager execution, you'll always get "2" as the answer; but if you repeatedly evaluate the Tensor obtained from `f(1.)` in a graph context you'll get increasing numbers. # # So `tf.function` does not allow you to write code like that. # + colab={} colab_type="code" id="DKzNjVg8h4ao" # Non-ambiguous code is ok though v = tf.Variable(1.0) @tf.function def f(x): return v.assign_add(x) f(1.0) # 2.0 f(2.0) # 4.0 # + colab={} colab_type="code" id="HQrG5_kOiKl_" # You can also create variables inside a tf.function as long as we can prove # that those variables are created only the first time the function is executed. class C: pass obj = C(); obj.v = None @tf.function def g(x): if obj.v is None: obj.v = tf.Variable(1.0) return obj.v.assign_add(x) g(1.0) # 2.0 g(2.0) # 4.0 # + colab={} colab_type="code" id="_IOVc1eujMH2" # Variable initializers can depend on function arguments and on values of other # variables. 
We can figure out the right initialization order using the same # method we use to generate control dependencies. state = [] @tf.function def fn(x): if not state: state.append(tf.Variable(2.0 * x)) state.append(tf.Variable(state[0] * 3.0)) return state[0] * x * state[1] fn(tf.constant(1.0)) fn(tf.constant(3.0)) # + [markdown] colab_type="text" id="5f05Vr_YBUCz" # ## Control flow and autograph # # While `tf.cond` and `tf.while_loop` continue to work with `tf.function`, we provide a better alternative based on lightweight compilation of your Python code. # # The [autograph](https://www.tensorflow.org/guide/autograph) library is fully integrated with `tf.function`, and it will rewrite conditionals and loops which depend on Tensors to run dynamically in the graph. # + colab={} colab_type="code" id="yCQTtTPTW3WF" # Simple loop @tf.function def f(x): while tf.reduce_sum(x) > 1: tf.print(x) x = tf.tanh(x) return x f(tf.random.uniform([10])) # + colab={} colab_type="code" id="jlQD1ffRXJhl" # If you're curious you can inspect the code autograph generates. # It feels like reading assembly language, though. def f(x): while tf.reduce_sum(x) > 1: tf.print(x) x = tf.tanh(x) return x print(tf.autograph.to_code(f)) # + [markdown] colab_type="text" id="CNqGBVJXCVKU" # To control autograph, remember that it only affects the basic control flow constructs in Python (if, for, while, break, etc) and that it only changes them if the predicates are Tensors. 
# # So in the following example the first loop is statically unrolled while the second loop is dynamically converted: # # ```python # @tf.function # def f(x): # for i in range(10): # Static python loop, we'll not convert it # do_stuff() # for i in tf.range(10): # depends on a tensor, we'll convert it # ``` # # Similarly, to guarantee that prints and asserts happen dynamically, use `tf.print` and `tf.assert`: # + colab={} colab_type="code" id="lDpuZLL2emjP" @tf.function def f(x): for i in tf.range(10): tf.print(i) tf.Assert(i < 10, ["a"]) x += x return x f(10) # + [markdown] colab_type="text" id="hyksHW9TCukR" # Finally, autograph cannot compile arbitrary Python code into TensorFlow graphs. Specifically, the data structures which you use dynamically still need to be TensorFlow data structures. # # So, for example, the best way to accumulate data in a loop is still to use `tf.TensorArray`: # + colab={} colab_type="code" id="HJ3Vb3dXfefN" @tf.function def f(x): ta = tf.TensorArray(tf.float32, size=10) for i in tf.range(10): x += x ta = ta.write(i, x) return ta.stack() f(10.0) # + [markdown] colab_type="text" id="gVO09og4C_b8" # ## Next steps # # Now revisit the earlier notebooks and try using `tf.function` to speed up your code!
site/en/r2/tutorials/eager/tf_function.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build per-user engagement features from the FakeNewsNet dump: count how many
# fake/real PolitiFact and GossipCop stories each *unverified* Twitter user
# (re)tweeted, derive trust features, and export the CSV datasets.

# +
import json
import os
import glob
import pprint

from tqdm import tqdm
from collections import Counter

import pandas as pd
import matplotlib.pyplot as plt
# -

# Column order shared by verified.dat / unverified.dat and df_users.
USER_COLUMNS = ["uid", "name", "friends", "friends_count",
                "followers_count", "listed_count", "statuses_count",
                "created_at", "url", "avatar", "location"]

df = pd.read_csv("datasets/verified.dat", names=USER_COLUMNS)
df.info()
df.head()

ids = list(df['uid'])


def _new_user_record(use, key=None, tweet=None):
    """Create a fresh feature record from a raw Twitter profile dict `use`.

    `key` (one of 'pff'/'pfr'/'gcf'/'gcr') starts that counter at 1 and
    `tweet` seeds the tweet list, for users first seen via a (re)tweet.
    """
    record = {
        'uid': use['id'],
        'name': use['screen_name'],
        'friends': 'friends',  # placeholder: friend lists are not collected here
        'friends_count': use['friends_count'],
        'followers_count': use['followers_count'],
        'listed_count': use['listed_count'],
        'statuses_count': use['statuses_count'],
        'created_at': use['created_at'],
        'url': use['url'],
        'avatar': use['profile_image_url_https'],
        'location': use['location'],
        'pff': 0,  # PolitiFact fake (re)tweet count
        'pfr': 0,  # PolitiFact real
        'gcf': 0,  # GossipCop fake
        'gcr': 0,  # GossipCop real
        'description': use['description'],
        'tweets': [],
    }
    if key is not None:
        record[key] = 1
    if tweet is not None:
        record['tweets'].append(tweet)
    return record


def _record_tweet(users, use, tweet, key):
    """Attribute one (re)tweet of category `key` to its author; verified users are skipped."""
    if use['verified']:
        return
    uid = use['id']
    if uid in users:
        users[uid][key] += 1
        users[uid]['description'] = use['description']
        users[uid]['tweets'].append(tweet)
    else:
        users[uid] = _new_user_record(use, key=key, tweet=tweet)


def _ingest(users, pattern, key, retweets=False, limit=None):
    """Scan every JSON file matching `pattern` and record its (re)tweets under `key`.

    Retweet files hold a list under 'retweets'; tweet files are a single object.
    `limit` caps the number of files processed (progress printed every 1000).
    """
    count = 0
    for file in glob.iglob(pattern):
        if count % 1000 == 0:
            print(count)
        count += 1
        if limit is not None and count >= limit:
            break
        with open(file, encoding='utf-8', mode='r') as current_file:
            data = json.load(current_file)
        if retweets:
            for d in data['retweets']:
                _record_tweet(users, d['user'], d['text'], key)
        else:
            _record_tweet(users, data['user'], data['text'], key)


# Seed the user table with every unverified profile from the crawled dump.
users = {}
count = 0
for file in glob.iglob("../fakenewsnet_dataset/user_profiles/*.json"):
    if count % 1000 == 0:
        print(count)
    with open(file, encoding='utf-8', mode='r') as current_file:
        data = json.load(current_file)
    if data['verified'] == False:
        users[data['id']] = _new_user_record(data)
    count += 1

len(users)

# Snapshot of the profile-only user table (before tweet ingestion).
list_of_lists = [[uid, u['name'], u['friends'], u['friends_count'],
                  u['followers_count'], u['listed_count'], u['statuses_count'],
                  u['created_at'], u['url'], u['avatar'], u['location']]
                 for uid, u in users.items()]

df_users = pd.DataFrame(list_of_lists, columns=USER_COLUMNS)
df_users.info()
df_users.sort_values(['followers_count', 'friends_count'], ascending=False).head(10)

# ## PolitiFact

# ### Fake Tweets & Retweets

_ingest(users, "../fakenewsnet_dataset/politifact/fake/*/tweets/*.json", 'pff')
_ingest(users, "../fakenewsnet_dataset/politifact/fake/*/retweets/*.json", 'pff', retweets=True)

# ### Real Tweets & Retweets

_ingest(users, "../fakenewsnet_dataset/politifact/real/*/tweets/*.json", 'pfr')
_ingest(users, "../fakenewsnet_dataset/politifact/real/*/retweets/*.json", 'pfr', retweets=True)

# ## GossipCop

# ### Fake Tweets & Retweets

_ingest(users, "../fakenewsnet_dataset/gossipcop/fake/*/tweets/*.json", 'gcf')
_ingest(users, "../fakenewsnet_dataset/gossipcop/fake/*/retweets/*.json", 'gcf', retweets=True)

# ### Real Tweets (capped at 1M files, matching the original run)

_ingest(users, "../fakenewsnet_dataset/gossipcop/real/*/tweets/*.json", 'gcr', limit=1000000)

# ## Create Dataset

rows = []
for uid, u in users.items():
    rows.append([uid, u['name'], u['friends_count'], u['followers_count'],
                 u['listed_count'], u['statuses_count'],
                 u['pff'], u['pfr'], u['gcf'], u['gcr'],
                 u['description'], "^".join(u['tweets']),
                 u['friends'], u['created_at'], u['url'], u['avatar'], u['location']])

df_tw = pd.DataFrame(rows, columns=["uid", 'name', 'friends_count', 'followers_count',
                                    'listed_count', 'statuses_count',
                                    'pff', 'pfr', 'gcf', 'gcr',
                                    'description', 'tweets', 'friends',
                                    'created_at', 'url', 'avatar', 'location'])
df_tw.head()
df_tw.info()

df_feat = df_tw
df_feat.info()
df_feat.head()

# Aggregate trust features.
df_feat['total_fake'] = df_feat['pff'] + df_feat['gcf']
df_feat['total_real'] = df_feat['pfr'] + df_feat['gcr']
df_feat['net_trust'] = df_feat['total_real'] - df_feat['total_fake']
df_feat['total_news'] = df_feat['total_real'] + df_feat['total_fake']
df_feat['fake_prob'] = df_feat['total_fake'] / df_feat['total_news']
df_feat['net_trust_norm'] = df_feat['net_trust'] / df_feat['total_news']

# Label: 1 = mostly fake, 0 = mostly real, 2 = undefined
# (fake_prob is NaN when total_news == 0, so both comparisons fail).
df_feat['fake'] = [1 if x >= 0.5 else 0 if x < 0.5 else 2 for x in df_feat['fake_prob']]
df_feat.info()

df_feat.to_csv('datasets/unverified_features_500k.csv', index=False)

# Filter to active, non-aggregator accounts with enough news engagement.
df_final = df_feat[
    (df_feat['name'] != 'GossipCop')
    & (df_feat['name'] != 'PolitiFact')
    & (df_feat['friends_count'] <= 10000)
    & (df_feat['followers_count'] >= 1000)
    & (df_feat['total_fake'] > 2)
    & (df_feat['total_real'] > 2)
].sort_values(['followers_count', 'total_news', 'net_trust_norm',
               'net_trust', 'followers_count'], ascending=False)
df_final

df_final['fake'] = [1 if x >= 0.5 else 0 if x < 0.5 else 2 for x in df_final['fake_prob']]
df_final.to_csv('datasets/unverified_features_4k.csv', index=False)

# Quick sanity plots of the filtered sample.
df_final.hist(column='fake_prob', bins=20)
df_final.hist(column='friends_count', bins=20)
df_final.hist(column='followers_count', bins=20)
plt.show()

df_final["fake"].value_counts().plot.bar()
plt.show()
df_final["fake"].value_counts()

# Re-export the profile columns of the filtered users as unverified.dat.
df = pd.read_csv("datasets/unverified_features_4k.csv")
un = df[USER_COLUMNS]
un.info()

# Guard against malformed rows where friends_count is not purely numeric.
un = un[un['friends_count'].astype(str).str.isdigit()]
un.to_csv('datasets/unverified.dat', header=False, index=False)
unverified_user_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd from tqdm import tqdm_notebook prefix = 'data/' # - train_df = pd.read_csv(prefix + 'train.txt', sep='\t', header=None) train_df = train_df[train_df.columns[1:3]] train_df.head() test_df = pd.read_csv(prefix + 'test.txt', sep='\t', header=None) test_df = test_df[test_df.columns[1:3]] test_df.head() # + train_df = pd.DataFrame({ 'id':range(len(train_df)), 'label':train_df[1], 'alpha':['a']*train_df.shape[0], 'text': train_df[2].replace(r'\n', ' ', regex=True) }) train_df.head() # + dev_df = pd.DataFrame({ 'id':range(len(test_df)), 'label':test_df[1], 'alpha':['a']*test_df.shape[0], 'text': test_df[2].replace(r'\n', ' ', regex=True) }) dev_df.head() # - train_df.to_csv('data/train.tsv', sep='\t', index=False, header=False, columns=train_df.columns) dev_df.to_csv('data/dev.tsv', sep='\t', index=False, header=False, columns=dev_df.columns)
data_prep_sarcasm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## algorithm

# +
# Dispatch table: operator token -> binary operation on floats.
ops = {
    '+': float.__add__,
    '-': float.__sub__,
    '*': float.__mul__,
    '/': float.__truediv__,
    '^': float.__pow__,
}


def postfix(expression):
    """Evaluate `expression` written in postfix (reverse Polish) notation.

    `expression` is a string of whitespace-separated tokens; each token is
    either a number or one of the operators +, -, *, /, ^.  Operands are
    converted with float(), so the result is always a float.

    Raises:
        ValueError: if an operator is missing an operand, if a token is
            neither a number nor a known operator, or if operands are left
            over when the expression ends.
        ZeroDivisionError: on division by zero.
    """
    stack = []
    for token in expression.split():
        if token in ops:
            # Pop the two most recent operands; operand order matters
            # (left op right) for the non-commutative -, / and ^.
            if len(stack) < 2:
                raise ValueError(f"operator {token!r} is missing an operand")
            right = stack.pop()
            left = stack.pop()
            stack.append(ops[token](left, right))
        else:
            stack.append(float(token))  # float() raises ValueError on junk
    if len(stack) != 1:
        raise ValueError("malformed expression: unused operands remain")
    return stack.pop()
# -

# ## run

postfix('1 2 + 4 3 - + 10 5 / *')

postfix('1 2 * 6 2 / + 9 7 - ^')

postfix('1 2 3 4 5 + + + +')
100days/day 06 - postfix notation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/ufrpe-ensino/ic-aulas/blob/master/aulas/05_ListasLoops.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="aXIEOy6TLbXs" # # Listas: Funções # ## Soma # Podemos obter a soma dos elementos de uma lista de números chamando a função sum() # + colab={} colab_type="code" id="wXCknkpcLbXt" temperaturas = [35, 37.8, 40, 39.8] sum(temperaturas) # - # ## len # Podemos obter a quantidade de elementos em uma lista com a função `len`: temperaturas = [35, 37.8, 40, 39.8] len(temperaturas) # + [markdown] colab_type="text" id="GpUwrfLoLbXw" # ## min e max da lista # # - max(): O método max retorna o elemento da lista com o valor máximo. # - min(): O método min retorna o elemento da lista com o valor mínimo. # + colab={} colab_type="code" id="xj21X0LILbXx" temperaturas = [35, 37.8, 40, 39.8] min(temperaturas) # + colab={} colab_type="code" id="rmRir4sALbX0" max(temperaturas) # + [markdown] colab_type="text" id="_U4JW7nhLbX3" # ## range # A função range(n) gera uma lista de números inteiros de 0 a n-1. Para obter a lista propriamente dita, usar a função list() também # + colab={} colab_type="code" id="uUbZ3IHoLbX3" list(range(0,31,3)) # + # help(range) # + [markdown] colab_type="text" id="t4MOEdV5LbX6" # # Laço for (loops) # Laços (*loops*) são uma das estruturas fundamentais na programação. Os loops permitem que você faça uma iteração sobre cada elemento em uma sequência, um de cada vez, e faça algo com esses elementos. 
# # *Sintaxe do loop*: os loops têm uma sintaxe muito particular no Python; Essa sintaxe é um dos recursos mais notáveis para os recém-chegados ao Python: # ``` # for *elemento* in *sequencia*: # NOTE os dois pontos no final #     <algum código que usa *elemento*> # o bloco de código que está em loop para cada elemento #     <mais código que usa o *elemento*> # é recuado com TAB # # <o código depois que o loop continua> # o final do loop é marcado simplesmente por código não-indentado # ``` # # Assim, o recuo (tab) é MUITO importante para o código. Isso foi feito porque uma boa prática de codificação (em quase todas as linguagens, C, FORTRAN, MATLAB) geralmente identa loops, funções, etc. Ter um recuo significativo economiza a sintaxe de loop para um código mais compacto. # # A maioria dos editores de texto de programação será inteligente sobre recuo e também converterá TABs em quatro espaços. Os notebooks Jupyter são inteligentes quanto a recuo e farão a coisa certa, ou seja, identificar automaticamente uma linha abaixo de uma linha com dois pontos, e converter TABs em espaços. Se você estiver em outro editor, lembre-se: TABS E ESPAÇOS NÃO MISTURAM. 
# # ### Exemplo 1 # # Percorrer uma lista de temperaturas e mostrar o valor associado a um texto descritivo: # + temperaturas = [35, 37.8, 40, 39.8] temp_media = sum(temperaturas) / len(temperaturas) print('temperatura média: ', temp_media) for temp in temperaturas: print('Temperatura:', temp, '\tDiferença:', temp - temp_media) # - # ### Exemplo 2 # Calcular a temperatura média e mostrar se cada valor está acima ou abaixo da média # + temperaturas = [35, 37.8, 40, 39.8] temp_media = sum(temperaturas) / len(temperaturas) print(20*'-') print('Temperatura média: ', temp_media) print(20*'-') for temp in temperaturas: if temp > temp_media: print('Temperatura:', temp, '\tACIMA DA MÉDIA!') else: print('Temperatura:', temp, '\tABAIXO DA MÉDIA!') # - # ### Exemplo 3 # Gerar a tabuada de 2 # + colab={} colab_type="code" id="QF3DHX9sLbX7" print('+-----------------+') print('| TABUADA DE 2 |') print('+-----------------+') for i in range(11): print('| 2 x %2s = %2s' % (i, 2*i), ' |') print('+-----------------+') # + [markdown] colab_type="text" id="TkhRyv1ALbX9" # ## Exercício 1 # # Modifique o exemplo anterior (tabuada) para gerar as tabuadas de 1 a 10 # + colab={} colab_type="code" id="mvzKDKPELbX9" # + [markdown] colab_type="text" id="MlJ21GyKL1ul" # ## Exercício 2 # Sejam as listas abaixo referentes aos nomes de alunos e suas respectivas notas do 1º, 2º e 3º VA's. Crie um programa que calcule a média de cada aluno, e imprima o seu nome, a média calculada e o status, 'APROVADO' caso a média seja maior ou igual a 7,0 (sete) e 'REPROVADO' caso contrário. 
# + colab={} colab_type="code" id="xQROS9M7LbYA" notas_alunos = [['<NAME>', 4.5, 8, 9.5], ['<NAME>', 6, 7.5, 4], ['<NAME>', 7.5, 5.8, 9.5], ['<NAME>', 4.5, 8.7, 6], ['<NAME>', 10, 7.8, 9.5], ['<NAME>', 9.8, 8, 9.5]] # sua resposta aqui # + [markdown] colab_type="text" id="qjby663XN_lx" # ## Exercício 3 # Seja a função matemática definida abaixo: # $$f(n) = n^2+1$$ # Escreva um programa em python, que calcule o valor de f(n) para todos os números no intervalo `[0,100]`, e armazene todos os valores calculados em uma lista # + colab={} colab_type="code" id="WzpIjng3NliK" # sua resposta aqui # + [markdown] colab_type="text" id="pzLlPr0FPiee" # ## Exercício 4 # Escreva um programa em python que imprima na tela todos os números ímpares de 0 a 1000. # + colab={} colab_type="code" id="QSa3Q-JbN96m" # sua resposta aqui # -
aulas/05_ListasLoops.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Custom Layout # Use the `Layout` class to create a variety of map views for comparison. # # For more information, run `help(Layout)`. # # This example uses two different custom layouts. The first with a vertical orientation (2x2) and the second, horizontal (1x4). # + from cartoframes.auth import set_default_credentials set_default_credentials('cartoframes') # + from cartoframes.viz import Map, Layer, Layout Layout([ Map(Layer('drought_wk_1')), Map(Layer('drought_wk_2')), Map(Layer('drought_wk_3')), Map(Layer('drought_wk_4')) ], 2, 2) # - Layout([ Map(Layer('drought_wk_1')), Map(Layer('drought_wk_2')), Map(Layer('drought_wk_3')), Map(Layer('drought_wk_4')) ], 1, 4)
docs/examples/data_visualization/grid_layout/custom_layout.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Keras 基础 # ## 数据表示 import numpy as np # ### 标量(0D张量) x = np.array(12) print(x) print(x.ndim) # ### 向量(1D张量) x = np.array([12, 3, 6, 14, 7]) print(x) print(x.ndim) # ### 矩阵(2D张量) x = np.array([[5, 78, 2, 34, 0], [6, 79, 3, 35, 1], [7, 80, 4, 36, 2]]) print(x) print(x.ndim) # ### 3D 张量与更高维张量 x = np.array([[[5, 78, 2, 34, 0], [6, 79, 3, 35, 1], [7, 80, 4, 36, 2]], [[5, 78, 2, 34, 0], [6, 79, 3, 35, 1], [7, 80, 4, 36, 2]], [[5, 78, 2, 34, 0], [6, 79, 3, 35, 1], [7, 80, 4, 36, 2]]]) print(x) print(x.ndim) # ### 关键属性 from tensorflow.keras.datasets import mnist (train_images, train_labels), (test_images, test_labels) = mnist.load_data() # 轴的个数(阶) print(train_images.ndim) # 形状 print(train_images.shape) # 数据类型 print(train_images.dtype) # + digit = train_images[4] import matplotlib.pyplot as plt # %matplotlib inline plt.imshow(digit, cmap=plt.cm.binary) plt.show() # - # ## 在 Numpy 中操作张量 # ### 张量切片 my_slice = train_images[10:100] print(my_slice.shape) # 等同于以下操作 my_slice = train_images[10:100, :, :] print(my_slice.shape) my_slice = train_images[10:100, 0:28, 0:28] print(my_slice.shape) # + # 其他轴切片 my_slice = train_images[:, 14:, 14:] print(my_slice.shape) digit = my_slice[4] # %matplotlib inline plt.imshow(digit, cmap=plt.cm.binary) plt.show() # + # 其他轴切片 my_slice = train_images[:, 7:-7, 7:-7] print(my_slice.shape) digit = my_slice[4] # %matplotlib inline plt.imshow(digit, cmap=plt.cm.binary) plt.show() # - # ## 张量运算 # ### 逐元素运算 def naive_relu(x): assert len(x.shape) == 2 x = x.copy() # 避免覆盖输入张量 for i in range(x.shape[0]): for j in range(x.shape[1]): x[i, j] = max(x[i, j], 0) return x def naive_add(x, y): assert len(x.shape) == 2 assert x.shape == y.shape x = x.copy() for i in range(x.shape[0]): for j in range(x.shape[1]): x[i, j] += y[i, j] return x # + # 
# NumPy performs all of these element-wise operations natively:
import numpy as np

x = np.array([1, 2, 3])
y = np.array([4, 5, 6])

z = x + y
print(z)

z = np.maximum(z, 0.)
print(z)
# -

# ### Broadcasting


def naive_add_matrix_and_vector(x, y):
    """Add vector `y` to every row of matrix `x` (naive broadcasting).

    Pure-Python reference implementation of NumPy's broadcast rule for
    the (matrix + vector) case.  Returns a new array; `x` is untouched.
    """
    assert len(x.shape) == 2  # x is a 2D tensor (matrix)
    assert len(y.shape) == 1  # y is a 1D tensor (vector)
    assert x.shape[1] == y.shape[0]  # row width must equal vector length

    x = x.copy()  # avoid mutating the caller's array
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            x[i, j] += y[j]
    return x


# +
import numpy as np

x = np.random.random((5, 4, 3))
print(x.shape)

y = np.random.random(3)
print(y.shape)

# y is broadcast across the two leading axes of x.
z = np.maximum(x, y)
print(z.shape)
# -

x = np.ones((5, 4, 1))
print(x)
y = np.ones(3)
print(y)
z = np.add(x, y)
print(z)


# ### Tensor dot products


def naive_vector_dot(x, y):
    """Dot product of two equal-length 1D tensors; returns a scalar."""
    assert len(x.shape) == 1
    assert len(y.shape) == 1
    assert x.shape[0] == y.shape[0]

    z = 0.
    for i in range(x.shape[0]):
        z += x[i] * y[i]
    return z


def naive_matrix_vector_dot(x, y):
    """Dot product of a 2D tensor `x` with a 1D tensor `y` -> 1D tensor."""
    assert len(x.shape) == 2
    assert len(y.shape) == 1
    # Explicit inner-dimension check, consistent with naive_matrix_dot.
    # (Previously a mismatch was only caught indirectly inside
    # naive_vector_dot on the first row.)
    assert x.shape[1] == y.shape[0]

    z = np.zeros(x.shape[0])
    for i in range(x.shape[0]):
        z[i] = naive_vector_dot(x[i, :], y)
    return z


def naive_matrix_dot(x, y):
    """Matrix product of two 2D tensors; inner dimensions must agree."""
    assert len(x.shape) == 2
    assert len(y.shape) == 2
    assert x.shape[1] == y.shape[0]

    z = np.zeros((x.shape[0], y.shape[1]))
    for i in range(x.shape[0]):
        for j in range(y.shape[1]):
            row_x = x[i, :]
            column_y = y[:, j]
            z[i, j] = naive_vector_dot(row_x, column_y)
    return z


# +
import numpy as np

x = np.random.random((5, 3))
print(x.shape)
y = np.random.random((3, 4))
print(y.shape)

z = np.dot(x, y)
print(z.shape)
# -

# ### Tensor reshaping

# +
import numpy as np

x = np.array([[0., 1.],
              [2., 3.],
              [4., 5.]])
print(x.shape)

x = x.reshape((6, -1))
print(x.shape)
# -

x = np.transpose(x)
print(x.shape)
basic_of_keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Code to Convert the Switchboard dataset into Convokit format import os os.chdir("../../") # import convokit from convokit import Corpus, User, Utterance os.chdir("datasets/switchboard-corpus") # then come back for swda from swda import Transcript import glob # #### Create Users # # Each caller is considered a user, and there are total of 440 different callers in this dataset. Each user is marked with a numerical id, and the metadata for each user includes the following information: # # - Gender (str): MALE or FEMALE # - Education (int): 0, 1, 2, 3, 9 # - Birth Year (int): YYYY # - Dialect Area (str): MIXED, NEW ENGLAND, NORTH MIDLAND, NORTHERN, NYC, SOUTH MIDLAND, SOUTHERN, UNK, WESTERN # + files = glob.glob("./swda/*/sw_*.utt.csv") # Switchboard utterance files user_meta = {} for file in files: trans = Transcript(file, './swda/swda-metadata.csv') user_meta[str(trans.from_caller)] = {"sex": trans.from_caller_sex, "education": trans.from_caller_education, "birth_year": trans.from_caller_birth_year, "dialect_area": trans.from_caller_dialect_area} user_meta[str(trans.to_caller)] = {"sex": trans.to_caller_sex, "education": trans.to_caller_education, "birth_year": trans.to_caller_birth_year, "dialect_area": trans.to_caller_dialect_area} # - # Create a User object for each unique user in the dataset corpus_users = {k: User(name = k, meta = v) for k,v in user_meta.items()} # Check number of users in the dataset print("Number of users in the data = {}".format(len(corpus_users))) # Example metadata from user 1632 corpus_users['1632'].meta # #### Create Utterances # # Utterances are found in the "text" field of each Transcript object. There are 221,616 utterances in total. 
# # Each Utterance object has the following fields: # # - id (str): the unique id of the utterance # - user (User): the User giving the utterance # - root (str): id of the root utterance of the conversation # - reply_to (str): id of the utterance this replies to # - timestamp: timestamp of the utterance (not applicable in Switchboard) # - text (str): text of the utterance # - metadata # - tag (str): the DAMSL act-tag of the utterance # - pos (str): the part-of-speech tagged portion of the utterance # - trees (nltk Tree): parsed tree of the utterance # + utterance_corpus = {} # Iterate thru each transcript for file in files: trans = Transcript(file, './swda/swda-metadata.csv') utts = trans.utterances root = str(trans.conversation_no) + "-0" # Get id of root utterance recent_A = None recent_B = None # Iterate thru each utterance in transcript last_speaker = '' cur_speaker = '' all_text = '' text_pos = '' text_tag_list = [] counter = 0 first_utt = True for i, utt in enumerate(utts): idx = str(utt.conversation_no) + "-" + str(counter) text = utt.text # Check which user is talking if 'A' in utt.caller: recent_A = idx; user = str(trans.from_caller) cur_speaker = user else: recent_B = idx; user = str(trans.to_caller) cur_speaker = user # Only add as an utterance if the user has finished talking if cur_speaker != last_speaker and i > 0: # Put act-tag and POS information into metadata meta = {'tag': text_tag_list, } # For reply_to, find the most recent utterance from the other caller if first_utt: reply_to = None first_utt = False elif 'A' in utt.caller: reply_to = recent_B else: reply_to = recent_A utterance_corpus[idx] = Utterance(idx, corpus_users[user], root, reply_to, None, all_text, meta) # Update with the current utterance information # This is the first utterance of the next statement all_text = utt.text text_pos = utt.pos text_tag_list = [(utt.text, utt.act_tag)] counter += 1 else: # Otherwise, combine all the text from the user all_text += utt.text text_pos += 
utt.pos text_tag_list.append((utt.text, utt.act_tag)) last_speaker = cur_speaker last_speaker_idx = idx # - utterance_list = [utterance for k,utterance in utterance_corpus.items()] # Check number of utterances in the dataset print("Number of utterances in the data = {}".format(len(utterance_corpus))) # Example utterance object utterance_corpus['4325-2'] # #### Create corpus from list of utterances switchboard_corpus = Corpus(utterances=utterance_list, version=1) print("number of conversations in the dataset = {}".format(len(switchboard_corpus.get_conversation_ids()))) # ### Create Conversations # Set conversation Metadata for i, c in enumerate(switchboard_corpus.conversations): trans = Transcript(files[i], './swda/swda-metadata.csv') idx = str(trans.conversation_no) convo = switchboard_corpus.conversations[c] convo.meta['filename'] = files[i] date = trans.talk_day convo_date = "%d-%d-%d" % (date.year, date.month, date.day) convo.meta['talk_day'] = convo_date convo.meta['topic_description'] = trans.topic_description convo.meta['length'] = trans.length convo.meta['prompt'] = trans.prompt convo.meta['from_caller'] = str(trans.from_caller) convo.meta['to_caller'] = str(trans.to_caller) print(switchboard_corpus.conversations['4384-0'].meta) # #### Update corpus level metadata # + switchboard_meta = {} for file in files: trans = Transcript(file, './swda/swda-metadata.csv') idx = str(trans.conversation_no) switchboard_meta[idx] = {} switchboard_corpus.meta['metadata'] = switchboard_meta switchboard_corpus.meta['name'] = "The Switchboard Dialog Act Corpus" # - switchboard_corpus.meta['metadata']['4325'] # #### Save created corpus switchboard_corpus.dump("corpus", base_path = "./") # Check if available info from dataset can be viewed directly from convokit import meta_index meta_index(filename = "./corpus") switchboard_corpus = Corpus(filename = "./corpus") switchboard_corpus.print_summary_stats()
datasets/switchboard-corpus/convert.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd import os from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, classification_report df = pd.read_csv(os.path.join("..", "Resources", "diabetes.csv")) df.head() y = df["Outcome"] target_names = ["negative", "positive"] X = df.drop("Outcome", axis=1) X.head() # + # Split the data by using train_test_split() # YOUR CODE HERE # + # Create a logistic regression model and fit the model to the data # YOUR CODE HERE # + # Create a confusion matrix from the test values and predictions # + # Hint: Save the values of the confusion matrix to variables. # Uncomment the following code to extract the true negative, false positive, false negative, and true positive values from a confusion matrix `cm` # tn, fp, fn, tp = cm.ravel() # + # Calculate the precision of the model based on the confusion matrix # YOUR CODE HERE # + # Calculate the sensitivity of the model based on the confusion matrix # YOUR CODE HERE # + # Calculate the F1 score of the model based on the confusion matrix # YOUR CODE HERE # + # Print the classification report for the model on the test data # YOUR CODE HERE # -
01-Lesson-Plans/19-Supervised-Machine-Learning/2/Activities/03-Stu_Interpreting-Confusion-Matrixes/Unsolved/Stu_Confusion-Matrix.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import json from adblockparser import AdblockRules import utilities from openwpm_utils import domain as du from tqdm.notebook import tqdm from multiprocessing import Pool as ThreadPool import multiprocessing # + base_directory = 'khaleesi/' # Replace * with HTTP or JS request chains file name below json_representation_dir = base_directory + 'data/crawl-*.json' json_representation_dir_labeled = base_directory + 'data/crawl-*-labeled.json' easylist_dir = base_directory + 'ground_truth/easylist.txt' easyprivacy_dir = base_directory + 'ground_truth/easyprivacy.txt' # - el_rules = utilities.read_file_newline_stripped(easylist_dir) ep_rules = utilities.read_file_newline_stripped(easyprivacy_dir) # ## Initialize EL and EP rules # + adblock_el_rules_script = AdblockRules(el_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['script', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_el_rules_script_third = AdblockRules(el_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['third-party', 'script', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_el_rules_image = AdblockRules(el_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['image', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_el_rules_image_third = AdblockRules(el_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['third-party', 'image', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_el_rules_css = AdblockRules(el_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['stylesheet', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_el_rules_css_third = AdblockRules(el_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['third-party', 'stylesheet', 
'domain', 'subdocument'], skip_unsupported_rules=False) adblock_el_rules_xmlhttp = AdblockRules(el_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['xmlhttprequest', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_el_rules_xmlhttp_third = AdblockRules(el_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['third-party', 'xmlhttprequest', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_el_rules_third = AdblockRules(el_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['third-party', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_el_rules_domain = AdblockRules(el_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['domain', 'subdocument'], skip_unsupported_rules=False) # + adblock_ep_rules_script = AdblockRules(ep_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['script', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_ep_rules_script_third = AdblockRules(ep_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['third-party', 'script', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_ep_rules_image = AdblockRules(ep_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['image', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_ep_rules_image_third = AdblockRules(ep_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['third-party', 'image', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_ep_rules_css = AdblockRules(ep_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['stylesheet', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_ep_rules_css_third = AdblockRules(ep_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['third-party', 'stylesheet', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_ep_rules_xmlhttp = AdblockRules(ep_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['xmlhttprequest', 
'domain', 'subdocument'], skip_unsupported_rules=False) adblock_ep_rules_xmlhttp_third = AdblockRules(ep_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['third-party', 'xmlhttprequest', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_ep_rules_third = AdblockRules(ep_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['third-party', 'domain', 'subdocument'], skip_unsupported_rules=False) adblock_ep_rules_domain = AdblockRules(ep_rules, use_re2=True, max_mem=1024*1024*1024, supported_options=['domain', 'subdocument'], skip_unsupported_rules=False) # - # ### Helper functions # + def match_url_el(top_level_url, current_url, resource_type): try: domain_top_level = du.get_ps_plus_1(top_level_url) current_domain = du.get_ps_plus_1(current_url) if domain_top_level == current_domain: third_party_check = False else: third_party_check = True if resource_type == 'sub_frame': subdocument_check = True else: subdocument_check = False if resource_type == 'script': if third_party_check: adblock_el_rules = adblock_el_rules_script_third options = {'third-party': True, 'script': True, 'domain': domain_top_level, 'subdocument': subdocument_check} else: adblock_el_rules = adblock_el_rules_script options = {'script': True, 'domain': domain_top_level, 'subdocument': subdocument_check} elif resource_type == 'image' or resource_type == 'imageset': if third_party_check: adblock_el_rules = adblock_el_rules_image_third options = {'third-party': True, 'image': True, 'domain': domain_top_level, 'subdocument': subdocument_check} else: adblock_el_rules = adblock_el_rules_image options = {'image': True, 'domain': domain_top_level, 'subdocument': subdocument_check} elif resource_type == 'stylesheet': if third_party_check: adblock_el_rules = adblock_el_rules_css_third options = {'third-party': True, 'stylesheet': True, 'domain': domain_top_level, 'subdocument': subdocument_check} else: adblock_el_rules = adblock_el_rules_css options = {'stylesheet': True, 
'domain': domain_top_level, 'subdocument': subdocument_check}
        elif resource_type == 'xmlhttprequest':
            if third_party_check:
                adblock_el_rules = adblock_el_rules_xmlhttp_third
                options = {'third-party': True, 'xmlhttprequest': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
            else:
                adblock_el_rules = adblock_el_rules_xmlhttp
                options = {'xmlhttprequest': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
        elif third_party_check:
            # No specific resource type matched: fall back to the generic third-party rules.
            adblock_el_rules = adblock_el_rules_third
            options = {'third-party': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
        else:
            adblock_el_rules = adblock_el_rules_domain
            options = {'domain': domain_top_level, 'subdocument': subdocument_check}
        return adblock_el_rules.should_block(current_url, options)
    except:
        # NOTE(review): bare except maps *any* failure (URL parse error, missing
        # rule set, ...) to "not blocked" -- consider narrowing to `except Exception`.
        return False


def match_url_ep(top_level_url, current_url, resource_type):
    # Return True iff the EasyPrivacy rule sets block `current_url` when it is
    # requested from a page at `top_level_url` with the given `resource_type`
    # (e.g. 'script', 'image', 'stylesheet', 'xmlhttprequest', 'sub_frame').
    # Structure mirrors match_url_el above, but queries the EP rule sets.
    try:
        domain_top_level = du.get_ps_plus_1(top_level_url)  # page eTLD+1
        current_domain = du.get_ps_plus_1(current_url)      # request eTLD+1
        # A request is third-party when its registrable domain differs from the page's.
        if domain_top_level == current_domain:
            third_party_check = False
        else:
            third_party_check = True
        if resource_type == 'sub_frame':
            subdocument_check = True
        else:
            subdocument_check = False
        # Select the pre-filtered rule set for this resource type / party-ness,
        # and build the matching option dict expected by should_block().
        if resource_type == 'script':
            if third_party_check:
                adblock_ep_rules = adblock_ep_rules_script_third
                options = {'third-party': True, 'script': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
            else:
                adblock_ep_rules = adblock_ep_rules_script
                options = {'script': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
        elif resource_type == 'image' or resource_type == 'imageset':
            if third_party_check:
                adblock_ep_rules = adblock_ep_rules_image_third
                options = {'third-party': True, 'image': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
            else:
                adblock_ep_rules = adblock_ep_rules_image
                options = {'image': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
        elif resource_type == 'stylesheet':
            if third_party_check:
                adblock_ep_rules = adblock_ep_rules_css_third
                options = {'third-party': True, 'stylesheet': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
            else:
                adblock_ep_rules = adblock_ep_rules_css
                options = {'stylesheet': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
        elif resource_type == 'xmlhttprequest':
            if third_party_check:
                adblock_ep_rules = adblock_ep_rules_xmlhttp_third
                options = {'third-party': True, 'xmlhttprequest': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
            else:
                adblock_ep_rules = adblock_ep_rules_xmlhttp
                options = {'xmlhttprequest': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
        elif third_party_check:
            adblock_ep_rules = adblock_ep_rules_third
            options = {'third-party': True, 'domain': domain_top_level, 'subdocument': subdocument_check}
        else:
            adblock_ep_rules = adblock_ep_rules_domain
            options = {'domain': domain_top_level, 'subdocument': subdocument_check}
        return adblock_ep_rules.should_block(current_url, options)
    except:
        # NOTE(review): bare except -- same caveat as match_url_el.
        return False
# -

json_representation = utilities.read_json(json_representation_dir)

# ## Label ads/trackers

def match_with_fl(item):
    # Thread-pool worker. `item` is
    # "<key>|<redirect_id>|-|-|<top_url>|-|-|<current_url>|-|-|<resource_type>".
    # Returns "<key>|<redirect_id>|True" / "...|False", or 'INVALID' on any error.
    try:
        splitted_item = item.split('|-|-|')
        key = splitted_item[0]
        top_url = splitted_item[1]
        current_url = splitted_item[2]
        resource_type = splitted_item[3]
        # A request is labeled positive if *either* EasyPrivacy or EasyList matches.
        if match_url_ep(top_url, current_url, resource_type) or match_url_el(top_url, current_url, resource_type):
            return key + '|' + str(True)
        else:
            return key + '|' + str(False)
    except:
        return 'INVALID'

# Build one work item per (page, request) pair for the pool.
key_map = []
for key in json_representation:
    top_url = json_representation[key]['top_url']
    for request in json_representation[key]['content']:
        resource_type = request['resource_type']
        current_url = request['url']
        redirect_id = request['redirect_id']
        if top_url == None:
            top_url = ''
        key_map.append(key + '|' + str(redirect_id) + '|-|-|' + top_url + '|-|-|' + current_url + '|-|-|' + resource_type)

cpu_to_relax = 1  # leave one core free for the rest of the machine
pool = ThreadPool(processes=multiprocessing.cpu_count() - cpu_to_relax)
results = pool.map(match_with_fl, key_map)
pool.close()
pool.join()

# Default every page to unlabeled, then fold the pool results back in.
for key in json_representation:
    json_representation[key]['ground_truth'] = False
for r_item in results:
    # r_item is "<key>|<redirect_id>|<label>"; peel the fields off from the right.
    # NOTE(review): an 'INVALID' result contains no '|' separators, so the
    # second rsplit indexing below raises IndexError -- confirm this is intended.
    key = r_item.rsplit('|', 1)[0].rsplit('|', 1)[0]
    redirect_id = r_item.rsplit('|', 1)[0].rsplit('|', 1)[1]
    label = True if r_item.rsplit('|', 1)[1] == 'True' else False
    # NOTE(review): the page-level ground_truth ends up as the label of the
    # *last* result processed for this key, not an OR over its requests -- confirm.
    json_representation[key]['ground_truth'] = label
    for request in json_representation[key]['content']:
        current_redirect_id = request['redirect_id']
        if int(current_redirect_id) == int(redirect_id):
            request['ground_truth'] = label

utilities.write_json(json_representation_dir_labeled, json_representation)
code/filter_lists_labeling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Regexps:
#
# ###### Text:
#     I1202 15:11:28.242069 31983 solver.cpp:341] Iteration 5655, Testing net (#0)
#     I1202 15:11:36.076130   374 blocking_queue.cpp:50] Waiting for data
#     I1202 15:11:52.472803 31983 solver.cpp:409]     Test net output #0: accuracy = 0.873288
#     I1202 15:11:52.472913 31983 solver.cpp:409]     Test net output #1: loss = 0.605587 (* 1 = 0.605587 loss)
#
# ###### Regexp:
#     (?<=Iteration )(.*)(?=, Testing net)
# Result:
#     5655
# ###### Regexp:
#     (?<=accuracy = )(.*)
# Result:
#     0.873288
# ###### Regexp:
#     (?<=Test net output #1: loss = )(.*)(?= \()
# Result:
#     0.605587
#
# ###### Text:
#     I1202 22:45:56.858299 31983 solver.cpp:237] Iteration 77500, loss = 0.000596309
#     I1202 22:45:56.858502 31983 solver.cpp:253]     Train net output #0: loss = 0.000596309 (* 1 = 0.000596309 loss)
#
# ###### Regexp:
#     (?<=Iteration )(.*)(?=, loss)
# Result:
#     77500
# ###### Regexp:
#     (?<=Train net output #0: loss = )(.*)(?= \()
# Result:
#     0.000596309
#
#
# ###### Text:
#     test_iter: 1456
#     test_interval: 4349
#     base_lr: 5e-05
#     display: 1000
#     max_iter: 4000
#     lr_policy: "fixed"
#     momentum: 0.9
#     weight_decay: 0.004
#     snapshot: 2000
#
# ###### Regexp:
#     (?<=base_lr: )(.*)(?=)
# Result:
#     5e-05

# imports, and settings for pretty plots.
# +
import matplotlib as mpl
import seaborn as sns
sns.set(style='ticks', palette='Set2')
sns.despine()
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
# %matplotlib inline
import re
import os
from matplotlib import pyplot as plt
import numpy as np
from scipy.stats import ttest_rel as ttest
import matplotlib
from matplotlib.backends.backend_pgf import FigureCanvasPgf

# Route pdf output through the pgf backend so exported figures use serif fonts.
matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf)
pgf_with_rc_fonts = {
    "font.family": "serif",
}
mpl.rcParams.update(pgf_with_rc_fonts)

# +
# Lookbehind/lookahead patterns that pull numbers out of Caffe training logs
# (see the regexp examples in the markdown cell above).  Raw strings are used
# where the pattern contains a backslash escape.
test_iteration_regex = re.compile("(?<=Iteration )(.*)(?=, Testing net)")
test_accuracy_regex = re.compile("(?<=accuracy = )(.*)")
test_loss_regex = re.compile(r"(?<=Test net output #1: loss = )(.*)(?= \()")
train_iteration_regex = re.compile("(?<=Iteration )(.*)(?=, loss)")
train_loss_regex = re.compile(r"(?<=Train net output #0: loss = )(.*)(?= \()")
learning_rate_regex = re.compile("(?<=base_lr: )(.*)(?=)")

# +
def create_empty_regexp_dict():
    """Return a dict mapping each compiled log regex to a fresh, empty result list."""
    regexps_dict = {test_iteration_regex: [],
                    test_accuracy_regex: [],
                    test_loss_regex: [],
                    train_iteration_regex: [],
                    train_loss_regex: [],
                    learning_rate_regex: []}
    return regexps_dict

def search_regexps_in_file(regexp_dict, file_name):
    """Scan file_name line by line and append every regex hit (as float) to regexp_dict.

    Mutates regexp_dict in place; keys are compiled patterns, values are lists
    of the floats extracted from matching log lines.
    """
    with open(file_name) as opened_file:
        for line in opened_file:
            for regexp in regexp_dict:
                matches = regexp.search(line)
                # Assuming only one match was found
                if matches:
                    # Fix: reuse the match object instead of running the same
                    # regex a second time over the same line.
                    regexp_dict[regexp].append(float(matches.group()))

# +
rgb_dict = create_empty_regexp_dict()
search_regexps_in_file(rgb_dict, '/home/noa/pcl_proj/experiments/cifar10/every_fifth_view/0702/rgb/log.log')

hist_dict = create_empty_regexp_dict()
search_regexps_in_file(hist_dict, '/home/noa/pcl_proj/experiments/cifar10/every_fifth_view/0702/hist/log.log')

rgb_hist_dict = create_empty_regexp_dict()
search_regexps_in_file(rgb_hist_dict, '/home/noa/pcl_proj/experiments/cifar10/every_fifth_view/0702/rgb_hist/log.log')
# -

# print() form works identically under Python 2 and Python 3 for a single argument.
print(rgb_dict[learning_rate_regex][0])

# +
dates_list = ['1601', '1801', '2101', '2701', '0302', '0702', '0902', '1202']
# acc[0]=rgb, acc[1]=hist, acc[2]=rgb+hist; one final test accuracy per run date.
acc = [[], [], []]
for date_dir in dates_list:
    rgb_dict = create_empty_regexp_dict()
    search_regexps_in_file(rgb_dict, '/home/noa/pcl_proj/experiments/cifar10/every_fifth_view/' + date_dir + '/rgb/log.log')
    hist_dict = create_empty_regexp_dict()
    search_regexps_in_file(hist_dict, '/home/noa/pcl_proj/experiments/cifar10/every_fifth_view/' + date_dir + '/hist/log.log')
    rgb_hist_dict = create_empty_regexp_dict()
    search_regexps_in_file(rgb_hist_dict, '/home/noa/pcl_proj/experiments/cifar10/every_fifth_view/' + date_dir + '/rgb_hist/log.log')
    # Last recorded test accuracy of each run.
    acc[0].append(rgb_dict[test_accuracy_regex][-1])
    acc[1].append(hist_dict[test_accuracy_regex][-1])
    acc[2].append(rgb_hist_dict[test_accuracy_regex][-1])
# -

print(np.array(acc[0]).mean())
print(np.array(acc[0]).std())
print(np.array(acc[1]).mean())
print(np.array(acc[1]).std())
print(np.array(acc[2]).mean())
print(np.array(acc[2]).std())

# Paired t-tests between the three input variants across the run dates.
_, p_1 = ttest(np.array(acc[0]), np.array(acc[1]))
_, p_2 = ttest(np.array(acc[0]), np.array(acc[2]))
_, p_3 = ttest(np.array(acc[2]), np.array(acc[1]))

print('rgb vs. hist:')
print(p_1)
print('rgb vs. rgb_hist')
print(p_2)
print('hist vs, rgb_hist')
print(p_3)

# +
#csfont = {'fontname':'Comic Sans MS'}
#hfont = {'fontname':'Helvetica'}

# Test-accuracy curves for the three variants (last loaded date, '1202').
fig2, axs2 = plt.subplots(1, 1, figsize=(40, 20), facecolor='w', edgecolor='k', sharex=True)
spines_to_remove = ['top', 'right']
for spine in spines_to_remove:
    axs2.spines[spine].set_visible(False)
axs2.spines['bottom'].set_linewidth(3.5)
axs2.spines['left'].set_linewidth(3.5)
#axs2.set_title('Test set accuracy and loss', fontsize=20)
axs2.xaxis.set_ticks_position('none')
axs2.yaxis.set_ticks_position('none')
axs2.plot(rgb_dict[test_iteration_regex], rgb_dict[test_accuracy_regex], label='RGB', linewidth=8.0)
axs2.plot(hist_dict[test_iteration_regex], hist_dict[test_accuracy_regex], label='FPFH', linewidth=8.0)
axs2.plot(rgb_hist_dict[test_iteration_regex], rgb_hist_dict[test_accuracy_regex], label='RGB+FPFH', linewidth=8.0)
axs2.legend(loc=4, fontsize=60)
axs2.set_ylabel('Test Accuracy', fontsize=70)
plt.yticks(fontsize=60)
axs2.axes.get_xaxis().set_visible(False)
'''for spine in spines_to_remove:
    axs2[1].spines[spine].set_visible(False)
axs2[1].xaxis.set_ticks_position('none')
axs2[1].yaxis.set_ticks_position('none')
axs2[1].plot(rgb_dict[test_iteration_regex], rgb_dict[test_loss_regex], label='rgb')
axs2[1].plot(hist_dict[test_iteration_regex], hist_dict[test_loss_regex], label='histograms')
axs2[1].plot(rgb_hist_dict[test_iteration_regex], rgb_hist_dict[test_loss_regex], label='rgb+histograms')
axs2[1].legend(fontsize=18)
plt.ylabel('Test Accuracy', fontsize=18)
plt.xlabel('Iterations', fontsize=18)'''
#plt.xlim(0,3000)
plt.show()

# Train-loss curves (log scale) for the same three runs.
fig2, axs2 = plt.subplots(1, 1, figsize=(40, 15), facecolor='w', edgecolor='k', sharex=True)
for spine in spines_to_remove:
    axs2.spines[spine].set_visible(False)
axs2.spines['bottom'].set_linewidth(3.5)
axs2.spines['left'].set_linewidth(3.5)
axs2.xaxis.set_ticks_position('none')
axs2.yaxis.set_ticks_position('none')
axs2.set_yscale('log')
axs2.plot(rgb_dict[train_iteration_regex], (np.array(rgb_dict[train_loss_regex])), label='RGB', linewidth=6.0)
axs2.plot(hist_dict[train_iteration_regex], (np.array(hist_dict[train_loss_regex])), label='FPFH', linewidth=6.0)
axs2.plot(rgb_hist_dict[train_iteration_regex], (np.array(rgb_hist_dict[train_loss_regex])), label='RGB+FPFH', linewidth=6.0)
#axs2.set_title('Training set loss (log-scale)', fontsize=20)
axs2.legend(fontsize=60)
plt.ylabel('Train Loss', fontsize=70)
plt.xlabel('Iterations', fontsize=70)
plt.yticks(fontsize=60)
plt.xticks(fontsize=60)
plt.show()
#plt.xlim(47800,48000)
# -
parse_and_plot_caffe_log.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
import math
import sys
import glob

# Make the vendored Singular Spectrum Analysis implementation importable.
sys.path.insert(0,"../src/ext-libraries/SingularSpectrumAnalysis/")
from mySSA import mySSA

def load_data(path, resampling=None):
    ## Load every *.txt file under `path` (recursively) into a single DataFrame
    ## indexed by the 'datetime' column, optionally mean-resampled.
    ## some resampling options: 'H' - hourly, '15min' - 15 minutes, 'M' - monthly
    ## more options at:
    ## http://benalexkeen.com/resampling-time-series-data-with-pandas/
    allFiles = glob.iglob(path + "/**/*.txt", recursive=True)
    frame = pd.DataFrame()
    list_ = []
    for file_ in allFiles:
        #print("Reading: ",file_)
        df = pd.read_csv(file_,index_col="datetime",parse_dates=['datetime'], header=0, sep=",")
        # NOTE(review): DataFrame.columns is never None, so this branch is dead;
        # `frame` is also rebuilt wholesale by the concat below -- confirm intent.
        if frame.columns is None :
            frame.columns = df.columns
        list_.append(df)
    frame = pd.concat(list_)
    if resampling is not None:
        frame = frame.resample(resampling).mean()
        # Forward-fill gaps introduced by resampling.
        frame = frame.fillna(method='ffill')
    return frame

# +
path = '/Users/cseveriano/spatio-temporal-forecasting/data/processed/NREL/Oahu'
df = load_data(path)

# Fix the column order (sensor IDs).
df.columns = ['DHHL_3','DHHL_4', 'DHHL_5', 'DHHL_10', 'DHHL_11', 'DHHL_9', 'DHHL_2', 'DHHL_1', 'DHHL_1_Tilt', 'AP_6', 'AP_6_Tilt', 'AP_1', 'AP_3', 'AP_5', 'AP_4', 'AP_7', 'DHHL_6', 'DHHL_7', 'DHHL_8']

# the beginning of the data has measurement failures, so drop it
df = df.loc[df.index > '2010-03-20']
df.drop(['AP_3', 'DHHL_1_Tilt', 'AP_6_Tilt'], axis=1, inplace=True)
# -

df.to_pickle("df_oahu.pkl")

# +
dt = []
p_inds = [i for i in range(3)]  # first 3 SSA components -> "clean" trend signal

df_clean = pd.DataFrame(columns=df.columns)
df_residual = pd.DataFrame(columns=df.columns)

# Decompose each sensor column, one calendar month at a time.
for c in df.columns:
    dfc = df[c]
    cl = []
    rs = []
    for date in df.index.to_period('M').unique():
        ts = dfc[str(date)]
        N = int(len(ts)) # number of samples
        T = 96 # sample daily frequency (4 samples per hour)
        embedding_dimension = int(N / T)
        ssa = mySSA(ts)
        ssa.embed(embedding_dimension=embedding_dimension,verbose=True)
        # Components beyond the first three form the residual.
        res_streams = [j for j in range(3,embedding_dimension)]
        ssa.decompose(verbose=True)
        principal = ssa.view_reconstruction(*[ssa.Xs[i] for i in p_inds], names=p_inds, plot=False, return_df=True)
        residual = ssa.view_reconstruction(*[ssa.Xs[i] for i in res_streams], names=res_streams, plot=False, return_df=True)
        cl.extend(list(principal['Reconstruction']))
        rs.extend(list(residual['Reconstruction']))
    df_clean[c] = cl
    df_residual[c] = rs
# -

# Sanity check: first 120 samples (30 hours) of the reconstructed trend for one sensor.
plt.plot(df_clean['DHHL_3'].iloc[0:120])

df_clean.index = df.index
df_residual.index = df.index

df_clean.to_pickle("df_ssa_clean.pkl")
df_residual.to_pickle("df_ssa_residual.pkl")
notebooks/180514 - Oahu Decompose with SSA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Homework 2
#
# This week we learned about one of the most powerful concepts in all of programming. The `for` and `while` loops are used for so many different things in the world of programming and we will give you a sense of this in this homework. We also cover how to format strings, and use that as a tool for some real world application problems! If you have questions go to office hours (fill out the poll if you haven't already) or ask on the class piazza page.

# ## Comparison Operators (5 points)

# For each of the following expressions below, write the boolean answer that the expression will evaluate to. Double click where it says to put your answer, this will allow you to edit the cell.

a = 7
b = 14

# `a > b` evaluates to: **[YOUR ANSWER HERE]**
#
# `a > 0 and b > 0` evaluates to: **[YOUR ANSWER HERE]**
#
# `a > 0 and b < 0` evaluates to: **[YOUR ANSWER HERE]**
#
# `a == b` evaluates to: **[YOUR ANSWER HERE]**
#
# `2*a == b and a < b` evaluates to: **[YOUR ANSWER HERE]**
#
# `a != b` evaluates to: **[YOUR ANSWER HERE]**
#
# `a == b or a != -b` evaluates to: **[YOUR ANSWER HERE]**
#
# `"James" > "Yilun" or "Wendy" > "Yilun"` evaluates to: **[YOUR ANSWER HERE]**
#
# `"racecar" == "racecar"[::-1]` evaluates to: **[YOUR ANSWER HERE]**
#
# `"a" < "b"` evaluates to: **[YOUR ANSWER HERE]**

# ## Operations on Lists (5 Points)

# Note: The following cells have to be run in order!

# +
'''
Here is a list of the names of everyone in this course!
'''
# Fixed: the original list was missing a comma between "Oski" and "Kingsley",
# so implicit string concatenation silently merged them into "OskiKingsley".
students = ["Caytyn", "Paige", "Victoria", "Riley", "Victor", "Oski",
            "Kingsley", "Geo", "Enmanuel", "<NAME>", "Joshua", "Lyla",
            "Nadia", "Camden", "Mey", "Sebastian", "Esther", "Chloe"]

# +
'''
What is the length of this list?
'''
### YOUR CODE HERE ###

# +
'''Oops, we forgot the facilitators. Can you create a new list for
faciliators and append it to the original list?'''
### YOUR CODE HERE ###

# +
'''
I think I also put in a couple people that aren't students by accident.
Can you remove <NAME> and Oski from the list?
'''
### YOUR CODE HERE ###

# +
'''Now can you give me a new list with only people who's names start with
a V. This might be a little challenging'''
### YOUR CODE HERE ###
# -

# ## Conditional + Iterative Problems (40 Points)

# ### <span style="color:red">Absolute</span>ly Magnificent (5 Points)
#
# Write your own version of an absolute value function. Where it takes in a number a and returns the absolute value of a. You are not allowed to use the built in `abs` function discussed in lecture.

def my_absolute(a):
    """Return abs(a), but without calling abs.

    >>> my_absolute(2)
    2
    >>> my_absolute(-2)
    2
    """
    # (Doctest fixed: abs(-2) is 2, not 3.)

# ### Huh. That's Odd... (5 Points)

# +
def is_odd(num):
    '''
    Returns True if number is odd.
    '''
    ### YOUR CODE HERE ###

def select_odd(numbers):
    '''
    Returns a new list with only the numbers that are odd in the numbers list.

    >>> select_odd([2, 3, 7, 10])
    [3, 7]
    >>> select_odd([2, 4, 6, 8])
    []
    '''
    ### YOUR CODE HERE ###
# -

# ### Prime Numbers (10 Points)
#
# [Adapted from Ayars, Problem 1-6] Write a function called `is_prime(n)` that determines whether a number `n` is prime or not, and returns either `True` or `False` accordingly. You can assume that the argument `n` passed to any of your functions will be an integer. Remember to include descriptive doc strings for each function you write!
#
#     def is_prime(n):
#         """ Determines if n is prime or not. Takes an integer n.
#         Returns True if n is prime, and False otherwise. """
#         # Your code here!
#
# Remember to try various test cases: What if the argument passed to `is_prime` is ...
# - 20
# - 2
# - 1
# - 0
# - negative

# +
### YOUR CODE HERE ###
# -

# ### Fibonacci Sequence (10 Points)

# The Fibonacci sequence is a sequence of integers defined by the following relation. The $n$-th integer $a_n$ is defined in terms of previous integers of the sequence as
#
# $$a_n = a_{n-1} + a_{n-2}$$
#
# and $a_0 = 0$ and $a_1 = 1$. So the first few numbers are 0, 1, 1, 2, 3, 5, 8, 13, 21, .... Write a program to print the Fibonacci series from 0 to 50. Expected output:
#
#     0 1 1 2 3 5 8 13 21 34

# Hint 1: Two (or more) variables can be assigned simultaneously. For instance, to swap the values of `a` and `b`, you can write `a, b = b, a`.
#
# Hint 2: Try doing so using a while or a for loop. If you feel ambitious you can try doing this recursively but that is not required. If you do code this recursively you will see how much prettier recursion can be and is often why computer scientists like to use it.

# +
### YOUR CODE HERE ###
# -

# ### Pyramid Building (10 Points)
#
# Write a Python program to construct the following pattern, using a nested `for` loop (or maybe two nested `for` loops).
#
#     *
#     * *
#     * * *
#     * * * *
#     * * * * *
#     * * * *
#     * * *
#     * *
#     *

# Hints: You can multiply strings by numbers to have them repeat multiple times!
#
# Also, some solutions to exercise this might use a `range()` going backwards. You can make `range()` go backwards using syntax like this: `range(20,10,-1)`.

# +
### YOUR CODE HERE ###
# -

# ## How to Format Strings
#
# We did not get the chance to cover this in lecture, but in scientific computing it is often extremeley valuable to print your results out in a way that the average reader could understand. We will be looking at the `.format` method which will help you in some of the problems above.
#
# `print("blah blah {0:2.1f} stuff and blah blah {1:2.1f} other stuff more blahs".format(stuff1, stuff2))`
#
# The above line of code is how we can place well formatted values into a long sentence in python when we want to print it out. You will notice right away the curly braces. The first number inside is the index of the `.format` tuple at the very end of the string (How you put multiple variables into the string). The second number refers to how much white space you would like between the words and your value. The third number is the number of sig figs past the decimal you would like. Below are some examples of how this works in practice.

# +
mass = 7.23872193 #kg
height = 1.4 #meters

print("My mass would be {0:2.1f} kg and my height would be {1:2.1f} meters".format(mass, height))
print("My mass would be {0:2.3f} kg and my height would be {1:2.3f} meters".format(mass, height))
print("My mass would be {0:2.6f} kg and my height would be {1:2.6f} meters".format(mass, height))
print('\n') #prints a new line for neatness
print("My mass would be {0:10.2f} kg".format(mass))
print("My mass would be {0:5.2f} kg".format(mass))
print("My mass would be {0:1.2f} kg".format(mass))
# -

# ## Real World Application Problems (30 Points)

# ### Satellite Altitudes (15 Points)
#
# [Adapted from Newman, Exercise 2.2 and Physics 77] A satellite is to be launched into a circular orbit around the Earth so that it orbits the planet once every $T$ seconds. The altitude $h$ above the Earth's surface that the satellite must have is $$ h = \left( \frac{G M T^2}{4 \pi^2} \right)^{1/3} - R, $$ where $G = 6.67 \times 10^{-11}~\text{m}^3~\text{kg}^{-1}~\text{s}^{-2}$ is Newton's gravitational constant, $M = 5.97 \times 10^{24}~\text{kg}$ is the mass of the Earth, and $R = 6371~\text{km}$ is its radius.

# **1a.** Write a program that, for a given value of $T$ (entered as a variable T in a cell), calculates and prints out the correct altitude in meters, kilometers, and miles, with one decimal place for each result.
#
# *Output for 1a*: When the code cell for this part of the problem is entered, it should specify (in the comments or the Markdown cell above) what units of $T$ are assumed. The program should print out the correct altitude in meters, kilometers, and miles, with one decimal place for each result.

# +
### YOUR CODE HERE ###
# -

# *Output for 1b:* Use code cells to carry out the desired calculations, and Markdown cells to present and discuss your results. To create a new cell go to the top menu and click `Insert` then `Insert Cell` either above or below the cell you currently have selected
#
# **1b.** Use your program to calculate the altitudes of satellites that orbit the Earth once a day (so-called "geosynchronous" orbit), once every 90 minutes, and once every 45 minutes. What do you conclude from the last of these calculations?

# +
### YOUR CODE HERE ###
# -

# ### Perfect Hardboiled Eggs (15 points)
#
# [Adapted from Langtangen, Exercise 1.12 and Physics 77. You may derive the formula in Physics 112 or Physics 89] As an egg cooks, the proteins first denature and then coagulate. When the temperature exceeds a critical point, reactions begin and proceed faster as the temperature increases. In the egg white the proteins start to coagulate for temperatures above 63 C, while in the yolk the proteins start to coagulate for temperatures above 70 C. For a soft boiled egg, the white needs to have been heated long enough to coagulate at a temperature above 63 C, but the yolk should not be heated above 70 C. For a hard boiled egg, the center of the yolk should be allowed to reach 70 C.
#
# The following formula expresses the time $t$ it takes (in seconds) for the center of the yolk to reach the temperature $T_y$ (in Celsius degrees): $$ t = \frac{M^{2/3} c \rho^{1/3}}{K \pi^2 (4\pi/3)^{2/3}} \ln \left[ 0.76 \frac{T_0 - T_w}{T_y - T_w} \right] . $$ Here, $M$, $\rho$, $c$, and $K$ are properties of the egg:
# * $M$ is the mass,
# * $\rho$ is the density,
# * $c$ is the specific heat capacity, and
# * $K$ is the thermal conductivity.
#
# Relevant values are
# * $M = 64~\text{g}$ for a large egg (USA size XL: en.wikipedia.org/wiki/Chicken_egg_sizes),
# * $\rho = 1.0378~\text{g cm}^{-3}$,
# * $c = 3.7~\text{J g}^{-1}\,\text{K}^{-1}$, and
# * $K = 5.4 \cdot 10^{-3}~\text{W cm}^{-1}\,\text{K}^{-1}$.
#
# Furthermore,
# * $T_w$ is the temperature (in C degrees) of the boiling water, and
# * $T_0$ is the original temperature (in C degrees) of the egg before being put in the water.
#
# Suppose we want our eggs hard-boiled. Implement the formula in a program, set $T_w = 100~\text{C}$ and $T_y = 70~\text{C}$, and compute $t$ for a large egg taken from the fridge ($T_0 = 4~\text{C}$) and from room temperature ($T_0 = 20~\text{C}$). Also compute the results for a small egg ($M = 42~\text{g}$).

# *Output for 2:*
# When you run your code cell, it should produce the following text, with your numbers instead of the `TTT`, `MMM`, and `SSS` placeholders:
#
#     To hard-boil a large egg taken directly from the fridge, cook it for TTT minutes (MMM min, SSS sec).
#     To hard-boil a small egg taken directly from the fridge, cook it for TTT minutes (MMM min, SSS sec).
#     To hard-boil a large egg starting from room temperature, cook it for TTT minutes (MMM min, SSS sec).
#     To hard-boil a small egg starting from room temperature, cook it for TTT minutes (MMM min, SSS sec).
#
# The `TTT` placeholders should be values in minutes to two decimal places. The `MMM` and `SSS` placeholders should be rounded to the nearest minute/second, with no decimal places. For example,
#
#     To hard-boil a large egg taken directly from the fridge, cook it for 56.78 minutes (56 min 47 sec).
#
# #### Hints
#
# Writing the entire formula in one line is difficult to type, difficult to read, difficult to debug---and you have to retype the entire calculation just to change one piece of it. Try breaking it down in smaller chunks assigned to variables, and combine those chunks to produce the final result.
#
# Beware of integer division!
#
# Remember to comment your code and use descriptive variable names so others (and future you) can understand what you're doing!

# +
### YOUR CODE HERE ###
Fall2020_DeCal_Material/Homework/Week3/Week 3 HW2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Demo: turning sentences into integer token sequences with the Keras Tokenizer,
# including how out-of-vocabulary words are handled.

# + id="0g-h1j9LTaD2" colab_type="code" colab={}
#importing tokenizer
from tensorflow.keras.preprocessing.text import Tokenizer

# + id="6hx4FX-sTwXf" colab_type="code" colab={}
#list of sentences for input
sentences = [
    'My favorite food is ice cream',
    'do you like ice cream too?',
    'My dog likes ice cream!',
    "your favorite flavor of icecream is chocolate",
    "chocolate isn't good for dogs",
    "your dog, your cat, and your parrot prefer broccoli"
]

# + id="t_iU3yDoT3qD" colab_type="code" colab={}
#tokenizing the words by setting optionally maximum number of words and also
#setting the OOV token and then fitting our tokenizer on the words
tokenizer = Tokenizer(num_words = 100, oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)

# + id="SITJ3HUFUdpr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="e541f40b-ed6a-42c7-d060-fc55ba31eb6f"
#for viewing how our sentences are tokenized we can use words index
#here word is the key and numbers are the value
word_index = tokenizer.word_index
print(word_index)

# + id="ezr_UsDzVQnb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="139b2389-a5f6-4f0e-a198-ec6e1467a703"
#getting the number for a single word is also easy
print(word_index['chocolate'])

# + id="Imu388VMVbwS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b5fb8bd9-768f-4cb0-898d-b1be36b919f6"
#creating sequences for sentences
sequences = tokenizer.texts_to_sequences(sentences)
print(sequences)

# + id="UxXXMSJ6Vq8i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="61d6786d-39ad-4680-a073-fbccf6aff7b1"
#sequencing the words not present in the word index, such words are mapped with
#OOV token
sentences2 = ["I like hot chocolate", "My dogs and my hedgehog like kibble but my squirrel prefers grapes and my chickens like ice cream, preferably vanilla"]
sequences2 = tokenizer.texts_to_sequences(sentences2)
print(sequences2)

# + id="W__7zCBeWIsr" colab_type="code" colab={}
ML_Training_udacity/8. NLP-TOKENIAZATION AND EMBEDDINGS/Words_into_tokens.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Real-time face-mask detection: Haar-cascade face localisation + a MobileNetV2
# binary classifier over each detected face, rendered on the live webcam feed.

import tensorflow as tf

physical_devices = tf.config.list_physical_devices()
# NOTE(review): indexes device 3 of *all* physical devices -- this assumes at
# least four devices exist and that index 3 is the intended one; confirm.
tf.config.experimental.set_memory_growth(physical_devices[3], True)

import os
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization, Input, MaxPooling2D, Flatten, Conv2D
from tensorflow.keras.models import Model
from tensorflow.keras import layers
import cv2

# +
INPUT_SHAPE = (224, 224, 3)

# creating objects of class obtained from opencv lib
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# +
# Defining the model
def get_model():
    # MobileNetV2 backbone (ImageNet weights, left trainable) followed by a
    # funnel of Dense+ReLU layers down to a single sigmoid output; the score is
    # thresholded at 0.5 in detect() below (>0.5 -> "No Mask").
    x_input = Input(INPUT_SHAPE)
    pretrained_model = MobileNetV2(input_shape=INPUT_SHAPE, alpha=1.0, include_top=False, weights='imagenet', input_tensor=x_input)
    pretrained_model.trainable = True
    # Average over the two spatial axes of the backbone output (global pooling).
    x = tf.reduce_mean(pretrained_model.outputs[0], axis = [1, 2])
    x = Flatten()(x)
    x = Dense(512, kernel_initializer = "he_normal")(x)
    x = tf.nn.relu(x)
    x = Dropout(0.2)(x)
    x = Dense(256, kernel_initializer = "he_normal")(x)
    x = tf.nn.relu(x)
    x = Dense(128, kernel_initializer = "he_normal")(x)
    x = tf.nn.relu(x)
    x = Dense(64, kernel_initializer ="he_normal")(x)
    x = tf.nn.relu(x)
    x = Dense(64, kernel_initializer ="he_normal")(x)
    x = tf.nn.relu(x)
    x = Dense(16, kernel_initializer ="he_normal")(x)
    x = tf.nn.relu(x)
    x = Dense(8, kernel_initializer ="he_normal")(x)
    x = tf.nn.relu(x)
    x = Dense(1, kernel_initializer ="he_normal")(x)
    x = tf.nn.sigmoid(x)
    model = Model(x_input, x)
    return model

model = get_model()
model.load_weights(str(os.getcwd()) +"/weights.hdf5")

# +
def detect(gray,frame):
    # Annotate `frame` in place with a mask / no-mask box for every face found;
    # `gray` is the grayscale copy consumed by the Haar cascade.
    faces=face_cascade.detectMultiScale(gray, 1.3, 5)
    # faces is an array of (x, y, w, h) rectangles, one per detected face
    for (x,y,w,h) in faces:
        # when face is detected the region of interest is defined to check if the person has mask or not
        roi_frame = frame[y:y+h,x:x+w]
        roi_frame = cv2.cvtColor(roi_frame, cv2.COLOR_BGR2RGB) #BGR to RGB
        roi_frame = tf.cast(roi_frame, dtype = tf.float32)/255.0 #Standardization
        roi_frame = tf.image.resize(roi_frame, (INPUT_SHAPE[0], INPUT_SHAPE[1]))
        roi_frame = tf.expand_dims(roi_frame, axis=0)
        category = model(roi_frame, training = False) #CNN Prediction
        category = category.numpy()
        text = "not known"
        # NOTE(review): if the score is exactly 0.5 neither branch runs and
        # `color` is unbound, so cv2.rectangle below raises NameError -- confirm.
        if category>0.5:
            color = (0, 0, 255) #BGR in opencv
            text = "No Mask "
        elif category<0.5:
            color = (0, 255, 0)
            text = "Mask "
        # To Draw line and write mask or no mask
        cv2.rectangle(frame, (x, y), (x+w, y+h),color,2)
        cv2.putText(frame, text, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)
    return frame

# for turning on webcam
video_capture= cv2.VideoCapture(0)

# infinite loop
while True:
    _,frame = video_capture.read()
    gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    canvas = detect(gray,frame)
    cv2.imshow('Video',canvas)
    if cv2.waitKey(1) & 0xFF == ord('q'): #Press "q" to turn off the detection
        break

video_capture.release()
cv2.destroyAllWindows()
# -
face_mask_detect.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# This flips all the images horizontally to double the data
# (From: https://udacity-robotics.slack.com/archives/C7A5HT92M/p1507226777000533)
import os
import glob
from scipy import misc
import numpy as np

def flip_and_save_images(img_dir, extension):
    """Horizontally mirror every ``*.extension`` image in ``img_dir`` and save
    each mirrored copy next to the original with a ``flipped`` filename prefix.

    NOTE(review): os.chdir permanently changes the process working directory,
    so any later cell that uses relative paths is affected — confirm intended.
    NOTE(review): scipy.misc.imread/imsave were removed in SciPy >= 1.2; this
    cell assumes an old SciPy install (or needs porting to imageio/Pillow).
    """
    os.chdir(img_dir)
    files = glob.glob("*." + extension)
    for i, file in enumerate(files):
        print(i)  # progress indicator: one line per processed image
        img = misc.imread(file, flatten=False, mode='RGB')
        flipped_img = np.fliplr(img)  # left-right (horizontal) mirror
        misc.imsave("flipped" + file, flipped_img)

################
# NOTE(review): train_mask_directory / train_images_directory are not defined
# anywhere in this chunk — presumably set in an earlier notebook cell; verify.
flip_and_save_images(train_mask_directory, "png")
flip_and_save_images(train_images_directory, "jpeg")
# -

# Adding checkpoints for each epoch, to see how each one did relative to the previous one
# (From: https://udacity-robotics.slack.com/archives/C7A5HT92M/p1507151193000223)
# NOTE(review): `keras` is not imported in this chunk; assumed imported elsewhere.
tensorBoard_cb = keras.callbacks.TensorBoard(log_dir='./logs')
# checkpoint: filename embeds the epoch number and validation loss of each save
filepath="weights-improvement-{epoch:02d}-{val_loss:.2f}.h5"
checkpoint_cb = keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, period=1)
callbacks = [tensorBoard_cb, checkpoint_cb]

# +
# Code to delete raw images that do NOT contain the hero:
# (From: https://udacity-robotics.slack.com/archives/C7A5HT92M/p1507092634000032)
import glob
import os
import shutil
import sys
import argparse

import numpy as np
from scipy import misc

def get_cam3_files(files):
    # Keep only the cam3 (mask) files, sorted for a deterministic order.
    is_cam3 = lambda x: x.find('cam3_') != -1
    return sorted(list(filter(is_cam3, files)))

def get_cam3_file_id(filename, base_path):
    # Strip directory, 'cam3_' prefix and '.png' suffix, leaving the capture id
    # shared by all four camera files of the same shot.
    return filename.replace(base_path,'').replace('/','').replace('cam3_','').replace('.png','')

def delete_all_cam_files(id, path):
    # Remove the cam1..cam4 files belonging to the same capture id.
    # NOTE(review): parameter name `id` shadows the builtin.
    c1 = path+'/'+'cam1_'+id+'.png'
    c2 = path+'/'+'cam2_'+id+'.png'
    c3 = path+'/'+'cam3_'+id+'.png'
    c4 = path+'/'+'cam4_'+id+'.png'
    delete_file(c1)
    delete_file(c2)
    delete_file(c3)
    delete_file(c4)

def delete_file(filename):
    # Best-effort delete: silently ignore files that are already gone.
    try:
        os.remove(filename)
    except OSError:
        pass

def contains_hero(filename):
    # True when channel 0 of the mask saturates (value 255) anywhere,
    # which is taken to mean the hero appears in this capture.
    # Commenting next 2 lines based on post: https://udacity-robotics.slack.com/archives/C7A5HT92M/p1507092955000027?thread_ts=1507092634.000032&cid=C7A5HT92M
    # s = np.sum(misc.imread(filename)[:,:,0])
    # return s < 16711680
    # Instead, we use this:
    return misc.imread(filename)[:,:,0].max() == 255
    # (From: https://udacity-robotics.slack.com/archives/C7A5HT92M/p1507092955000027?thread_ts=1507092634.000032&cid=C7A5HT92M)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('path', help='The image path to filter')
    args = parser.parse_args()

    base_path = args.path
    files = glob.glob(os.path.join(base_path, '*.png'))
    cam3 = get_cam3_files(files)

    # Delete every capture whose cam3 mask shows no hero.
    for f in cam3:
        if(not contains_hero(f)):
            id = get_cam3_file_id(f, base_path)
            delete_all_cam_files(id, base_path)
# -

# +
# To determine % of files that contain the hero:
# (From: https://udacity-robotics.slack.com/files/U4UKR0C5Q/F7DTF3D1C/Script_to_see_what___of_training_image_masks_contain_the_hero.py)
# By tokyo_adam 4-10-17

import cv2
import numpy as np
import glob, os

# set to the directory where your masks are saved
img_dir = "../data/train/masks/"

total_files = 0
total_hero = 0

os.chdir(img_dir)
for file in glob.glob("*.png"):
    total_files +=1
    img = cv2.imread(file)
    # cv2.imread returns BGR, so channel 0 is blue; the hero is marked in blue.
    blue = img[:,:,0]
    if np.any(blue == 255):
        total_hero += 1

percent_hero = 100. * total_hero / total_files
print (percent_hero, "percent of files contain the hero")
# -

# # Simulator Instructions
#
# During data collection, when does the hero and crowd spawn? Here are the steps I follow:
# * mark points for quad patrol as well as hero movement using `P`, `O` and `I`.
# * Switch from local control to patrol/follow mode using `H`.
# * Press `M` to spawn the hero and crowd ? This step is actually not very clear from the instructions.
# Last point was:
# https://udacity-robotics.slack.com/archives/C7A5HT92M/p1507278812000121

# +
# Logging callback for Tensorboard
# (From: https://udacity-robotics.slack.com/archives/C7A5HT92M/p1507762674000185?thread_ts=1507760279.000071&cid=C7A5HT92M)
#
#logger_cb = plotting_tools.LoggerPlotter()
# NOTE(review): `keras` is not imported in this chunk; assumed imported elsewhere.
logger_cb = keras.callbacks.TensorBoard(log_dir='./logs/model1c', histogram_freq=1, batch_size=32, \
                             write_graph=True, write_grads=False, write_images=False, \
                             embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)

# Save weights every 5 epochs; the filename embeds epoch number and val_loss.
save_cb = keras.callbacks.ModelCheckpoint(filepath='./saved_chkpts/model1c/weights-e{epoch:02d}-{val_loss:.2f}.hdf5', verbose=1, period=5)

callbacks = [logger_cb, save_cb]
# -

# Using the base dataset, after every run i see that my `iou3` (when hero is far) is very low which is bringing down the final score. I think this is because of less images where hero is far away in the total dataset. I am working on adding more images to the dataset to balance out the classes.
#
# For me the encoder block did matter. Having 2 convolution layers that do not change the size of the layer, before the third convolution that also resizes the layer by using a stride of 2.
#
# I also used 4-5 encoding blocks and the same number of decoding (although not sure that it actually mattered)
#
# Now 33% is very low and I cannot explain it.
#
# Use a lot of epochs.. 100 for example... and do not use all the data for each epoch. The problem is that the dataset is not balanced , so you will probably not get a good result (there are too few images with the target far away)
#
# Here are the counts from training data
# ```no hero = 2576
# hero very far = 521
# hero not too close = 621
# hero close = 413```
#
# These are calculated based on the number of blue pixels in the `mask.png`. the size of the mask is `256x256`
# ```no hero = 0 pixels
# hero very far = 1 to 40 pixels
# hero not tool close = 40 to 400 pixels
# hero close = > 400 pixels```
#
# I was able to get 41.6% with base data and without changing the kernel size, i used
# ```lr = 0.001
# epoch = 12
# batch size = 32
# ```
# I used 4 encoder layers and 4 decoder layers. strides 2 and upsampled by 2. I tried a lot of different variations but 43% is the max i could do. 20 is my max epochs and i seem to saturate before that.
#
# At every step, the code randomly picks `batch_size` number of images to train. so, steps per epoch is basically how many times to you do that in every epoch. training loss is getting updated at the end of every step and validation loss is getting updated at the end of every epoch.
#
# With just the provided data I could achieve ~41%. Could improve a bit more with adding my own data of the hero. I would echo @annnn's suggestion to try some experiments with model depth and hyperparams! Btw in my experience making really deep model actually made the final performance worse (I presume overfitting) so your milage may vary just with that.

# +
# Saving model:
# Checkpoint filename embeds epoch number and validation loss of each save.
filepath="weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
checkpoint_cb = keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, period=1)

#...and add it to the callbacks list where the plotting function is called. You can tune what is saved in the checkpoint. That way you can select which ever epoch performs best
# Eg:
# NOTE(review): `plotting_tools` is a project-local helper not visible in this chunk.
logger_cb = plotting_tools.LoggerPlotter()
filepath="weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
checkpoint_cb = keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, period=1)
callbacks = [logger_cb,checkpoint_cb]
# -

# Best thing to increase the score: collecting your own data. Focus on hero far away

# The dropout method is usually used in the last few layers. However, it would lead to slow convergence speed and long training time.
As mentioned by @chedanix, if you don't train your model for hundreds of epochs, or you don't notice an overfitting issue in your model, there is no need to use dropout, in my experience.

# You can add `model.summary()` before `model.fit_generator` to see the entire summary of the architecture.

# Visualizing through Keras
# https://keras.io/visualization/
#
# You can use Keras to generate a figure of your network:
# If you run a cell with `plotting_tools.plot_keras_model(model, 'name_of_fig')` there will be two plots generated in `/data/figures`. I've used the one with _shapes at the end.
#
# Also, run "conda update --all"

# Tip
# Try to have more conv layers in the encoders and decoders, and turn on the image augmentation. Then set the steps per epoch to around 20 to 50.
#
# https://udacity-robotics.slack.com/archives/C7A5HT92M/p1511798020000230
#
# https://udacity-robotics.slack.com/archives/C7A5HT92M/p1513618303000105

# Here's the network image: https://udacity-robotics.slack.com/files/U73U6DK55/F8GP5GQ6R/model.png
# https://udacity-robotics.slack.com/files/U73U6DK55/F8GGSKT2P/image.png
# https://udacity-robotics.slack.com/files/U73U6DK55/F8GP7RR9T/image.png
#
# How can I check whether I'm actually using the GPU?:
# > In a terminal on the same machine you are training on, type `nvidia-smi`. Its output shows the GPU utilization.
# # > Check this thread out as well if you are using AWS https://udacity-robotics.slack.com/archives/C7A5HT92M/p1510501438000047?thread_ts=1510501438.000047&cid=C7A5HT92M # # > https://udacity-robotics.slack.com/archives/C7A5HT92M/p1511251327000239 # https://udacity-robotics.slack.com/archives/C7A5HT92M/p1511758359000169 # https://udacity-robotics.slack.com/archives/C7A5HT92M/p1511917591000148 # # For persistent Jupyter session: # https://udacity-robotics.slack.com/archives/C7A5HT92M/p1510929720000073 # # # About encoders and decoders and 1x1: # https://udacity-robotics.slack.com/archives/C7A5HT92M/p1511695064000027 # https://udacity-robotics.slack.com/archives/C7A5HT92M/p1511895827000222 # Frmo: https://udacity-robotics.slack.com/files/U683C0H52/F8JCWV599/-.py class BatchIteratorSimple(Iterator): def __init__(self, data_folder, batch_size, image_shape, num_classes=3, training=True, shuffle=True, seed=None, shift_aug=False): self.num_classes = num_classes self.shift_aug = shift_aug self.data_folder = data_folder self.batch_size = batch_size self.training = training self.image_shape = tuple(image_shape) im_files = sorted(glob(os.path.join(data_folder, 'images', '*.jpeg'))) mask_files = sorted(glob(os.path.join(data_folder, 'masks', '*.png'))) if len(im_files) == 0: raise ValueError('No image files found, check your image diractories') if len(mask_files) == 0: raise ValueError('No mask files found, check your mask directories') self.file_tuples = list(zip(im_files, mask_files)) self.n = len(self.file_tuples) super(BatchIteratorSimple, self).__init__(self.n, batch_size, shuffle, seed) # + # How to visualize the NN: from tensorflow.contrib.keras.python.keras.utils import plot_model import pydot #scroll down to the training section and beaeath the existing line...: model.compile(optimizer=keras.optimizers.Adam(learning_rate), loss='categorical_crossentropy') #... 
add this line plot_model(model, to_file='model.png') # - # Running on local GPU: https://udacity-robotics.slack.com/archives/C7A5HT92M/p1514928010000521 # + You need to install, into your RoboND environment: * - Keras: * Use > pip install git+git://github.com/keras-team/keras.git Anything but this version will not work. * - Tensorflow GPU: * Use > pip install tensorflow-gpu==1.3 *Do not remove Tensorflow 1.2.1* The correct versions of CUDNN and cudatoolkit should install automatically, in this case cuda 8 and CUDNN 6.
docs/notes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="Tce3stUlHN0L" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="tuOe1ymfHZPu" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="MfBg1C5NB3X0" # # Keras 的分布式训练 # + [markdown] colab_type="text" id="r6P32iYYV27b" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://tensorflow.google.cn/tutorials/distribute/keras"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png" />在 tensorflow.google.cn 上查看</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/distribute/keras.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png" />在 Google Colab 运行</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/distribute/keras.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png" />在 Github 上查看源代码</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/tutorials/distribute/keras.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png" />下载此 notebook</a> # </td> # 
</table> # + [markdown] colab_type="text" id="GEe3i16tQPjo" # Note: 我们的 TensorFlow 社区翻译了这些文档。因为社区翻译是尽力而为, 所以无法保证它们是最准确的,并且反映了最新的 # [官方英文文档](https://www.tensorflow.org/?hl=en)。如果您有改进此翻译的建议, 请提交 pull request 到 # [tensorflow/docs](https://github.com/tensorflow/docs) GitHub 仓库。要志愿地撰写或者审核译文,请加入 # [<EMAIL> Google Group](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-zh-cn)。 # + [markdown] colab_type="text" id="xHxb-dlhMIzW" # ## 概述 # # `tf.distribute.Strategy` API 提供了一个抽象的 API ,用于跨多个处理单元(processing units)分布式训练。它的目的是允许用户使用现有模型和训练代码,只需要很少的修改,就可以启用分布式训练。 # # 本教程使用 `tf.distribute.MirroredStrategy`,这是在一台计算机上的多 GPU(单机多卡)进行同时训练的图形内复制(in-graph replication)。事实上,它会将所有模型的变量复制到每个处理器上,然后,通过使用 [all-reduce](http://mpitutorial.com/tutorials/mpi-reduce-and-allreduce/) 去整合所有处理器的梯度(gradients),并将整合的结果应用于所有副本之中。 # # `MirroredStategy` 是 tensorflow 中可用的几种分发策略之一。 您可以在 [分发策略指南](../../guide/distribute_strategy.ipynb) 中阅读更多分发策略。 # # + [markdown] colab_type="text" id="MUXex9ctTuDB" # ### Keras API # # 这个例子使用 `tf.keras` API 去构建和训练模型。 关于自定义训练模型,请参阅 [tf.distribute.Strategy with training loops](training_loops.ipynb) 教程。 # + [markdown] colab_type="text" id="Dney9v7BsJij" # ## 导入依赖 # + colab={} colab_type="code" id="r8S3ublR7Ay8" # 导入 TensorFlow 和 TensorFlow 数据集 import tensorflow_datasets as tfds import tensorflow as tf tfds.disable_progress_bar() import os # + colab={} colab_type="code" id="jgLiitFbwXkR" print(tf.__version__) # + [markdown] colab_type="text" id="hXhefksNKk2I" # ## 下载数据集 # + [markdown] colab_type="text" id="OtnnUwvmB3X5" # 下载 MNIST 数据集并从 [TensorFlow Datasets](https://tensorflow.google.cn/datasets) 加载。 这会返回 `tf.data` 格式的数据集。 # + [markdown] colab_type="text" id="lHAPqG8MtS8M" # 将 `with_info` 设置为 `True` 会包含整个数据集的元数据,其中这些数据集将保存在 `info` 中。 除此之外,该元数据对象包括训练和测试示例的数量。 # # + colab={} colab_type="code" id="iXMJ3G9NB3X6" datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True) mnist_train, mnist_test = datasets['train'], datasets['test'] # + [markdown] 
colab_type="text" id="GrjVhv-eKuHD" # ## 定义分配策略 # + [markdown] colab_type="text" id="TlH8vx6BB3X9" # 创建一个 `MirroredStrategy` 对象。这将处理分配策略,并提供一个上下文管理器(`tf.distribute.MirroredStrategy.scope`)来构建你的模型。 # + colab={} colab_type="code" id="4j0tdf4YB3X9" strategy = tf.distribute.MirroredStrategy() # + colab={} colab_type="code" id="cY3KA_h2iVfN" print('Number of devices: {}'.format(strategy.num_replicas_in_sync)) # + [markdown] colab_type="text" id="lNbPv0yAleW8" # ## 设置输入管道(pipeline) # + [markdown] colab_type="text" id="psozqcuptXhK" # 在训练具有多个 GPU 的模型时,您可以通过增加批量大小(batch size)来有效地使用额外的计算能力。通常来说,使用适合 GPU 内存的最大批量大小(batch size),并相应地调整学习速率。 # + colab={} colab_type="code" id="p1xWxKcnhar9" # 您还可以执行 info.splits.total_num_examples 来获取总数 # 数据集中的样例数量。 num_train_examples = info.splits['train'].num_examples num_test_examples = info.splits['test'].num_examples BUFFER_SIZE = 10000 BATCH_SIZE_PER_REPLICA = 64 BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync # + [markdown] colab_type="text" id="0Wm5rsL2KoDF" # 0-255 的像素值, [必须标准化到 0-1 范围](https://en.wikipedia.org/wiki/Feature_scaling)。在函数中定义标准化。 # + colab={} colab_type="code" id="Eo9a46ZeJCkm" def scale(image, label): image = tf.cast(image, tf.float32) image /= 255 return image, label # + [markdown] colab_type="text" id="WZCa5RLc5A91" # 将此功能应用于训练和测试数据,随机打乱训练数据,并[批量训练](https://tensorflow.google.cn/api_docs/python/tf/data/Dataset#batch)。 请注意,我们还保留了训练数据的内存缓存以提高性能。 # # + colab={} colab_type="code" id="gRZu2maChwdT" train_dataset = mnist_train.map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE) eval_dataset = mnist_test.map(scale).batch(BATCH_SIZE) # + [markdown] colab_type="text" id="4xsComp8Kz5H" # ## 生成模型 # + [markdown] colab_type="text" id="1BnQYQTpB3YA" # 在 `strategy.scope` 的上下文中创建和编译 Keras 模型。 # + colab={} colab_type="code" id="IexhL_vIB3YA" with strategy.scope(): model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(), 
tf.keras.layers.Flatten(), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(10) ]) model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) # + [markdown] colab_type="text" id="8i6OU5W9Vy2u" # ## 定义回调(callback) # + [markdown] colab_type="text" id="YOXO5nvvK3US" # 这里使用的回调(callbacks)是: # # * *TensorBoard*: 此回调(callbacks)为 TensorBoard 写入日志,允许您可视化图形。 # * *Model Checkpoint*: 此回调(callbacks)在每个 epoch 后保存模型。 # * *Learning Rate Scheduler*: 使用此回调(callbacks),您可以安排学习率在每个 epoch/batch 之后更改。 # # 为了便于说明,添加打印回调(callbacks)以在笔记本中显示*学习率*。 # + colab={} colab_type="code" id="A9bwLCcXzSgy" # 定义检查点(checkpoint)目录以存储检查点(checkpoints) checkpoint_dir = './training_checkpoints' # 检查点(checkpoint)文件的名称 checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}") # + colab={} colab_type="code" id="wpU-BEdzJDbK" # 衰减学习率的函数。 # 您可以定义所需的任何衰减函数。 def decay(epoch): if epoch < 3: return 1e-3 elif epoch >= 3 and epoch < 7: return 1e-4 else: return 1e-5 # + colab={} colab_type="code" id="jKhiMgXtKq2w" # 在每个 epoch 结束时打印LR的回调(callbacks)。 class PrintLR(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): print('\nLearning rate for epoch {} is {}'.format(epoch + 1, model.optimizer.lr.numpy())) # + colab={} colab_type="code" id="YVqAbR6YyNQh" callbacks = [ tf.keras.callbacks.TensorBoard(log_dir='./logs'), tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix, save_weights_only=True), tf.keras.callbacks.LearningRateScheduler(decay), PrintLR() ] # + [markdown] colab_type="text" id="70HXgDQmK46q" # ## 训练和评估 # + [markdown] colab_type="text" id="6EophnOAB3YD" # 在该部分,以普通的方式训练模型,在模型上调用 `fit` 并传入在教程开始时创建的数据集。 无论您是否分布式训练,此步骤都是相同的。 # + colab={} colab_type="code" id="7MVw_6CqB3YD" model.fit(train_dataset, epochs=12, callbacks=callbacks) # + [markdown] colab_type="text" id="NUcWAUUupIvG" # 如下所示,检查点(checkpoint)将被保存。 # + colab={} colab_type="code" id="JQ4zeSTxKEhB" # 检查检查点(checkpoint)目录 # 
!ls {checkpoint_dir} # + [markdown] colab_type="text" id="qor53h7FpMke" # 要查看模型的执行方式,请加载最新的检查点(checkpoint)并在测试数据上调用 `evaluate` 。 # # 使用适当的数据集调用 `evaluate` 。 # + colab={} colab_type="code" id="JtEwxiTgpQoP" model.load_weights(tf.train.latest_checkpoint(checkpoint_dir)) eval_loss, eval_acc = model.evaluate(eval_dataset) print('Eval loss: {}, Eval Accuracy: {}'.format(eval_loss, eval_acc)) # + [markdown] colab_type="text" id="IIeF2RWfYu4N" # 要查看输出,您可以在终端下载并查看 TensorBoard 日志。 # # ``` # $ tensorboard --logdir=path/to/log-directory # ``` # + colab={} colab_type="code" id="LnyscOkvKKBR" # !ls -sh ./logs # + [markdown] colab_type="text" id="kBLlogrDvMgg" # ## 导出到 SavedModel # + [markdown] colab_type="text" id="Xa87y_A0vRma" # 将图形和变量导出为与平台无关的 SavedModel 格式。 保存模型后,可以在有或没有 scope 的情况下加载模型。 # + colab={} colab_type="code" id="h8Q4MKOLwG7K" path = 'saved_model/' # + colab={} colab_type="code" id="4HvcDmVsvQoa" model.save(path, save_format='tf') # + [markdown] colab_type="text" id="vKJT4w5JwVPI" # 在无需 `strategy.scope` 加载模型。 # + colab={} colab_type="code" id="T_gT0RbRvQ3o" unreplicated_model = tf.keras.models.load_model(path) unreplicated_model.compile( loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) eval_loss, eval_acc = unreplicated_model.evaluate(eval_dataset) print('Eval loss: {}, Eval Accuracy: {}'.format(eval_loss, eval_acc)) # + [markdown] colab_type="text" id="YBLzcRF0wbDe" # 在含 `strategy.scope` 加载模型。 # + colab={} colab_type="code" id="BBVo3WGGwd9a" with strategy.scope(): replicated_model = tf.keras.models.load_model(path) replicated_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) eval_loss, eval_acc = replicated_model.evaluate(eval_dataset) print ('Eval loss: {}, Eval Accuracy: {}'.format(eval_loss, eval_acc)) # + [markdown] colab_type="text" id="MUZwaz4AKjtD" # ### 示例和教程 # 以下是使用 keras fit/compile 
分布式策略的一些示例: # 1. 使用`tf.distribute.MirroredStrategy` 训练 [Transformer](https://github.com/tensorflow/models/blob/master/official/nlp/transformer/transformer_main.py) 的示例。 # 2. 使用`tf.distribute.MirroredStrategy` 训练 [NCF](https://github.com/tensorflow/models/blob/master/official/recommendation/ncf_keras_main.py) 的示例。 # # [分布式策略指南](../../guide/distribute_strategy.ipynb#examples_and_tutorials)中列出的更多示例 # + [markdown] colab_type="text" id="8uNqWRdDMl5S" # ## 下一步 # # * 阅读[分布式策略指南](../../guide/distribute_strategy.ipynb)。 # * 阅读[自定义训练的分布式训练](training_loops.ipynb)教程。 # # 注意:`tf.distribute.Strategy` 正在积极开发中,我们将在不久的将来添加更多示例和教程。欢迎您进行尝试。我们欢迎您通过[ GitHub 上的 issue ](https://github.com/tensorflow/tensorflow/issues/new) 提供反馈。
site/zh-cn/tutorials/distribute/keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Introduction # # From the [PVSC44 TL sensitivity](PVSC44%20TL%20sensitivity.ipynb) we concluded that: # # * Overal MACC data is higher than corresponding static or optimized $T_L$ which leads to low dyanamic predictions. # * For at least 3 SURFRAD stations: bon, psu and sxf high $T_L$ in summer caused a seasonal bias in GHI predictions. # * For the high elevation stations: tbl and dra, that also have very high DNI and low DHI, optimization of $T_L$ led # to unphysical values, indicating that another factor may, such as sensor error, may be affecting predictions more than # atmospheric conditions. # # In Ineichen's review paper [Ineichen, 2016], underestimation of all models was also attributed to MACC AOD values. Therefore, we attempt to scale the ECMWF AOD data with the static $T_L$ values to reduce the error, but allow the real-time data to be used with the Simplified Solis and Bird models that don't use $T_L$ # # ## References # Ineihen, Pierre, "Validation of Models that Estimate Clear Sky Global and Beam Solar Irradiance", _Solar Energy_ 2016 DOI: 10.1016/j.solener.2016.03.017 # + # imports and settings import os import h5py from matplotlib import pyplot as plt import numpy as np import pandas as pd import pvlib import seaborn as sns import statsmodels.api as sm from scipy.optimize import minimize_scalar from pvsc44_clearsky_aod import ecmwf_macc_tools # %matplotlib inline sns.set_context('notebook', rc={'figure.figsize': (8, 6)}) sns.set(font_scale=1.5) # - # get the "metadata" that contains the station id codes for the SURFRAD data that was analyzed METADATA = pd.read_csv('metadata.csv', index_col=0) # load calculations for each station atm_params_3min_clear = {} for station_id in METADATA.index: with 
h5py.File('%s_3min_clear_atm_params.h5' % station_id, 'r') as f: np_atm_params_3min_clear = pd.DataFrame(np.array(f['data'])) np_atm_params_3min_clear['index'] = pd.DatetimeIndex(np_atm_params_3min_clear['index']) np_atm_params_3min_clear.set_index('index', inplace=True) np_atm_params_3min_clear.index.rename('timestamps', inplace=True) atm_params_3min_clear[station_id] = np_atm_params_3min_clear # + # filter out low light # CONSTANTS MODELS = {'solis': 'SOLIS', 'lt': 'Linke', 'macc': 'ECMWF-MACC', 'bird': 'Bird'} CS = ['dni', 'dhi', 'ghi'] LOW_LIGHT = 200 # threshold for low light in W/m^2 is_bright = {} for station_id, station_atm_params_3min_clear in atm_params_3min_clear.iteritems(): is_bright[station_id] = station_atm_params_3min_clear['ghi'] > LOW_LIGHT # - TL_SENS = pd.read_csv('TL_sensitivity.csv') TL_SENS # For each station, find the AOD scaling factor that makes the 2003 $T_L$ calculated equivalient to the static $T_L$ values. # # So I did that, but in the process I made a funny discovery - the MACC AOD only results in higher $T_L$ if you include low irradiance. But, if you filter out low light conditions, then the MACC AOD calculated $T_L$ actually matches the historical values well. 
# compare historic Linke turbidity to calculated # downsample to monthly averages to show long term trends f, ax = plt.subplots(2, 4, figsize=(24, 8), sharex=False) rc = 0 for station_id, station_atm_params_3min_clear in atm_params_3min_clear.iteritems(): r, c = rc // 4, rc % 4 station_tl = station_atm_params_3min_clear[['lt', 'lt_calc']][is_bright[station_id]] station_tl['lt'].groupby(lambda x: x.month).mean().plot(linewidth=5, ax=ax[r][c]) station_tl['lt_calc'].groupby(lambda x: x.month).mean().plot(linewidth=5, ax=ax[r][c]) for y in xrange(2003, 2013): lt = station_tl['lt_calc'][('%d-01-01 00:00:00' % y):('%d-12-31 23:59:59' % y)].groupby(lambda x: x.month).mean() lt.plot(linestyle=':', ax=ax[r][c]) ax[r][c].set_ylabel('$T_L$') ax[r][c].set_xlabel('month') ax[r][c].legend(['static', 'average', 'yearly']) ax[r][c].set_title('$T_L$ at %s' % station_id) ax[r][c].set_ylim([2, 6]) rc += 1 ax[1][3].axis('off') f.tight_layout() plt.savefig('Linke_turbidity_allyears_monthly.png') bon2003 = atm_params_3min_clear['bon'][['lt', 'lt_calc']][is_bright['bon']]['2003-01-01 00:00:00':'2003-12-31 23:59:59'] monthly_2003_tl = bon2003.resample('M').mean() monthly_2003_tl.plot() mean_2003_tl = monthly_2003_tl.mean() mean_2003_tl['lt']/mean_2003_tl['lt_calc'] monthly_2003_tl['scaled'] = monthly_2003_tl['lt_calc']*mean_2003_tl['lt']/mean_2003_tl['lt_calc'] monthly_2003_tl.plot() mean_monthly_2003_tl = monthly_2003_tl['lt'] / monthly_2003_tl['lt_calc'] mean_monthly_2003_tl atm_params_2003 = atm_params_3min_clear['bon'][['amp', 'pwat', 'tau700', 'lt']][is_bright['bon']]['2003-01-01 00:00:00':'2003-12-31 23:59:59'] def _poop(x, amp=atm_params_2003['amp'], pwat=atm_params_2003['pwat'], bbaod=atm_params_2003['tau700']): lt_calc = pvlib.atmosphere.kasten96_lt(amp, pwat, (x * bbaod)) lt_calc_monthly = lt_calc.resample('M').mean() lt_monthly = atm_params_2003['lt'].resample('M').mean() return np.sum((lt_calc_monthly - lt_monthly)**2) res = minimize_scalar(_poop) res 
monthly_2003_tl['scaled_monthly'] = pvlib.atmosphere.kasten96_lt(atm_params_2003['amp'], atm_params_2003['pwat'], res['x']*atm_params_2003['tau700']).resample('M').mean() monthly_2003_tl.plot() solis_scaled = pvlib.clearsky.simplified_solis( atm_params_3min_clear['bon']['apparent_elevation'], atm_params_3min_clear['bon']['tau700']*res['x'], atm_params_3min_clear['bon']['pwat'], pressure=atm_params_3min_clear['bon']['press'], dni_extra=atm_params_3min_clear['bon']['etr'] ) solis_scaled.rename(columns={'ghi': 'scaled_ghi', 'dni': 'scaled_dni', 'dhi': 'scaled_dhi'}, inplace=True) solis_scaled = pd.concat([solis_scaled, atm_params_3min_clear['bon'][['solis_ghi', 'solis_dni', 'solis_dhi', 'ghi', 'dni', 'dhi']]], axis=1) solis_scaled['solis_ghi_err'] = solis_scaled['solis_ghi'] - solis_scaled['ghi'] solis_scaled['solis_dni_err'] = solis_scaled['solis_dni'] - solis_scaled['dni'] solis_scaled['solis_dhi_err'] = solis_scaled['solis_dhi'] - solis_scaled['dhi'] solis_scaled['ghi_err'] = solis_scaled['scaled_ghi'] - solis_scaled['ghi'] solis_scaled['dni_err'] = solis_scaled['scaled_dni'] - solis_scaled['dni'] solis_scaled['dhi_err'] = solis_scaled['scaled_dhi'] - solis_scaled['dhi'] solis_scaled['ghi_norm'] = solis_scaled['ghi_err']**2 solis_scaled['dni_norm'] = solis_scaled['dni_err']**2 solis_scaled['dhi_norm'] = solis_scaled['dhi_err']**2 solis_scaled_annual = solis_scaled.resample('A').mean() solis_scaled_annual['ghi_rel'] = solis_scaled_annual['ghi_err'] / solis_scaled_annual['ghi'] solis_scaled_annual['dni_rel'] = solis_scaled_annual['dni_err'] / solis_scaled_annual['dni'] solis_scaled_annual['dhi_rel'] = solis_scaled_annual['dhi_err'] / solis_scaled_annual['dhi'] solis_scaled_annual['solis_ghi_rel'] = solis_scaled_annual['solis_ghi_err'] / solis_scaled_annual['ghi'] solis_scaled_annual['solis_dni_rel'] = solis_scaled_annual['solis_dni_err'] / solis_scaled_annual['dni'] solis_scaled_annual['solis_dhi_rel'] = solis_scaled_annual['solis_dhi_err'] / 
solis_scaled_annual['dhi'] solis_scaled_annual[['ghi_rel', 'dni_rel', 'solis_ghi_rel', 'solis_dni_rel']].plot() # So, as it turns out, scaling the MACC AOD isn't necessary, or at least the magnitude of MACC AOD isn't an issue. And it doesn't seem to be too high.
PVSC44 ECMWF AOD Sensitivity.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 生成用户日志
#
# 本代码的目的是把用户分组,并在每个组中统计用户的行为日志,以方便后续的并行化处理。
#
# This code groups users into several groups, then collects the behavior log of
# each user within its group. Pre-grouping simplifies the multi-processing step.

# +
import multiprocessing as mp
import time
import pandas as pd
import numpy as np

def reduce_mem_usage(df):
    """Downcast every numeric column of ``df`` to the narrowest dtype that can
    hold its observed value range, and convert object columns to ``category``,
    to reduce memory usage.

    Returns the same DataFrame (modified in place).
    """
    # BUGFIX: memory_usage() reports *bytes*; divide by 1024**2 so the value
    # printed really is in MB (the original printed raw bytes labelled "MB").
    start_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
    for col in df.columns:
        col_type = df[col].dtype
        if col_type != object:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                # Pick the narrowest integer type whose range covers the data.
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                # Floats: float16 trades precision for memory; acceptable for
                # id/timestamp-derived features used here.
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
        else:
            df[col] = df[col].astype('category')
    end_mem = df.memory_usage().sum() / 1024**2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df

## 把[user_id, item_id]矩阵 转换成 {user_id: [item_id, item_id, ...., item_id]}字典
def generate_logs_for_each_group(matrix, q):
    """Worker entry point: convert a [user_id, item_id] matrix (ndarray of
    2-element rows) into a {user_id: [item_id, ...]} dict preserving row order,
    and put the dict on the message queue ``q``.
    """
    user_log = dict()
    for row in matrix:
        user_log.setdefault(row[0], []).append(row[1])
    print('This batch is finished')  # BUGFIX: message typo "batc" -> "batch"
    ## 把结果放到消息队列里 (hand the result back to the parent via the queue)
    q.put(user_log)
# -

## 指定用几个CPU处理 -- number of worker processes to spawn
CPU_NUMS = 8
# round2 train 的路径 -- directory containing the round-2 training CSVs
path = '../ECommAI_EUIR_round2_train_20190816/'

# +
data = reduce_mem_usage(pd.read_csv(path + 'user_behavior.csv', header=None))

user = pd.read_csv(path + 'user.csv', header=None)
item = pd.read_csv(path + 'item.csv', header=None)

# Column 3 is a unix timestamp in seconds; derive day index and hour of day.
data['day'] = data[3] // 86400
data['hour'] = data[3] // 3600 % 24
data = data.drop(3, axis=1)

# NOTE: 'behavoir' is misspelled but kept as-is -- downstream notebooks
# reference this exact column name.
data.columns = ['userID','itemID','behavoir','day','hour']
user.columns = ['userID', 'sex', 'age', 'ability']
item.columns = ['itemID', 'category', 'shop', 'band']

# Keep only the latest interaction per (user, item) pair, in chronological order.
data = data.drop_duplicates(['userID','itemID'], keep="last")
data = data.sort_values(['day','hour'], ascending=True).reset_index(drop=True)

users = list(set(user['userID']))

# Split users into chunks of len(users) // CPU_NUMS. The integer split can
# leave a remainder chunk, so len(user_groups) may be CPU_NUMS + 1.
user_groups = [users[i: i + len(users) // CPU_NUMS] for i in range(0, len(users), len(users) // CPU_NUMS)]

## 进程用消息队列沟通 -- workers report results through this queue
q = mp.Queue()

for groupID in range(len(user_groups)):
    matrix = data[data['userID'].isin(user_groups[groupID])][['userID','itemID']].values
    task = mp.Process(target=generate_logs_for_each_group, args=(matrix, q, ))
    task.start()

start_time = time.time()
print('Waiting for the son processing')
# Poll until every worker has put its result on the queue.
# NOTE: Queue.qsize() is approximate and raises NotImplementedError on macOS;
# sleep between polls so the parent does not busy-spin at 100% CPU.
while q.qsize() != len(user_groups):
    time.sleep(0.5)
end_time = time.time()
print("Over, the time cost is:" + str(end_time - start_time))
# -

for i in range(len(user_groups)):
    temp = q.get()
    ## 把生成的字典保存在文件中 -- persist each group's dict as a repr() text file
    with open('full_logs/userlogs_group' + str(i) + '.txt', 'w') as f:
        f.write(str(temp))
Semi-Finals/underline_trainning/Step1 itemCF_based_on_Apriori/1_generate_user_logs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # How I created fire_data_clean_withacres.csv # + #Imports # %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV from sklearn.preprocessing import StandardScaler from sklearn.datasets import load_iris from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder from sklearn.feature_selection import SelectKBest, f_regression from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.impute import SimpleImputer from sklearn.compose import make_column_transformer from sklearn.svm import SVC # - # https://www.fire.ca.gov/incidents/ fire_df = pd.read_csv('./data/fire.csv') fire_df.info() fire_df['incident_is_final'].unique() # + jupyter={"outputs_hidden": true} # radius of circle can be determined from this number fire_df['incident_acres_burned'].unique() # - fire_df['incident_longitude'].unique() fire_df['incident_latitude'].unique() #need to drop flood columns fire_df['incident_type'].unique() # when you click on it, give a warnign message saying it's active # Or have the circle be pulsing or something fire_df['is_active'].unique() # + jupyter={"outputs_hidden": true, "source_hidden": true} df_test = fire_df.loc[(fire_df['incident_is_final'] == False) & (fire_df['is_active'] == 'Y')] df_test.head() # + jupyter={"outputs_hidden": true} fire_df['incident_dateonly_extinguished'].unique() # + jupyter={"outputs_hidden": true} 
fire_df['incident_dateonly_created'].unique() # + df = fire_df.loc[fire_df['incident_type'] == 'Wildfire'] df = df[['incident_name', 'incident_longitude', 'incident_latitude', 'incident_is_final', 'incident_acres_burned', 'is_active', 'incident_dateonly_extinguished', 'incident_dateonly_created' ]] df.rename(columns = { 'y':'year', 'incident_name': 'name', 'incident_acres_burned': "acres_burned", 'incident_longitude': 'x', 'incident_latitude': 'y', 'incident_dateonly_extinguished': 'extinguished_date', 'incident_dateonly_created': 'created_date' }, inplace = True) df['extinguished_date'] = pd.to_datetime(df['extinguished_date']) df['created_date'] = pd.to_datetime(df['created_date']) # - df.tail() # + jupyter={"outputs_hidden": true} df_current = df.loc[df['incident_is_final'] == False] df_current # - df_current_acres_burned = df_current.loc[df_current['acres_burned'] > 0] df_current_acres_burned # + jupyter={"outputs_hidden": true} df_burned_acres_and_in_year = df.loc[(df['acres_burned'] > 0) & (df['created_date'].dt.year == 2020)] df_burned_acres_and_in_year # - df_acres_burned_alltime = df.loc[(df['acres_burned'] > 0)] df_acres_burned_alltime.head() # https://stackoverflow.com/questions/53399137/pandas-issue-with-pandas-nat-when-changing-from-pandas-libs-tslib-nattype isinstance(df_burned_acres_and_in_year['extinguished_date'].iloc[0],pd._libs.tslibs.nattype.NaTType) # + my_str = [] for x in range(len(df_acres_burned_alltime['name'])): if (isinstance(df_acres_burned_alltime['extinguished_date'].iloc[x],pd._libs.tslibs.nattype.NaTType)) == True: if df_acres_burned_alltime['is_active'].iloc[x] == 'Y': # ongoing: extinguished_date = NaT AND is_active = Y my_str.append('ongoing') elif df_acres_burned_alltime['is_active'].iloc[x] == 'N': # contained: extinguished_date = NaT AND is_active = N my_str.append('contained') else: # extinguished: extinguished_date != NaT my_str.append('extinguished') df_acres_burned_alltime['status'] = my_str df_acres_burned_alltime.head() 
# + # get all fires with status = contained or ongoing df_still_concerning = df_acres_burned_alltime.loc[(df_acres_burned_alltime['status'] == 'ongoing') | (df_acres_burned_alltime['status'] == 'contained')] df_still_concerning.head() # - df_still_concerning['created_date'].dt.year.min() # so the fires that are still a concern only started within these last two years df_final_draft = df_still_concerning[['name', 'x', 'y', 'acres_burned', 'status']] df_final_draft['radius'] = np.log(df_final_draft['acres_burned']) df_final_draft.head() plt.hist(df_final_draft['radius']); df_final = df_final_draft[['name', 'x', 'y', 'radius', 'acres_burned', 'status']] df_final.head() plt.scatter(x = df_final['x'], y = df_final['y'], c = df_final['radius']) df_final.to_csv('./data/fire_data_clean_withacres.csv') # + # if the extinguished_date is empty but the is_active is N, that means the fire is ongoing, but contained #new column: 'contained', 'extinguished', 'ongoing' (will put as a pop up status) fire_status = [] # ongoing: extinguished_date = NaT AND is_active = Y # contained: extinguished_date = NaT AND is_active = N df_extinguished_isnull = df_burned_acres_and_in_year[df_burned_acres_and_in_year['extinguished_date'].isnull()] for x in range(len(df_extinguished_isnull['extinguished_date'])): # extinguished: extinguished_date != NaT type(df_extinguished_isnull['extinguished_date']) # - df_current = fire_df.loc[fire_df['incident_is_final'] == False] df_past = fire_df.loc[fire_df['incident_is_final'] == True] df_past['incident_date_extinguished'].head() # make this column the datetime index df_past['incident_date_extinguished'] = pd.to_datetime(df_past['incident_date_extinguished']) df_past.set_index('incident_date_extinguished', inplace = True) df_past.info() df_current['incident_date_last_update'].head()
code/create_dataframes_code/create_clean_fire_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ### Notebook 16: Simulating RADseq data # #### Eaton et al. 2015 # + ## Requirements ## - Python 2.7 ## - pyrad v.3.1.0 (http://github.com/dereneaton/pyrad) ## - simrrls v.0.0.7 (http://github.com/dereneaton/simrrls) # - import itertools import ete2 import numpy as np import toyplot from collections import OrderedDict, Counter # ### Generate trees for simulations # Make a balanced tree with 64 tips and a tree length = 10 for n, node in enumerate(Tbal.get_leaves()): node.name = "t"+str(n) print Tbal.write() # + ## base tree Tbal = ete2.Tree() ## branch lengths bls = 10/6. ## first nodes n1 = Tbal.add_child(dist=bls) n2 = Tbal.add_child(dist=bls) ## make balanced tree while len(Tbal.get_leaves()) < 64: thisrep = Tbal.get_descendants() for node in thisrep: if len(node.get_children()) < 1: node.add_child(dist=bls) node.add_child(dist=bls) ## set leaf names for n, node in enumerate(Tbal.get_leaves()): node.name = "t"+str(n) ## Save newick string to file Tbal.write(outfile="Tbal.tre", format=3) # - # cat Tbal.tre # ### Make an imbalanced tree of the same total tree length with 64 tips # + ## base tree Timb = ete2.Tree() ## scale branches to match balanced treelength brlen = (bls*6.)/63 ## first nodes n1 = Timb.add_child(dist=brlen) n2 = Timb.add_child(dist=brlen) while len(Timb.get_leaves()) < 64: ## extend others for tip in Timb.get_leaves()[:-1]: tip.dist += brlen ## extend the last node Timb.get_leaves()[-1].add_child(dist=brlen) Timb.get_leaves()[-1].add_sister(dist=brlen) ## set leaf names for n, node in enumerate(Timb.get_leaves()): node.name = "t"+str(n) ## write to file Timb.write(outfile="Timb.tre", format=3) # - # cat Timb.tre # ### Check tree lengths # + print set([i.get_distance(Tbal) for i in Tbal]), 'treelength' print len(Tbal), 'tips' 
print set([i.get_distance(Timb) for i in Timb]), 'treelength' print len(Timb), 'tips' # - # ### Get topology from empirical study of Viburnum with 64 tips # Make tree ultrametric using the penalized likelihood # %load_ext rpy2.ipython # + magic_args="-w 400 -h 600" language="R" # library(ape) # # ## make tree ultrametric using penalized likelihood # Vtree <- read.tree("~/Dropbox/RAxML_bestTree.VIB_small_c85d6m4p99") # Utree <- ladderize(chronopl(Vtree, 0.5)) # Utree <- drop.tip(Utree, "clemensiae_DRY6_PWS_2135") # # ## multiply bls so tree length=6 # Utree$edge.length <- Utree$edge.length*3 # # ## save the new tree # write.tree(Utree, "Tvib.tre") # plot(Utree, cex=0.7, edge.width=2) # #edgelabels(round(Utree$edge.length,3)) # - #### load TVib tree into Python and print newick string Tvib = ete2.Tree("Tvib.tre") # ! cat Tvib.tre # # Simulate sequence data on each tree # Here I use the _simrrls_ program to simulate RADseq data on each input topology with locus dropout occurring with respect to phylogenetic distances. Find simrrls in my github profile. 
# + language="bash" # ## balanced tree # mkdir -p simulations/Tbal_full/ # mkdir -p simulations/Tbal_mut/ # mkdir -p simulations/Tbal_cov/ # # ## imbalanced tree # mkdir -p simulations/Timb_full/ # mkdir -p simulations/Timb_mut/ # mkdir -p simulations/Timb_cov/ # # ## empirical Viburnum tree # mkdir -p simulations/Tvib_full/ # mkdir -p simulations/Tvib_mut/ # mkdir -p simulations/Tvib_cov/ # - # #### show simrrls options # + language="bash" # simrrls -h # - # ### Simulate RAD data on the balanced tree without missing data # + language="bash" # simrrls -mc 0 -ms 0 -t Tbal.tre \ # -L 1000 -l 100 \ # -u 1e-9 -N 5e5 \ # -f rad -c1 CTGCAG \ # -o simulations/Tbal_full/Tbal # - # ### And with missing data from mutation-disruption # + language="bash" # simrrls -mc 1 -ms 1 -t Tbal.tre \ # -L 1000 -l 100 \ # -u 1e-9 -N 5e5 \ # -f rad -c CTGCAG \ # -s 300,600 \ # -o simulations/Tbal_mut/Tbal # - # ### And with missing data from low sequencing coverage # + language="bash" # simrrls -D 0 -t Tbal.tre \ # -L 1000 -l 100 \ # -u 1e-9 -N 5e5 \ # -f rad -c CTGCAG \ # -d 5,5 \ # -o simulations/Tbal_cov/Tbal # - # ### Assemble data sets in _pyRAD_ (v. 3.1.0) # + language="bash" # ## new params file # pyrad -n # + language="bash" # ## add phy and nex outputs # sed -i '/## 1. /c\simulations/Tbal_full/ ## 1. working dir ' params.txt # sed -i '/## 2. /c\simulations/Tbal_full/*.gz ## 2. data loc ' params.txt # sed -i '/## 3. /c\simulations/Tbal_full/*barcodes.txt ## 3. Bcode ' params.txt # sed -i '/## 6. /c\TGCAG ## 6. Cutter ' params.txt # sed -i '/## 7. /c\4 ## 7. Nproc ' params.txt # sed -i '/## 10. /c\.82 ## 10. clust thresh' params.txt # sed -i '/## 11. /c\rad ## 11. datatype ' params.txt # sed -i '/## 13. /c\6 ## 13. maxSH' params.txt # sed -i '/## 14. /c\Tbal ## 14. outname' params.txt # sed -i '/## 16. /c\ ## 16. addon taxa' params.txt # sed -i '/## 24./c\99 ## 24. maxH' params.txt # sed -i '/## 30./c\n,p,s ## 30. 
out format' params.txt # + language="bash" # pyrad -p params.txt -q # + language="bash" # sed -i '/## 1. /c\simulations/Tbal_mut ## 1. working dir ' params.txt # sed -i '/## 2. /c\simulations/Tbal_mut/*.gz ## 2. data loc ' params.txt # sed -i '/## 3. /c\simulations/Tbal_mut/*barcodes.txt ## 3. Bcode ' params.txt # + language="bash" # pyrad -p params.txt -q # + language="bash" # sed -i '/## 1. /c\simulations/Tbal_cov ## 1. working dir ' params.txt # sed -i '/## 2. /c\simulations/Tbal_cov/*.gz ## 2. data loc ' params.txt # sed -i '/## 3. /c\simulations/Tbal_cov/*barcodes.txt ## 3. Bcode ' params.txt # + language="bash" # pyrad -p params.txt -q # - # ### Functions for measuring shared data def getarray(loci, tree): """ parse the loci list and return a presence/absence matrix ordered by the tips on the tree""" ## order (ladderize) the tree tree.ladderize() ## get tip names names = tree.get_leaf_names() ## make empty matrix lxs = np.zeros((len(names), len(loci))) ## fill the matrix for loc in xrange(len(loci)): for seq in loci[loc].split("\n"): if ">" in seq: tname = seq.split()[0][1:-1] lxs[names.index(tname),loc] += 1 return lxs def countmatrix(lxsabove, lxsbelow, max=0): """ fill a matrix with pairwise data sharing between each pair of samples. You could put in two different 'share' matrices to have different results above and below the diagonal. Can enter a max value to limit fill along diagonal. 
""" share = np.zeros((lxsabove.shape[0], lxsbelow.shape[0])) ## fill above names = range(lxsabove.shape[0]) for row in lxsabove: for samp1,samp2 in itertools.combinations(names,2): shared = lxsabove[samp1, lxsabove[samp2,]>0].sum() share[samp1,samp2] = shared ## fill below for row in lxsbelow: for samp2,samp1 in itertools.combinations(names,2): shared = lxsabove[samp1, lxsabove[samp2,]>0].sum() share[samp1,samp2] = shared ## fill diagonal if not max: for row in range(len(names)): share[row,row] = lxsabove[row,].sum() else: for row in range(len(names)): share[row,row] = max return share locidata = open("simulations/Tbal_mut/outfiles/Tbal.loci") loci = locidata.read().split("|\n")[:-1] lxs = getarray(loci, Tbal) print lxs.shape print lxs share = countmatrix(lxs, lxs) print share.shape print share # ### Plotting function ## Get ordered names of tips on the tree tree = Tbal tree.ladderize() names = tree.get_leaf_names() floater = ["Taxon: %s" % i for i in names] # + colormap = toyplot.color.LinearMap(toyplot.color.brewer("Spectral"), domain_min=share.min(), domain_max=share.max()) canvas = toyplot.Canvas(width=1200, height=900) table = canvas.matrix(share, colormap=colormap, label="", bounds=(50, 500, 50, 500), step=5) ## make floater for grid for i,j in itertools.product(range(len(share)), repeat=2): table.body.cell(i,j).title='%s, %s : %s' % (names[i], names[j], int(share[i,j])) ## put box around grid table.body.grid.vlines[...,[0,-1]] = 'single' table.body.grid.hlines[[0,-1],...] 
= 'single' ## remove top and left labels for j in range(share.shape[1]): table.top.cell(0, j).data = "" for i in range(share.shape[0]): table.left.cell(i, 0).data = "" ## canvas for barplot axes = canvas.axes(bounds=(550, 650, 60, 510), label="", xlabel="", ylabel="") ## create barplot axes.bars(share.diagonal()[::-1], along="y", title = floater) ## make floater for barplot zf = zip(names[::-1], share.diagonal()[::-1]) barfloater = ["%s: %s" % (i,int(j)) for i,j in zf] ## Hide yspine, move labels to the left, ## use taxon names, rotate angle, align. axes.y.spine.show = False axes.y.ticks.labels.offset = -5 axes.y.ticks.locator = toyplot.locator.Explicit(range(64), labels=names[::-1]) axes.y.ticks.labels.angle = 0 axes.y.ticks.labels.style = {"baseline-shift":0, "text-anchor":"end", "font-size":"9px"} ## Rotate xlabels, align with ticks, ## change to thousands, move up on canvas, ## show ticks, and hide popup coordinates axes.x.ticks.labels.angle = 90 axes.x.ticks.labels.offset = 20 axes.x.ticks.locator = toyplot.locator.Explicit( [0,200,400,600,800,1000], ["0", "200", "400", "600", "800", "1000"]) axes.x.ticks.labels.style = {"baseline-shift":0, "text-anchor":"end", "-toyplot-anchor-shift":"15px"} axes.x.ticks.show = True axes.coordinates.show = False # -
old_sim_nb.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="m0HxmCDXrCZJ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} # %matplotlib inline # + id="-gr4suLdrEBI" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} import numpy as np import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (7,7) # Make the figures a bit bigger from keras.datasets import mnist from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.utils import np_utils # + id="kQApfw9IrFvP" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 51} outputId="d6ff9f55-5e2d-43fe-e89b-c26af744bf0d" executionInfo={"status": "ok", "timestamp": 1528032009983, "user_tz": -120, "elapsed": 710, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "115843019312458301179"}} nb_classes = 10 # the data, shuffled and split between tran and test sets (X_train, y_train), (X_test, y_test) = mnist.load_data() print("X_train original shape", X_train.shape) print("y_train original shape", y_train.shape) # + id="oDUpOuX0rH0_" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 443} outputId="be281d82-334b-4910-ec13-95558840341f" executionInfo={"status": "ok", "timestamp": 1528032011395, "user_tz": -120, "elapsed": 1218, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "115843019312458301179"}} for i in range(9): plt.subplot(3,3,i+1) plt.imshow(X_train[i], cmap='gray', interpolation='none') plt.title("Class {}".format(y_train[i])) # + id="mUxLKgForJkn" colab_type="code" colab={"autoexec": 
{"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 51} outputId="5f547a11-83f6-462d-8a51-642a3a2b1b45" executionInfo={"status": "ok", "timestamp": 1528032011919, "user_tz": -120, "elapsed": 498, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "115843019312458301179"}} X_train = X_train.reshape(60000, 784) X_test = X_test.reshape(10000, 784) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 X_test /= 255 print("Training matrix shape", X_train.shape) print("Testing matrix shape", X_test.shape) # + id="bJCFrPAzrLeu" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} Y_train = np_utils.to_categorical(y_train, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) # + id="WSFwx8AxrN4-" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} model = Sequential() model.add(Dense(512, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(10)) model.add(Activation('softmax')) # + id="9C7RojtlsRlp" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['acc']) # + id="AOSWAK1FsTwp" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 207} outputId="1e838b2c-dcb1-4952-ec04-6a9e7328575b" executionInfo={"status": "ok", "timestamp": 1528032063838, "user_tz": -120, "elapsed": 49667, "user": {"displayName": "Jann<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "115843019312458301179"}} model.fit(X_train, Y_train, batch_size = 128, epochs=4, validation_data=(X_test, Y_test)) # + id="UWSozJxYtg5Q" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} 
model = Sequential() model.add(Dense(512, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(10)) model.add(Activation('softmax')) # + id="I5Jlu4s-tkKQ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} from keras.optimizers import Adam # + id="Ub6p12C2uYJM" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 717} outputId="5d10f962-7845-4c86-eacc-d24b0bd09a80" executionInfo={"status": "ok", "timestamp": 1528032338945, "user_tz": -120, "elapsed": 273461, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "115843019312458301179"}} init_lr = 1e-6 losses = [] lrs = [] for i in range(20): model = Sequential() model.add(Dense(512, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(10)) model.add(Activation('softmax')) opt = Adam(lr=init_lr*2**i) model.compile(loss='categorical_crossentropy', optimizer=opt,metrics=['acc']) hist = model.fit(X_train, Y_train, batch_size = 128, epochs=1) loss = hist.history['loss'][0] losses.append(loss) lrs.append(init_lr*2**i) # + id="MN-wiXxLu3Q6" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 432} outputId="cbb94e6b-0d16-4991-87d5-cebd7c36c717" executionInfo={"status": "ok", "timestamp": 1528032339934, "user_tz": -120, "elapsed": 946, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "115843019312458301179"}} #lrs[0] = init_lr fig, ax = plt.subplots(figsize = (10,7)) plt.plot(lrs,losses) ax.set_xscale('log') # + id="g3R8-0s2vZT6" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, 
"base_uri": "https://localhost:8080/", "height": 170} outputId="6509ad3c-c535-4936-feb4-003306e04668" executionInfo={"status": "ok", "timestamp": 1528014652823, "user_tz": -120, "elapsed": 16748, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "115843019312458301179"}} i = 1 model = Sequential() model.add(Dense(512, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(10)) model.add(Activation('softmax')) opt = Adam(lr=init_lr*2**i) model.compile(loss='categorical_crossentropy', optimizer=opt,metrics=['acc']) hist = model.fit(X_train, Y_train, batch_size = 128, epochs=1) loss = hist.history['loss'][0] # + id="VEqJqsYKxQPU" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} outputId="991d4b7c-fb74-464e-9221-a400e4a3c406" executionInfo={"status": "ok", "timestamp": 1528014705865, "user_tz": -120, "elapsed": 498, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "115843019312458301179"}} hist.history['loss'][0] # + id="XdPPPNIexXlX" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
8.4 LR_Search.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.12 64-bit (''pytorch_m1'': conda)' # language: python # name: python3 # --- # ### GITHUB setup instructions are in a separate notebook # # #### it's published on the github repo # # SVM MODEL FOR NBA ROOKIE PLAYERS # # Load Packages # + # Load the packages needed for Modelling upfront import pandas as pd import numpy as np # packages for data processing from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV # packages for modelling from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier from sklearn.svm import SVC # packages for evaluation metrics from sklearn.metrics import mean_squared_error as mse from sklearn.metrics import mean_absolute_error as mae from sklearn.metrics import roc_auc_score from joblib import dump # - # # Load the data # Load the training data set train = pd.read_csv('/Users/jasle1/Desktop/MDSI/ADSI/AT_1/adsi_at1/data/raw/train.csv') # Load the test data set test = pd.read_csv('/Users/jasle1/Desktop/MDSI/ADSI/AT_1/adsi_at1/data/raw/test.csv') # # Explore the Data # check rows and columns of the training set train.shape # check rows and columns of the test set test.shape train.head() test.head() train.info() test.info() train.describe() # # Transform the Data # extract the target variable out as y = target y_train = train.pop('TARGET_5Yrs') y_train.shape # create a copy of test set for use later - this will merge with the prediction file for kaggle submission test_ID = test.copy() test_ID # Standardise the dataset scaler = StandardScaler() train = scaler.fit_transform(train) test = 
scaler.fit_transform(test) # Split the training data into train and validation sets for evaluation # set the validation set at 20% of the full training set X_train, X_val, y_train, y_val = train_test_split(train, y_train, test_size=0.2, random_state = 42) # # Train SVM Model # + # Instantiate SVM. svc = SVC( C=5, kernel="rbf", gamma="scale", probability= True ) # Fit on training data. svc.fit(X_train, y_train) # Evaluate model. print(f'Training Score: {svc.score(X_train, y_train)}') print(f'Testing Score: {svc.score(X_val, y_val)}') # - # Make predictions on the validation dataset val_pred = svc.predict_proba(X_val) val_pred # # Evaluate Model Performance # + # Calculate auc scores for performance evaluation auc_score = roc_auc_score(y_val, val_pred[:,1]) print(f'The Support Vector Machine (SVM) ROC AUC score is {auc_score}') # - # Make predictions on the test set now test_pred = svc.predict_proba(test) test_pred # *** save scaler in the models folder called "scaler.joblib" for use next time dump(svc, '/Users/jasle1/Desktop/MDSI/ADSI/AT_1/adsi_at1/models/SVC_scaled_2.13.joblib') # # Prepare the file for extracting final prediction output - for Kaggle submission # Convert the datasets into pandas dataframe for easy merge with prediction file test_id = pd.DataFrame(test_ID.iloc[:,0]) test_id # Convert the datasets into pandas dataframe for easy merge with prediction file test_pred = pd.DataFrame(test_pred[:,1]) test_pred.head() # it seems that the framework is giving out prediction for both yes and no # Now, merge the test data set with predictions data df = pd.concat([test, test_pred], axis=1) df.head() # Now, merge the test data set with predictions data - IF SCALED df = pd.concat([test_id, test_pred], axis=1) df.head() # Rename the '0' prediction column to align with Kaggle submission requirements df_final = df.rename(columns={0: 'TARGET_5Yrs'}) df_final.head() # Extract the ID and Prediction columns for Kaggle Submission submission = df_final[['Id', 
'TARGET_5Yrs']] submission.head() # save the submission file for kaggle upload submission.to_csv('jasleen_SVM_scaled_week2.13', index=False)
notebooks/Kaur_Jasleen/Kaur_Jasleen-13368028-week2_SVM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) import quandl import pandas as pd import numpy as np import matplotlib.pyplot as plt # * This notebook contain the complete rutine to download the Dow Jones stock market time series. In relation with the code: # # 1. This notebook is inspired in the folloing tutorials: # # * Part I: https://medium.com/python-data/effient-frontier-in-python-34b0c3043314 # # * Part II: https://medium.com/python-data/efficient-frontier-portfolio-optimization-with-python-part-2-2-2fe23413ad94 # # 2. The time series have been downloades from the Python engine of https://www.quandl.com # _________________________ # + # - # # 1. Download the data from Quandl app quandl.ApiConfig.api_key = 'PASTE YOUR API KEY HERE' selected = ["AXP","AAPL","BA","CAT","CSCO","CVX","DD","XOM","GE","GS", "HD","IBM","INTC","JNJ","KO","JPM","MCD","MMM","MRK","MSFT", "NKE","PFE","PG","TRV","UNH","UTX","VZ","V","WMT","DIS"] df1 = quandl.get_table('WIKI/PRICES', ticker = selected, qopts = { 'columns': ['date', 'ticker', 'adj_close'] }, date = { 'gte': '2010-1-1', 'lte': '2019-12-31' }, paginate = True) print(df1.shape) df1.head() print("Dow Jones Index firms: ",len(selected)) print("Founded firms: ",len(df1["ticker"].unique())) # # 2. 
Save the time series in .pkl format df1 = df1.pivot(columns = 'ticker',index = "date",values = "adj_close") df1 = pd.DataFrame(df1.to_records()).rename(columns={'date':'time'}) df1.head() df1.tail() df1.shape df2 = df1.sort_values("time") df2 = df2.set_index("time") df2.tail() df2.to_pickle("data sets/df_dow_jones.pkl") df3 = pd.read_pickle("data sets/df_dow_jones.pkl") df3.head() # + plt.style.use('seaborn-dark') df3.plot(cmap= "viridis",figsize=(20,15)) plt.grid() plt.show() # -
01 stock market data download.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <strong><h2>Machine Learning Analysis: Multi-Model Analysis on Formatted Data</h2></strong> # - With sentiment data in mind, let's take <strong>most common token occurrences</strong> in each clinical paper and attempt to <strong>predict its assigned class.</strong> # - We will use a multitude of multivariate classifiers (9 classes) and derive accuracy scores and confusion matrices. # + import pickle import numpy as np import pandas as pd import seaborn as sns from scipy import stats as scp from wordcloud import STOPWORDS from matplotlib import pyplot as plt from sklearn.pipeline import Pipeline from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn import svm from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.naive_bayes import MultinomialNB from sklearn.metrics import confusion_matrix # - TRAINING_TEXT_PATH = "./datasets/training/training_text" df_text = pd.read_csv(TRAINING_TEXT_PATH, sep="\n") df_text.head() TRAINING_VARIANTS_PATH = "./datasets/training/training_variants" df_variants = pd.read_csv(TRAINING_VARIANTS_PATH) df_variants.head() cleaned_text_data = list() for item in df_text['ID,Text']: data = item.split("||")[1] cleaned_text_data.append(data) df_text_clean = pd.DataFrame({"Clinical Evidence": cleaned_text_data}) df_text_clean.head() df_clean = pd.concat([df_variants, df_text_clean], axis=1) df_clean.head() df_clean.describe() df_class = df_clean[["Class"]] df_class.tail() # + # Bind more stopwords to tokens def _assign_stopwords(stop_words): for word in stop_words: STOPWORDS.add(word) stop_words = ["et", 
"al", "â", "Figure", "figure", "fig", "Supplementary", "We", "The", "Fig.", "In", "al.", "al.,", "(Fig."] _assign_stopwords(stop_words) # - # Initial training and test np.random.seed(0) train, test = train_test_split(df_clean, test_size=0.2) # Get training and testing data for X and Y X_train = train["Clinical Evidence"].values X_test = test["Clinical Evidence"].values Y_train = train["Class"].values Y_test = test["Class"].values # Initialize classifier models and save to iterable list svc = svm.LinearSVC() rfc = RandomForestClassifier() dtc = DecisionTreeClassifier() nbc = MultinomialNB() clfs = [svc, rfc, dtc, nbc] # Create text classifier pipeline for vectorizing, transforming, then fitting data text_clf = Pipeline([("vect", TfidfVectorizer(lowercase=True, stop_words="english", encoding="utf-8")), ("tfidf", TfidfTransformer()), # Does inverse sparse topic mapping ("clf", svc) # Fits data to classifier ]) # + # WARNING: Super long runtime # TODO: Benchmark each segment to identify laggards (suspect: PIPELINE) def model_predict(clf): text_clf = Pipeline([("vect", TfidfVectorizer(lowercase=True, stop_words="english", encoding="utf-8")), ("tfidf", TfidfTransformer()), ("clf", clf)]) text_clf.fit(X_train, Y_train) Y_test_pred = text_clf.predict(X_test) accuracy_score = np.mean(Y_test_pred == Y_test) cmat = confusion_matrix(Y_test, Y_test_pred) return accuracy_score, text_clf svc_acc, model = model_predict(clfs[0]) # rfc_acc, rfc_cmat = model_predict(clfs[1]) # dtc_acc, dtc_cmat = model_predict(clfs[2]) # nbc_acc, nbc_cmat = model_predict(clfs[3]) print(">> ACCURACY OF SUPPORT VECTOR CLASSIFIER IS: {:.4f}".format(svc_acc)) # print(">> ACCURACY OF RANDOM FOREST CLASSIFIER IS: {:.4f}".format(rfc_acc)) # print(">> ACCURACY OF DECISION TREE CLASSIFIER IS: {:.4f}".format(dtc_acc)) # print(">> ACCURACY OF MULTINOMIAL NAÏVE BAYES CLASSIFIER IS: {:.4f}\n".format(nbc_acc)) # - pkl_filename = "my_model.pkl" with open(pkl_filename, "wb") as file: pickle.dump(model, file) # + # 
CONFUSION MATRIX PLOTS def _plot_cmat(cmat, index): df_cm = pd.DataFrame(cmat, index = [i for i in range(1, 10)], columns = [i for i in range(1, 10)]) plt.figure(figsize = (10,7)) sns.heatmap(df_cm, annot=True) plt.title("CONFUSION MATRIX {}".format(index + 1)) for index, item in enumerate([svc_cmat, rfc_cmat, dtc_cmat, nbc_cmat]): _plot_cmat(item, index) # - # ## TODO: # - Scale data using StandardScaler() # - Implement Correlation Heatmap # - See if PCA helps # - Visualize Decision Tree and Pipeline Process # - Publish to Kaggle (late) # - Create Medium post and tutorial # - Benchmark for time and memory effectiveness # # ## NOTES: # - BRCA1 correlates highly with onset of breast cancer # - Check baseline prediction for class (should be 1/9 ~ 11.1%)
Final-Project/Redefining_Cancer_Treatment-Machine_Learning_Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers

# %pylab inline

plt.rcParams["figure.figsize"] = (8, 6)


def pca(X=np.array([]), no_dims=50):
    """Run PCA on the NxD array X and project onto the top `no_dims` components.

    BUGFIX: np.linalg.eig returns eigenpairs in no particular order, so the
    original code projected onto arbitrary components rather than the leading
    ones.  The (scaled) covariance matrix X^T.X is symmetric, so use eigh
    (real output, numerically stable) and sort eigenvalues descending before
    taking the first `no_dims` columns.
    """
    (n, d) = X.shape
    X = X - np.tile(np.mean(X, 0), (n, 1))   # center the data
    (l, M) = np.linalg.eigh(np.dot(X.T, X))
    order = np.argsort(l)[::-1]              # largest eigenvalue first
    Y = np.dot(X, M[:, order][:, 0:no_dims])
    return Y


train_configs = np.loadtxt("configs_L_100_phi0.txt")
train_labels = np.loadtxt("labels_L_100_phi0.txt")
NC = len(train_configs)
L = len(train_configs[0])
print("loaded %i configurations for a system of size %i" % (NC, L))


def prepoc(config):
    # map occupation numbers {0, 1} to spins {-1, +1}
    return [2*i - 1 for i in config]


# prepare data for training
# exclude disorder strength between 1.5 and 2.5
# note that I change input to be in [-1, 1] instead of [0,1]
lambda_low = 1.95
lambda_high = 2.05
x = []
y = []
x_labels = []   # labels of the *kept* configurations, needed to color the PCA plot
for i, label in enumerate(train_labels):
    if label < lambda_low:
        x.append(prepoc(train_configs[i]))
        y.append((1, 0))
        x_labels.append(label)
    if label > lambda_high:
        x.append(prepoc(train_configs[i]))
        y.append((0, 1))
        x_labels.append(label)
x = np.array(x)
y = np.array(y)

# BUGFIX: the color array must match the number of plotted points; the
# original passed the unfiltered `train_labels`, whose length differs from
# the filtered `x`, which raises in matplotlib.
PCA_coord = pca(x, no_dims=2)
plt.scatter(PCA_coord[:, 0], PCA_coord[:, 1], 30, x_labels)
plt.axis('off')
plt.show()

# +
hidden_layer = 100
# create model
model = Sequential()
model.add(Dense(hidden_layer, input_dim=L, kernel_initializer='random_normal',
                activation='sigmoid', kernel_regularizer=regularizers.l2(0.01)))
model.add(Dense(2, kernel_initializer='random_normal', activation='softmax',
                kernel_regularizer=regularizers.l2(0.0)))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# -

model.fit(x, y, epochs=100, batch_size=10, verbose=0)


def _phase_averages(labels, preds):
    """Average the two softmax outputs over all configurations that share
    each disorder strength.  Returns (lambdas, phase1, phase2).

    Extracted because the two test-set cells below repeated this loop.
    """
    lambdas = list(np.sort(list(set(labels))))
    Nl = len(lambdas)
    phase1 = np.zeros(Nl)
    phase2 = np.zeros(Nl)
    points = np.zeros(Nl)
    for i, T in enumerate(labels):
        j = lambdas.index(T)
        phase1[j] += preds[i, 0]
        phase2[j] += preds[i, 1]
        points[j] += 1.
    phase1 /= points
    phase2 /= points
    return lambdas, phase1, phase2


test_x_1 = np.array(np.loadtxt("test_configs_L_100_phi0.txt"))
test_label = np.array(np.loadtxt("test_labels_L_100_phi0.txt"))
t_x_1 = [prepoc(config) for config in test_x_1]   # reuse prepoc for consistency
prediction = model.predict(t_x_1)

lambdas, phase1, phase2 = _phase_averages(test_label, prediction)

plt.plot(lambdas, phase1, 'b', label="delocalized")
plt.plot(lambdas, phase2, 'r', label="localized")
plt.legend()
plt.show()

test_x_2 = np.array(np.loadtxt("test_configs_L_100_phi10.txt"))
test_label_2 = np.array(np.loadtxt("test_labels_L_100_phi10.txt"))
t_x_2 = [prepoc(config) for config in test_x_2]
prediction2 = model.predict(t_x_2)

# NOTE(review): the final comparison plot reuses `lambdas` from this second
# test set for both curves; this assumes the two test files share the same
# set of disorder strengths -- confirm against the data files.
lambdas, phase1_2, phase2_2 = _phase_averages(test_label_2, prediction2)

# This is just a small reminder that we didn't actually learn anything about
# the physics of localization, just where in the localized phase the particles would sit
plt.plot(lambdas, phase1, 'b', label="$\phi = 0$")
plt.plot(lambdas, phase2, 'r', label="$\phi = 0$")
plt.plot(lambdas, phase1_2, 'b--', label="$\phi = 10$")
plt.plot(lambdas, phase2_2, 'r--', label="$\phi = 10$")
plt.legend()
plt.xlabel("$\lambda$")
plt.show()
AA/Aubry-Andre-Preprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import json

from torchfly.text.utils import fix_tokenized_punctuations


# -

def _lines_to_jsonl(src_path, dst_path):
    """Convert a one-sentence-per-line text file to a JSONL file.

    Each output line is ``{"source": "", "target": sentence}`` with the
    sentence's tokenized punctuation normalized.  Extracted because the
    original train and test cells were identical except for the file names.
    """
    with open(src_path) as f:
        data = f.readlines()
    data = [fix_tokenized_punctuations(item).strip() for item in data]

    with open(dst_path, "w") as f:
        for item in data:
            line = json.dumps({"source": "", "target": item})
            f.write(line)
            f.write("\n")


_lines_to_jsonl("emnlp_news.txt", "train.jsonl")
_lines_to_jsonl("test_emnlp.txt", "test.jsonl")
data/EMNLP_NEWS/Process.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Some sentiment analysis results
# I've only ran some of the models at the sentiment corpora. Performance is not great: 60-70%, SOTA is around 90%

# +
# %cd ~/NetBeansProjects/ExpLosion/
from notebooks.common_imports import *
from gui.output_utils import *

# swap in the custom bootstrap routine for both seaborn plot families
sns.timeseries.algo.bootstrap = my_bootstrap
sns.categorical.bootstrap = my_bootstrap
# -

# finished clustering experiments on the two labelled sentiment corpora
ids = Experiment.objects.filter(
    labelled__in=['movie-reviews-tagged', 'aclImdb-tagged'],
    clusters__isnull=False,
).values_list('id', flat=True)
print(ids)

# columns to pull from the ORM into the results dataframe
column_spec = {
    'id': 'id',
    'labelled': 'labelled',
    'algo': 'clusters__vectors__algorithm',
    'unlab': 'clusters__vectors__unlabelled',
    'num_cl': 'clusters__num_clusters',
}
# NOTE(review): convert_objects is deprecated in modern pandas; presumably
# this project pins an old version -- confirm before migrating to to_numeric.
df = dataframe_from_exp_ids(ids, column_spec).convert_objects(convert_numeric=True)
performance_table(df)

# #MR is too small- CI is almost 12% wide!
notebooks/sentiment_results.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Transactions-EDA" data-toc-modified-id="Transactions-EDA-1">Transactions EDA</a></span></li><li><span><a href="#EDA-for-transaction_data.csv" data-toc-modified-id="EDA-for-transaction_data.csv-2">EDA for <code>transaction_data.csv</code></a></span></li><li><span><a href="#Business-Summary/tl;dr" data-toc-modified-id="Business-Summary/tl;dr-3">Business Summary/tl;dr</a></span></li><li><span><a href="#EDA-for-transaction_data.csv" data-toc-modified-id="EDA-for-transaction_data.csv-4">EDA for <code>transaction_data.csv</code></a></span><ul class="toc-item"><li><span><a href="#Data-Dictionary-for-transaction_data.csv" data-toc-modified-id="Data-Dictionary-for-transaction_data.csv-4.1">Data Dictionary for <code>transaction_data.csv</code></a></span></li></ul></li><li><span><a href="#General-Sales-Description" data-toc-modified-id="General-Sales-Description-5">General Sales Description</a></span><ul class="toc-item"><li><span><a href="#Removing-Empty-Sales-Rows" data-toc-modified-id="Removing-Empty-Sales-Rows-5.1">Removing Empty Sales Rows</a></span></li><li><span><a href="#Loyalty-Program" data-toc-modified-id="Loyalty-Program-5.2">Loyalty Program</a></span><ul class="toc-item"><li><span><a href="#Customer-Retention-and-Direct-Marketing" data-toc-modified-id="Customer-Retention-and-Direct-Marketing-5.2.1">Customer Retention and Direct Marketing</a></span></li><li><span><a href="#Subject-Matter-Expertise" data-toc-modified-id="Subject-Matter-Expertise-5.2.2">Subject Matter Expertise</a></span></li><li><span><a href="#Disclaimer-on-Sample-Bias" 
data-toc-modified-id="Disclaimer-on-Sample-Bias-5.2.3">Disclaimer on Sample Bias</a></span></li></ul></li><li><span><a href="#Finding-Christmas" data-toc-modified-id="Finding-Christmas-5.3">Finding Christmas</a></span><ul class="toc-item"><li><span><a href="#Weekly-Seasonality" data-toc-modified-id="Weekly-Seasonality-5.3.1">Weekly Seasonality</a></span></li></ul></li><li><span><a href="#Adding-Year" data-toc-modified-id="Adding-Year-5.4">Adding Year</a></span><ul class="toc-item"><li><span><a href="#Attaching-Hypothetical-Date-Range" data-toc-modified-id="Attaching-Hypothetical-Date-Range-5.4.1">Attaching Hypothetical Date Range</a></span></li></ul></li><li><span><a href="#Top-15-Sales-Dates" data-toc-modified-id="Top-15-Sales-Dates-5.5">Top 15 Sales Dates</a></span></li><li><span><a href="#Interpretation-and-Final-Datetime-Logic" data-toc-modified-id="Interpretation-and-Final-Datetime-Logic-5.6">Interpretation and Final Datetime Logic</a></span><ul class="toc-item"><li><span><a href="#Adding-Hour-and-Minute" data-toc-modified-id="Adding-Hour-and-Minute-5.6.1">Adding Hour and Minute</a></span></li></ul></li></ul></li><li><span><a href="#add_datetime-function" data-toc-modified-id="add_datetime-function-6"><code>add_datetime</code> function</a></span></li></ul></div> # - # # Transactions EDA # # EDA for `transaction_data.csv` # Let's take a look at `transaction_data.csv`, where the transactions and sales value is recorded. The business summary is described below; the table description follows, and a general summary of sales data is included. We then were able to add datetime information and a function to do so. # # The final `add_datetime` function is at the end of the notebook. # # Business Summary/tl;dr # # - Created an argument which places our data in time and space; its likely from the (north-east) USA from 2004-2006. 
# - Created add_datetime(df) logic for project package # - Noted inconsistencies (empty sales rows and leading tail) to be truncated for ELT pipeline; # - load_merged() function to be described in the following notebook. Once we have clean transactions with products descriptions, we can perform basic sales analysis. # + code_folding=[] #import modules import pandas as pd import numpy as np import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (14, 6) plt.style.use('seaborn') import seaborn as sns import datetime import glob # - # # EDA for `transaction_data.csv` # + code_folding=[] # loading transactions and products; forming `merged` dataframe and dropping 0-'QUANTITY' rows transactions = pd.read_csv('data/transaction_data.csv') # + code_folding=[0] transactions.info() # - transactions.head() transactions.columns # ## Data Dictionary for `transaction_data.csv` # # `household_key` - identifies the household which made the transaction # # `BASKET_ID` - identifies the basket in which the transaction occurred # # `DAY` - identifies the day of the transaction # # `PRODUCT_ID` - identifies the product purchased # # `QUANTITY` - identifies the quantity of item purchased # # `SALES_VALUE` - identifies the dollars received by the store for the purchase, including manufacturer redemption/rebates # # `STORE_ID` - identifies the store where the transaction occurred # # `RETAIL_DISC` - identifies the retail (loyalty) discount # # `TRANS_TIME` - identifies the hour and minute of the day when the transaction was recorded # # `WEEK_NO` - identifies the week of the transaction # # `COUPON_DISC` - identifies the amount paid by manufacturer to store; used to calculate 'shelf price' paid by customer # # `COUPON_MATCH_DISC` - net losses for the company due to coupon/price-matching (presumably listed by other retailers) # All values are numeric. # + code_folding=[] # checking nulls transactions.isna().sum() # - # We have about 2.6 million rows of item-level transactions. 
The unique identifier for this table is a combination of BASKET_ID and PRODUCT_ID; transactions.duplicated().sum() # There are no duplicate rows. # --- # # General Sales Description transactions['SALES_VALUE'].sum() # Transactions total more than 8 million dollars in revenue. We don't have product descriptions in this data yet, so it's difficult to tell much just yet. Let's look at the sales over time, and the histogram of total spend by household. plt.subplots(1,2,figsize=(16,6)) plt.subplot(1,2,1) transactions.groupby('DAY')['SALES_VALUE'].sum().plot() plt.title('Daily Sales', fontsize=25) plt.ylabel('Dollars ($)',fontsize=20) plt.xlabel('Day', fontsize=20) plt.subplot(1,2,2) transactions.groupby('household_key')['SALES_VALUE'].sum().plot(kind='hist', bins=500) plt.title('Histogram of household total sales',fontsize=25) plt.xlabel('Total Sales',fontsize=20) plt.ylabel('Number of Households',fontsize=20) plt.tight_layout() # ## Removing Empty Sales Rows # Later in the analysis I ran into trouble when calculating means for certain sales groups. This was because some of the rows in this table have 0 sales value. I'm going to remove these rows as part of the ETL process. # # These rows could be cancelled transactions, returns, or other human error. The alternative is that they are somehow involved in coupon redemption; or loyalty rewards. I'll describe the impact of these rows on the discount columns. # + no_sales = transactions[(transactions['SALES_VALUE'] == 0)] no_sales.head() # - # Around 19000 rows show 0 sales value. no_sales[['RETAIL_DISC', 'COUPON_DISC', 'COUPON_MATCH_DISC']].sum() # These rows represent about one quarter of the total `COUPON_DISC` value of transactions in the table: transactions['COUPON_DISC'].sum() transactions['RETAIL_DISC'].sum() # Whoa. The empty sales rows only account for one one-thousandth of the whopping \\$1.4 million dollars in `RETAIL_DISC`. 
We'll come back to that in a second, but for now; are there any rows which are completely empty of sales value and for which a quantity was not recorded? no_items = transactions[(transactions['QUANTITY'] <= 0) & (transactions['SALES_VALUE'] <= 0)] no_items # We don't know what these rows mean -- it could be an item return or a cancelled scan; we notice there is one row with `QUANTITY` not equal to 0. I've opted to drop these rows and thereby not include them in the association rules algorithm later. They also mess up the calculation of mean sales totals for certain product groups, campaigns, or households. # empty_rows = transactions[(transactions['SALES_VALUE'] == 0) & # (transactions['RETAIL_DISC']==0) & # (transactions['COUPON_DISC']==0) & # (transactions['COUPON_MATCH_DISC']==0) # ] # ## Loyalty Program # abs(transactions['RETAIL_DISC'].sum()) / transactions['SALES_VALUE'].sum() * 100 # Loyalty discounts accounted for $1.4M, equivalent to **~17.4\% of our total revenue.** We already believed that these customers were frequent shoppers, so it makes sense that they would be taking advantage of our loyalty program. # ### Customer Retention and Direct Marketing # # The strong engagement with `RETAIL_DISC` column immediately offers a point of engagement with our customers. # # The loyalty program is a forum where the grocer could advertise; for example through a phone app where they could choose what types of products they'd like to receive notifications or promotions for. Not only would an app offer personalization, it could also drive brand identity, should customers be able to select specific products or types of products that they want to engage with. # # If we're offering direct marketing campaigns as a means of bringing people into the store, then why not use the loyalty program instead? It seems to be doing well with our repeat customers. Strong engagement with a loyalty program will keep shoppers coming back again and again; offering consistent mutual value. 
It can cost 5-25 times more in terms of advertising to acquire a new customer than to keep an existing one. # # # ### Subject Matter Expertise # The task of creating a new loyalty program is a complicated one, and you probably risk offending your customers by replacing or reducing an existing loyalty program, so any changes to it should be carefully thought out. Customer attraction/retention on a store level can't be easily examined without more context -- there are a ton of factors at play. I'm not a subject matter expert by any means. # # The scope of this data is insufficient to examine for example the local competitors for each store or the implications of restructuring the Loyalty Program on the entire market population. However, it is worth noting that this loyalty program represents many of the dynamics at play, from a statistical and data perspective. # # # ### Disclaimer on Sample Bias # We have no idea how large the real underlying population might be with a grocery chain of 500+ stores. This sample group, to our knowledge, is defined only by the fact that 800+ of them agreed to fill out a customer survey. We aren't sure if there was another selection process which might have added another level of bias. # # We should be careful extrapolating and projecting the results onto the store chain as a whole, especially for something as meaningful to our Client's brand identity as a loyalty program is. # ## Finding Christmas # I decided to try to place the data in time and space. At the very least I'd like to have 'day of the week' in order to examine seasonality. From there it should simply be a matter of looking up the years where Christmas fell on those days of the week. # # Let's look at the lowest `SALES_VALUE` per `DAY` on the chart, disregarding the first 120 days (we'll assume there is an issue with data acquisition there). 
# + code_folding=[] #print lowest sales days: print(f"Lowest Sum of SALES_VALUE by Day: \n{transactions[transactions['DAY'] > 120].groupby('DAY')['SALES_VALUE'].sum().nsmallest(20).head(8)}") # find Christmas Day, when stores are closed print() print(f"The lowest Sales totals were on days 643 and 278. They were {643-278} days apart.\nThe third and fourth lowest were on days 611 and 247. They were {611-247} days apart.") # - # I am going to assume these two lowest points to be Christmas Day, despite not having precisely $0 in sales. # # This gives us a solid chance of identifying the real dates from which this data originates, because Christmas falls on a different day of the week each year. Let's try to find the day of the week of those two Christmases. # ### Weekly Seasonality # By looking more closely at the data and leveraging some domain knowledge, we can identify days of the week of our sales data by finding a weekly seasonal pattern. # # These highest-grossing days are **Saturdays, the busiest day for grocery shopping**. Using this information, we can extrapolate the days of the week for our two Christmases, and then check which years had that sequence of Christmas weekdates. # + code_folding=[0] ### Using a closer view, we can see the weekly seasonality. 
start=120 stop=160 myslice = transactions[transactions['DAY'].between(start, stop)] plt.figure(figsize=(14,4)) plt.title(f'Sum of SALES_VALUE, by DAY {start} - {stop}') plt.plot(myslice.groupby('DAY')['SALES_VALUE'].sum()) sats = [124, 131, 138, 145] for sat in range(sats[0], stop, 7): plt.axvline(sat, color='red') plt.ylabel('Sales ($)') plt.xlabel('DAY') plt.xticks(range(sats[0], stop, 7)) plt.show() # + code_folding=[0] # (CHRISTMAS 1) and extrapolated to find that DAY 278 actually is a Saturday: plt.figure(figsize=(14,4)) start = 200 stop = 307 myslice = transactions[transactions['DAY'].between(start, stop)] plt.title(f'Sum of SALES_VALUE, DAY {start} - {stop}') plt.plot(myslice.groupby('DAY')['SALES_VALUE'].sum(), color='red') sats = [201] for sat in range(sats[0], stop, 7): plt.axvline(sat, color='blue') plt.xticks(range(sats[0], stop, 7)) plt.ylabel('Sales ($)') plt.xlabel('DAY') plt.axvline(278, color='green') plt.show(); # + code_folding=[0] # (CHRISTMAS 2) DAY 643 is a Sunday. 
plt.figure(figsize=(14,4)) start = 551 stop = 658 myslice = transactions[transactions['DAY'].between(start, stop)] plt.title(f'Sum of SALES_VALUE, DAY {start} - {stop}') plt.plot(myslice.groupby('DAY')['SALES_VALUE'].sum(), color='red') sats = [551] for sat in range(sats[0], stop, 7): plt.axvline(sat, color='blue') plt.xticks(range(sats[0], stop, 7)) plt.ylabel('Sales ($)') plt.xlabel('DAY') plt.axvline(643, color='green') plt.show(); # 643 is a Sunday # + code_folding=[0] # plot sum of sales by day, identifying christmas and weekly seasonality start = 222 stop = 713 myslice = transactions[transactions['DAY'].between(start, stop)] plt.figure(figsize=(18, 8)) plt.title('SALES VALUE by DAY') plt.ylabel('Sales ($)') plt.xlabel('DAY') plt.plot(myslice.groupby('DAY')['SALES_VALUE'].sum(), c='red') plt.xticks(rotation=45) for i in range(start, stop, 7): plt.axvline(i, c='blue', alpha=0.7) sats=[222] plt.axvline(278, c='g') plt.axvline(643, c='g') plt.xticks(range(sats[0], stop, 28)) plt.show(); # - # ## Adding Year # Day 278 is itself a Saturday, and 643 is a Sunday. Let's look up when that combination occured in recent history: # # [https://www.timeanddate.com/holidays/us/christmas-day#tb-hol_obs](https://www.timeanddate.com/holidays/us/christmas-day#tb-hol_obs) # # Christmas fell on a Saturday and then a Sunday twice in the past 20 years; in 2004/2005, and in 2010/2011. Unfortunately (and predictably), American Thanksgiving also lined up exactly.We'll have to look for more information in the data. # Despite being unsure of the specific year, we can now assign a date(time) range to our data -- at the very least, we know the weekdays will match up. We're down to two potential timeframes; that's closer than we were. In classic statistics fashion, I flipped a coin. Let's make a start. 
# # ### Attaching Hypothetical Date Range # I'm going to create a datetime range which corresponds to the number of days in our data, and put it up alongside the information we do have; first using the year 2004, and then the year 2010. # + code_folding=[] # extrapolating datetime range from first christmas as DAY 278, year=2004 import datetime # set year, and first christmas a date year=2004 christmas1 = datetime.date(year, 12, 25) # timedelta to day 1 delta = datetime.timedelta(days=277) #278-1 # DAY 1 is christmas minus the delta (1 + 277 = 278) first = christmas1 - delta # DAY 711 is (1 + 710) last = first + datetime.timedelta(710) print(f'first: {first}, last: {last}') # creating range and constructing a map of values for 'DAY' column; # create a range of datetime objects myrange = pd.date_range(first, last) #len(myrange) # 711 # map datetime index to DAY; enumerate() indexes from 0, so we add 1 mymap = {i+1:x for i, x in enumerate(myrange)} # check the column to add # transactions['DAY'].map(lambda x: mymap[x]) # check index(day) 278 is still christmas print(f'Confirming christmas 1 is at DAY 278: {mymap[278]}') #mymap # mapping and applying to transactions df... transactions['datetime'] = transactions['DAY'].map(lambda x: mymap[x]) transactions['datetime'] # - # So we have a datetime range spanning from March 23, 2004 to March 3, 2006. # # We can easily go back and check the year to the equivalent dates beginning in 2010, should that be necessary -- the code will require just one change. # # Let's dive a little bit deeper to identify other outlying sales points, and perhaps associate them with world events unique to either 2004-2006, or 2010-2012... I'm thinking Superbowls, Black Friday, etc. -- or maybe we get lucky. # ## Top 15 Sales Dates # Let's look at the outlying sales dates. 
# + code_folding=[] # TOP 15 sales dates in the data, sorted by DAY transactions[transactions['DAY'] > 120].groupby('DAY')['SALES_VALUE'].sum().nlargest(15).reset_index().sort_values('DAY').set_index('DAY').plot(kind='barh') plt.title('Top 15 Highest-Grossing Sales Days') plt.legend([]) plt.xlabel('Sum of Sales Across all stores ($)') plt.show() # - # The top 15 sales dates are listed above, by descending order of DAY. # # There were back-to-back high-ranking sales numbers on DAY 691 and 692 -- an anomaly. # # DAY 692 actually had higher sales than the friday before the first 'Christmas'. # # What could possibly drive sales up so high as to compete with Christmas? # # ## Interpretation and Final Datetime Logic # So what happened on DAY 691 and 692? mymap[691] # Second-Highest sales date, even above First Christmas -- # February 11, 2006 is the date, if we are correct about the year. # I searched back through Google. # # It turns out **there was a huge storm in the Northeastern US on February 11/12 of 2006** -- As per [https://www.thepeoplehistory.com/february11th.html](https://www.thepeoplehistory.com/february11th.html); # # `What has become known as the Blizzard of 2006 started on the evening of the 11th. Heavy snow fell across the northeastern United States from Virginia to Maine through to the evening of the 12th.` # # There is no equivalent for those days in 2012, as far as I can tell. # # 2006 is a likely bet for the correct year, and moreover gives us an idea of the location of at least some of these stores -- the Northeastern United States. # # Jackpot. # ### Adding Hour and Minute # There's one more thing we need to do to complete the datetime column: add in the transaction time, stored in `TRANS_TIME`. 
# + code_folding=[] # converting TRANS_TIME to str to find LENGTH of entry; vectorizing the string lengths and counting values: vec = transactions['TRANS_TIME'].astype('str') vec2 = pd.Series([len(x) for x in vec]) transactions['vec2'] = vec2 vec2.value_counts() # - # So, there are `TRANS_TIME` values with up to 4 digits, and much fewer as we approach 0 digits. Seems promising. # # Let's look at the length of each entry: # + code_folding=[0] # 00:0X -- 1 digit entries -- unique values transactions[transactions['vec2']==1]['TRANS_TIME'].unique() # 0-9 values... one digit of minutes # + code_folding=[] # 00:XX -- 2 digit entries # nothing over 59... two digits of minutes transactions[transactions['vec2']==2]['TRANS_TIME'].min(), transactions[transactions['vec2']==2]['TRANS_TIME'].max() # + code_folding=[] # 0X:XX -- 3 digit entries -- number of unique values [1,2,...9 * 60] # we would expect to see single digit hours with 60 minutes each: transactions[transactions['vec2']==3]['TRANS_TIME'].min(), transactions[transactions['vec2']==3]['TRANS_TIME'].max() # - # 3-digit TRANS_TIME had 540 unique values -- exactly 9 hours worth of 3-digit timestamps. This corresponds to 01:00 through 9:59. # 00:01 through 00:59 # In order to map the `TRANS_TIME` to datetime effectively, let's fill the empty spaces in the column with 0's, such that we can apply timedelta. 
# + code_folding=[]
# define a function to split TRANS_TIME into hour and minute values:
def split_column(row):
    """Split an integer-encoded HHMM timestamp into ('HH', 'MM') strings.

    BUGFIX: the original 1-digit branch computed ``"0" + str(row)[1:]``,
    which is always just ``"0"`` (slicing a 1-character string from index 1
    yields the empty string), so minutes 1-9 were silently read as minute 0.
    It also had no branch for other widths, which would raise
    UnboundLocalError.  Zero-padding to four digits handles every width
    uniformly and matches the ``zfill(4)`` logic used by the packaged
    ``add_datetime`` function later in this notebook.
    """
    text = str(row).zfill(4)
    hour = text[:2]
    minute = text[2:]
    return hour, minute


# split TRANS_TIME and create HOUR and MINUTE vectors; convert to timedelta format;
a = transactions['TRANS_TIME'].apply(split_column)
transactions['HOUR'] = [element[0] for element in a]
transactions['MINUTE'] = [element[1] for element in a]
transactions['HOUR'] = pd.to_timedelta(transactions['HOUR'].astype('int'), unit='hour')
transactions['MINUTE'] = pd.to_timedelta(transactions['MINUTE'].astype('int'), unit='minute')

# broadcast timedelta to the datetime column and drop 'vec2', 'HOUR', 'MINUTE' columns
transactions['datetime'] = transactions['datetime'] + transactions['HOUR'] + transactions['MINUTE']

# dropping the columns we used
transactions.drop(['vec2', 'HOUR', 'MINUTE'], axis=1, inplace=True)
transactions['datetime']

# +
# transactions['datetime'].to_csv('data/outputs/transactions_datetime.csv')
# -

# # `add_datetime` function
# below is the `add_datetime` function for the project package.
def add_datetime(df):
    """Return a datetime Series combining df's 'DAY' and 'TRANS_TIME' columns.

    'DAY' 1 is anchored at 2004-03-23, as derived earlier in this notebook
    (Christmas-Day sales minima on DAY 278/643 plus the February 2006
    blizzard spike).  Assumes 'DAY' holds positive integers and
    'TRANS_TIME' holds integer-encoded HHMM values.
    """
    def make_date_map(df, dates='DAY'):
        # Map each integer 'DAY' value to a calendar date.
        # 'DAY' 1 == 2004-03-23
        day1 = datetime.datetime(2004, 3, 23) # as derived in transactions notebook; datetime for 'DAY' == 1
        ineedthismany = df[dates].max()
        last = day1 + datetime.timedelta(days=int(ineedthismany))
        date_range = pd.date_range(day1, last) # date range for our data
        # map datetime index to DAY; enumerate() indexes from 0, so we add 1
        date_map = {i+1:x for i, x in enumerate(date_range)}
        output = df[dates].map(date_map)
        output = pd.to_datetime(output)
        return output

    def make_time_map(df, times='TRANS_TIME'):
        """Convert integer-encoded HHMM times to timedeltas since midnight."""
        # pad zeros
        output = df[times].astype(str).str.zfill(4)
        # split to hours and minutes
        hours = output.str[:2]
        minutes = output.str[2:]
        # convert to timedelta
        hours = pd.to_timedelta(hours.astype('int'), unit='hour')
        minutes = pd.to_timedelta(minutes.astype('int'), unit='minute')
        output = hours + minutes
        return output

    return make_date_map(df) + make_time_map(df)


add_datetime(transactions)

# ---
1.1 -- Transactions EDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from moviepy.editor import AudioFileClip
import os
import tqdm
from collections import Counter
from decimal import *
import csv
import numpy as np

curr = os.getcwd()
audio_dir = curr + '/audio_data'  # path to wav or mp3 audio files
audio_list = os.listdir(audio_dir)

# + survey the corpus: collect frame count, fps and duration of every clip
frame_list = []
time_list = []
fps_list = []
for i in tqdm.tqdm(audio_list):
    input_path = audio_dir + '/' + i
    audioclip = AudioFileClip(input_path)
    frame_list.append(audioclip.duration * audioclip.fps)
    fps_list.append(audioclip.fps)
    time_list.append(audioclip.duration)
    audioclip.close()

# +
import matplotlib.pyplot as plt
# %matplotlib inline

plt.hist(time_list, bins=30)
plt.ylabel('Probability');
print(max(time_list))
print(min(time_list))
# -

# # Fixed number of audio chunks
# ## Time stamp calculation
# 0 T
#
# 0 t<br>
# t-overlap t-overlap+t<br>
# 2*(t-overlap) 2*(t-overlap)+t<br>
# .<br>
# .<br>
# .<br>
# (time_stamps-1)*(t-overlap) (time_stamps-1)*(t-overlap)+t<br>
# ............................<br>
# (time_stamps-1)*(t-overlap)+t=min_length<br>
#
# min_length=3960 ms<br>
# overlap=50ms<br>
# t=100ms<br>
#
# we get time_stamps=76
#
# ## Final feature dimension for variable length audio file input
# Each audio file is broken into 76 chunks with an overlap of 50ms.
# Feature Dimension -[76,1582]


def _clip_duration_ms(org_path):
    """Open the clip just long enough to read its duration, in ms (Decimal)."""
    clip = AudioFileClip(org_path)
    dur = Decimal(clip.duration * 1000.0)
    clip.close()
    return dur


def _write_subclip(org_path, start_ms, end_ms, wav_path):
    """Write the [start_ms, end_ms) slice of org_path to wav_path as audio."""
    clip = AudioFileClip(org_path).subclip(float(start_ms / 1000), float(end_ms / 1000))
    clip.write_audiofile(wav_path, logger=None)
    clip.close()


def _opensmile_features(wav_path, csv_path):
    """Run openSMILE (IS10_paraling config) on wav_path, parse csv_path.

    Returns the 1582-dim feature vector read from line 1590 of the output;
    the first field (instance name) and the trailing class field are
    dropped.  Extracted because both chunking loops below duplicated this
    extract-and-parse machinery.
    """
    command = "SMILExtract -C config/IS10_paraling.conf -I '" + wav_path + "' -O '" + csv_path + "'"
    os.system(command)
    feat = None
    with open(csv_path) as csvfile:
        csv_reader = csv.reader(csvfile, delimiter=',')
        for cnt, row in enumerate(csv_reader, start=1):
            if cnt == 1590:   # feature row of the openSMILE ARFF-style output
                feat = row[1:-1]
    np_array = np.zeros((1582))
    for k in range(1582):
        np_array[k] = float(feat[k])
    return np_array


open_dir = curr + '/open_chunks'  # path to save features

# + fixed number of chunks: 76 overlapping chunks tile each clip exactly
time_stamp = Decimal(76)
over_lap = Decimal(50)
for i in tqdm.tqdm(audio_list):
    if not os.path.isfile(open_dir + '/' + i[:-4] + '.npy'):
        org_path = audio_dir + '/' + i
        dur = _clip_duration_ms(org_path)
        # chunk duration so that exactly 76 chunks with 50 ms overlap cover dur
        sample_dur = Decimal(dur + over_lap * (time_stamp - 1)) / time_stamp
        step = sample_dur - over_lap
        count = 0
        ini = Decimal(0.0)
        opensmile_array = []
        while count < 76:
            input_path = open_dir + '/' + str(count) + '.wav'
            csv_output_path = open_dir + '/' + str(count) + '.csv'
            _write_subclip(org_path, ini, ini + sample_dur, input_path)
            np_array = _opensmile_features(input_path, csv_output_path)
            if np.count_nonzero(np_array) < 1000:
                print("zeros values problem,", np.count_nonzero(np_array), " sample_dur", sample_dur)
            opensmile_array.append(np_array)
            os.remove(input_path)
            os.remove(csv_output_path)
            ini = ini + step
            count += 1
        np_file_name = i[:-4] + '.npy'
        if len(opensmile_array) != time_stamp:
            print("time dimension ", len(opensmile_array))
            print("filename ", i[:-4])
        np.save(os.path.join(open_dir, np_file_name), np.array(opensmile_array))
# -

# # Fixed chunk duration -> number of chunks is variable
# ## Final feature dimension for variable length audio file input
# Each audio file is divided into chunks of 75 ms with an overlap of 30ms.
# Feature Dimension -[Variable,1582]

open_dir = curr + '/open_chunks'  # path to save features

# + fixed chunk duration: 75 ms chunks, 30 ms overlap, variable count
chunk_len = Decimal(75)  # in ms
over_lap = Decimal(30)   # 40% of 75
min_time = 10000         # smallest timestamp count seen across cached files
for i in tqdm.tqdm(audio_list):
    if not os.path.isfile(open_dir + '/' + i[:-4] + '.npy'):
        print(i)
        org_path = audio_dir + '/' + i
        dur = _clip_duration_ms(org_path)
        step = chunk_len - over_lap
        ini = Decimal(0.0)
        count = 0
        opensmile_array = []
        while ini < dur - chunk_len:
            input_path = open_dir + '/' + str(count) + '.wav'
            csv_output_path = open_dir + '/' + str(count) + '.csv'
            _write_subclip(org_path, ini, ini + chunk_len, input_path)
            opensmile_array.append(_opensmile_features(input_path, csv_output_path))
            os.remove(input_path)
            os.remove(csv_output_path)
            ini = ini + step
            count += 1
        np_file_name = i[:-4] + '.npy'
        print("Total timestamps ")
        print(len(opensmile_array))
        np.save(os.path.join(open_dir, np_file_name), np.array(opensmile_array))
    else:
        arr = np.load(open_dir + '/' + i[:-4] + '.npy')
        if np.shape(arr)[0] < min_time:
            min_time = np.shape(arr)[0]
            print("min time stamps is ", min_time)
# -

print("min time stamps is ", str(min_time))
Audio_models/audio_LSTM_feature.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from qiskit import *
from qiskit.tools.visualization import plot_histogram


def _counts(circuit, shots=4096):
    """Run *circuit* on the BasicAer QASM simulator and return its counts.

    Extracted because the four experiments below repeated the same
    backend/execute/result boilerplate.  BUGFIX: uses the star-imported
    ``execute`` instead of ``qiskit.execute`` -- ``from qiskit import *``
    does not bind the name ``qiskit`` itself, so the original attribute
    access only worked if ``qiskit`` happened to be imported elsewhere.
    """
    backend = BasicAer.get_backend('qasm_simulator')
    job = execute(circuit, backend, shots=shots, memory=True)
    return job.result().get_counts(circuit)


# # One Y-Gate for |0>

# +
qr = QuantumRegister(1)
cr = ClassicalRegister(1)
circuit = QuantumCircuit(qr, cr)
circuit.y(0)
circuit.measure(qr, cr)
circuit.draw(output='mpl')
# -

plot_histogram(_counts(circuit))

# # One Y-Gate for |1>

# +
qr = QuantumRegister(1)
cr = ClassicalRegister(1)
circuit = QuantumCircuit(qr, cr)
circuit.x(0)
circuit.barrier()
circuit.y(0)
circuit.measure(qr, cr)
circuit.draw(output='mpl')
# -

plot_histogram(_counts(circuit))

# # Two Y-Gates for |0>

# +
qr = QuantumRegister(1)
cr = ClassicalRegister(1)
circuit = QuantumCircuit(qr, cr)
circuit.y(0)
circuit.y(0)
circuit.measure(qr, cr)
circuit.draw(output='mpl')
# -

plot_histogram(_counts(circuit))

# # Two Y-Gates for |1>

# +
qr = QuantumRegister(1)
cr = ClassicalRegister(1)
circuit = QuantumCircuit(qr, cr)
circuit.x(0)
circuit.barrier()
circuit.y(0)
circuit.y(0)
circuit.measure(qr, cr)
circuit.draw(output='mpl')
# -

plot_histogram(_counts(circuit))
Y Gates.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Builds a confusion matrix of predicted vs. legacy GAFF atom types for the
# carbon classes and saves a heatmap.
#
# BUG FIX: `sns.cubehelix_palette(..., as_cmap=True2)` raised NameError
# (`True2` is an undefined name); the keyword must be `as_cmap=True`.

import numpy as np
import torch
import espaloma as esp

ds = esp.data.dataset.GraphDataset.load("zinc_param")

len(ds)

# +
# define a layer
layer = esp.nn.layers.dgl_legacy.gn("SAGEConv")

# define a representation
representation = esp.nn.Sequential(
    layer,
    [128, "relu", 128, "relu", 128, "relu"],
)

# define a readout
readout = esp.nn.readout.node_typing.NodeTyping(
    in_features=128,
    n_classes=100
)  # not too many elements here I think?

net = torch.nn.Sequential(
    representation,
    readout
)
# -

net.load_state_dict(
    torch.load(
        "results/net1300.th"
    )
)

for name, param in net.named_parameters():
    print(name)

import dgl

dgl.__version__

list(
    torch.load(
        "results/net1300.th"
    ).keys()
)

# One batch containing the whole dataset.
g = next(iter(ds.view(batch_size=len(ds))))

g = net(g)

# NOTE(review): reaches into a private attribute of LegacyForceField; verify
# against the espaloma version in use.
idx_2_str = esp.graphs.legacy_force_field.LegacyForceField("gaff-1.81")._idx_2_str

y_pred = g.nodes['n1'].data['nn_typing'].argmax(dim=-1)
y_true = g.nodes['n1'].data['legacy_typing']

y_pred = [idx_2_str[idx.item()] for idx in y_pred]
y_true = [idx_2_str[idx.item()] for idx in y_true]

pairs = list(zip(y_true, y_pred))

# Restrict to the carbon atom types only.
cs = ["c", "c1", "c2", "c3", "ca", "cc", "cp", "ce"]
idx_2_str_c = dict(zip(range(len(cs)), cs))
str_2_idx_c = dict(zip(cs, range(len(cs))))

pairs = [pair for pair in pairs if pair[0] in cs and pair[1] in cs]

# Rows index the true type, columns the predicted type.
confusion = np.zeros((len(cs), len(cs)))
for x, y in pairs:
    confusion[
        str_2_idx_c[x],
        str_2_idx_c[y]
    ] += 1

# NOTE(review): axis=0 normalizes each *predicted* column to percent
# (precision-like); confirm row (recall) normalization was not intended.
confusion /= confusion.sum(axis=0, keepdims=True)
confusion *= 100
confusion[confusion == 0] = np.nan  # blank out zero cells in the heatmap

# +
# Fraction of the filtered pairs whose true label is each carbon type.
abundance = {c: 0 for c in cs}
for x, y in pairs:
    if x in cs:
        abundance[x] += 1
abundance = {key: value / len(pairs) for key, value in abundance.items()}
# -

len(pairs)

import seaborn as sns
from matplotlib import pyplot as plt

fig = plt.figure(figsize=(5, 5))
sns.set_style("dark")

sns.heatmap(
    confusion,
    linewidths=.5,
    annot=True,
    fmt=".1f",
    xticklabels=["%s \n%.1f" % (c, abundance[c] * 100) for c in str_2_idx_c.keys()],
    yticklabels=str_2_idx_c.keys(),
    cmap=sns.cubehelix_palette(start=.5, rot=-.5, as_cmap=True),
    cbar=False,
)

plt.savefig("wrongs.png", dpi=300)
espaloma_notebooks/typing/confusion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from datetime import datetime import sys sys.path.insert(0,'..') from gamma.utils import from_seconds, convert_picks_csv, association import numpy as np import os from tqdm import tqdm import time # + catalog_dir = os.path.join("./") if not os.path.exists(catalog_dir): os.makedirs(catalog_dir) figure_dir = os.path.join("./figures") if not os.path.exists(figure_dir): os.makedirs(figure_dir) station_csv = "stations.csv" pick_json = "picks.json" # pick_json = "picks_gamma_old.csv" catalog_csv = "catalog_gamma.csv" picks_csv = "picks_gamma.csv" config = {'center': (-117.504, 35.705), 'xlim_degree': [-118.004, -117.004], 'ylim_degree': [35.205, 36.205], 'degree2km': 111.19492474777779, 'starttime': datetime(2019, 7, 4, 17, 0), 'endtime': datetime(2019, 7, 5, 0, 0)} # 'starttime': datetime(2019, 7, 4, 0, 0), # 'endtime': datetime(2019, 7, 10, 0, 0)} config["x(km)"] = (np.array(config["xlim_degree"])-np.array(config["center"][0]))*config["degree2km"] config["y(km)"] = (np.array(config["ylim_degree"])-np.array(config["center"][1]))*config["degree2km"] config["z(km)"] = (0, 20) ## read picks picks = pd.read_json(pick_json) # picks = pd.read_csv(pick_json, sep="\t") # picks = picks[["id", "timestamp", "type", "prob", "amp"]] # picks["timestamp"] = picks["timestamp"].apply(lambda x: datetime.fromisoformat(x)) # picks["time_idx"] = picks["timestamp"].apply(lambda x: x.strftime("%Y-%m-%dT%H")) ## process by hours picks["time_idx"] = picks["timestamp"].apply(lambda x: x.strftime("%Y-%m-%d")) ## process by days ## read stations stations = pd.read_csv(station_csv, delimiter="\t") stations = stations.rename(columns={"station":"id"}) stations["x(km)"] = stations["longitude"].apply(lambda x: (x - config["center"][0])*config["degree2km"]) 
stations["y(km)"] = stations["latitude"].apply(lambda x: (x - config["center"][1])*config["degree2km"]) stations["z(km)"] = stations["elevation(m)"].apply(lambda x: -x/1e3) ### setting GMMA configs config["dims"] = ['x(km)', 'y(km)', 'z(km)'] config["use_amplitude"] = True config["vel"] = {"p": 6.0, "s": 6.0 / 1.73} config["method"] = "BGMM" if config["method"] == "BGMM": config["oversample_factor"] = 4 if config["method"] == "GMM": config["oversample_factor"] = 1 # DBSCAN config["bfgs_bounds"] = ((config["x(km)"][0]-1, config["x(km)"][1]+1), #x (config["y(km)"][0]-1, config["y(km)"][1]+1), #y (0, config["z(km)"][1]+1), #x (None, None)) #t config["dbscan_eps"] = min(6, np.sqrt((stations["x(km)"].max()-stations["x(km)"].min())**2 + (stations["y(km)"].max()-stations["y(km)"].min())**2)/(6.0/1.75)) #s config["dbscan_min_samples"] = min(3, len(stations)) # Filtering config["min_picks_per_eq"] = min(15, len(stations) // 2) config["max_sigma11"] = 2.0 #s config["max_sigma22"] = 1.0 #m/s config["max_sigma12"] = 1.0 #covariance # print(config) for k, v in config.items(): print(f"{k}: {v}") # + start_time = time.time() pbar = tqdm(sorted(list(set(picks["time_idx"])))) event_idx0 = 0 ## current earthquake index assignments = [] if (len(picks) > 0) and (len(picks) < 5000): catalogs, assignments = association( picks, stations, config, event_idx0, method=config["method"], pbar=pbar, ) event_idx0 += len(catalogs) else: catalogs = [] for i, segment in enumerate(pbar): picks_ = picks[picks["time_idx"] == segment] if len(picks_) == 0: continue catalog, assign = association( picks_, stations, config, event_idx0, method=config["method"], pbar=pbar, ) event_idx0 += len(catalog) catalogs.extend(catalog) assignments.extend(assign) ## create catalog catalogs = pd.DataFrame( catalogs, columns=["time(s)"] + config["dims"] + ["magnitude", "sigma_time", "sigma_amp", "cov_time_amp", "event_idx", "prob_gamma"] ) catalogs["time"] = catalogs["time(s)"].apply(lambda x: from_seconds(x)) 
catalogs["longitude"] = catalogs["x(km)"].apply( lambda x: x / config["degree2km"] + config["center"][0] ) catalogs["latitude"] = catalogs["y(km)"].apply( lambda x: x / config["degree2km"] + config["center"][1] ) catalogs["depth(m)"] = catalogs["z(km)"].apply(lambda x: x * 1e3) catalogs.sort_values(by=["time"], inplace=True) with open(catalog_csv, 'w') as fp: catalogs.to_csv( fp, sep="\t", index=False, float_format="%.3f", date_format='%Y-%m-%dT%H:%M:%S.%f', columns=[ "time", "magnitude", "longitude", "latitude", "depth(m)", "sigma_time", "sigma_amp", "cov_time_amp", "event_idx", "prob_gamma", "x(km)", "y(km)", "z(km)", ], ) catalogs = catalogs[ ['time', 'magnitude', 'longitude', 'latitude', 'depth(m)', 'sigma_time', 'sigma_amp'] ] ## add assignment to picks assignments = pd.DataFrame(assignments, columns=["pick_idx", "event_idx", "prob_gamma"]) picks = ( picks.join(assignments.set_index("pick_idx")).fillna(-1).astype({'event_idx': int}) ) picks.sort_values(by=["timestamp"], inplace=True) with open(picks_csv, 'w') as fp: picks.to_csv( fp, sep="\t", index=False, date_format='%Y-%m-%dT%H:%M:%S.%f', columns=["id", "timestamp", "type", "prob", "amp", "event_idx", "prob_gamma"], ) print(f"Total time: {time.time() - start_time} seconds") # + result_label="GaMMA" catalog_label="SCSN" import matplotlib.pyplot as plt import matplotlib.dates as mdates # + stations = pd.read_csv(("stations.csv"), delimiter="\t") events = pd.read_csv(("catalog_scsn.csv"), delimiter="\t") events["time"] = events["time"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S.%f")) catalog = pd.read_csv(("catalog_gamma.csv"), delimiter="\t") catalog["time"] = catalog["time"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S.%f")) # catalog["covariance"] = catalog["covariance"].apply(lambda x: [float(i) for i in x.split(",")]) plt.figure() plt.hist(catalog["time"], range=(config["starttime"], config["endtime"]), bins=24, edgecolor="k", alpha=1.0, linewidth=0.5, label=f"{result_label}: 
{len(catalog['time'])}") plt.hist(events["time"], range=(config["starttime"], config["endtime"]), bins=24, edgecolor="k", alpha=1.0, linewidth=0.5, label=f"{catalog_label}: {len(events['time'])}") plt.ylabel("Frequency") plt.xlabel("Date") plt.gca().autoscale(enable=True, axis='x', tight=True) plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m-%d:%H')) plt.gcf().autofmt_xdate() plt.legend() plt.savefig(os.path.join(figure_dir, "earthquake_number.png"), bbox_inches="tight", dpi=300) plt.savefig(os.path.join(figure_dir, "earthquake_number.pdf"), bbox_inches="tight") plt.show() # + fig = plt.figure(figsize=plt.rcParams["figure.figsize"]*np.array([1.5,1])) box = dict(boxstyle='round', facecolor='white', alpha=1) text_loc = [0.05, 0.92] grd = fig.add_gridspec(ncols=2, nrows=2, width_ratios=[1.5, 1], height_ratios=[1,1]) fig.add_subplot(grd[:, 0]) plt.plot(catalog["longitude"], catalog["latitude"], '.',markersize=2, alpha=1.0) plt.plot(events["longitude"], events["latitude"], '.', markersize=2, alpha=0.6) plt.axis("scaled") plt.xlim(np.array(config["xlim_degree"])+np.array([0.2,-0.27])) plt.ylim(np.array(config["ylim_degree"])+np.array([0.2,-0.27])) plt.xlabel("Latitude") plt.ylabel("Longitude") plt.gca().set_prop_cycle(None) plt.plot(config["xlim_degree"][0]-10, config["ylim_degree"][0]-10, '.', markersize=10, label=f"{result_label}", rasterized=True) plt.plot(config["xlim_degree"][0]-10, config["ylim_degree"][0]-10, '.', markersize=10, label=f"{catalog_label}", rasterized=True) plt.plot(stations["longitude"], stations["latitude"], 'k^', markersize=5, alpha=0.7, label="Stations") plt.legend(loc="lower right") plt.text(text_loc[0], text_loc[1], '(i)', horizontalalignment='left', verticalalignment="top", transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box) fig.add_subplot(grd[0, 1]) plt.plot(catalog["longitude"], catalog["depth(m)"]/1e3, '.', markersize=2, alpha=1.0, rasterized=True) plt.plot(events["longitude"], 
events["depth(m)"]/1e3, '.', markersize=2, alpha=0.6, rasterized=True) # plt.axis("scaled") plt.xlim(np.array(config["xlim_degree"])+np.array([0.2,-0.27])) plt.ylim([0,21]) plt.gca().invert_yaxis() plt.xlabel("Longitude") plt.ylabel("Depth (km)") plt.gca().set_prop_cycle(None) plt.plot(config["xlim_degree"][0]-10, 31, '.', markersize=10, label=f"{result_label}") plt.plot(31, 31, '.', markersize=10, label=f"{catalog_label}") plt.legend(loc="lower right") plt.text(text_loc[0], text_loc[1], '(ii)', horizontalalignment='left', verticalalignment="top", transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box) fig.add_subplot(grd[1, 1]) plt.plot(catalog["latitude"], catalog["depth(m)"]/1e3, '.', markersize=2, alpha=1.0, rasterized=True) plt.plot(events["latitude"], events["depth(m)"]/1e3, '.', markersize=2, alpha=0.6, rasterized=True) # plt.axis("scaled") plt.xlim(np.array(config["ylim_degree"])+np.array([0.2,-0.27])) plt.ylim([0,21]) plt.gca().invert_yaxis() plt.xlabel("Latitude") plt.ylabel("Depth (km)") plt.gca().set_prop_cycle(None) plt.plot(config["ylim_degree"][0]-10, 31, '.', markersize=10, label=f"{result_label}") plt.plot(31, 31, '.', markersize=10, label=f"{catalog_label}") plt.legend(loc="lower right") plt.tight_layout() plt.text(text_loc[0], text_loc[1], '(iii)', horizontalalignment='left', verticalalignment="top", transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box) plt.savefig(os.path.join(figure_dir, "earthquake_location.png"), bbox_inches="tight", dpi=300) plt.savefig(os.path.join(figure_dir, "earthquake_location.pdf"), bbox_inches="tight", dpi=300) plt.show() # - plt.figure() plt.hist(catalog["magnitude"], range=(-1., events["magnitude"].max()), bins=25, alpha=1.0, edgecolor="k", linewidth=0.5, label=f"{result_label}: {len(catalog['magnitude'])}") plt.hist(events["magnitude"], range=(-1., events["magnitude"].max()), bins=25, alpha=0.6, edgecolor="k", linewidth=0.5, label=f"{catalog_label}: 
{len(events['magnitude'])}") plt.legend() # plt.figure() plt.xlim([-1,events["magnitude"].max()]) plt.xlabel("Magnitude") plt.ylabel("Frequency") plt.gca().set_yscale('log') plt.savefig(os.path.join(figure_dir, "earthquake_magnitude_frequency.png"), bbox_inches="tight", dpi=300) plt.savefig(os.path.join(figure_dir, "earthquake_magnitude_frequency.pdf"), bbox_inches="tight") plt.show() plt.figure() plt.plot(catalog["time"], catalog["magnitude"], '.', markersize=5, alpha=1.0, rasterized=True) plt.plot(events["time"], events["magnitude"], '.', markersize=5, alpha=0.8, rasterized=True) plt.xlim(config["starttime"], config["endtime"]) ylim = plt.ylim() plt.ylabel("Magnitude") # plt.xlabel("Date") plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m-%d:%H')) plt.gcf().autofmt_xdate() plt.gca().set_prop_cycle(None) plt.plot(config["starttime"], -10, '.', markersize=15, alpha=1.0, label=f"{result_label}: {len(catalog['magnitude'])}") plt.plot(config["starttime"], -10, '.', markersize=15, alpha=1.0, label=f"{catalog_label}: {len(events['magnitude'])}") plt.legend() plt.ylim(ylim) plt.grid() plt.savefig(os.path.join(figure_dir, "earthquake_magnitude_time.png"), bbox_inches="tight", dpi=300) plt.savefig(os.path.join(figure_dir, "earthquake_magnitude_time.pdf"), bbox_inches="tight", dpi=300) plt.show() fig = plt.figure(figsize=plt.rcParams["figure.figsize"]*np.array([0.8,1.1])) box = dict(boxstyle='round', facecolor='white', alpha=1) text_loc = [0.05, 0.90] plt.subplot(311) plt.plot(catalog["time"], catalog["sigma_time"], '.', markersize=3.0, label="Travel-time") plt.ylim([0, 3]) plt.ylabel(r"$\Sigma_{11}$ (s)$^2$") plt.legend(loc="upper right") plt.text(text_loc[0], text_loc[1], '(i)', horizontalalignment='left', verticalalignment="top", transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box) plt.subplot(312) plt.plot(catalog["time"], catalog["sigma_amp"], '.', markersize=3.0, label="Amplitude") plt.ylim([0, 1]) plt.ylabel(r"$\Sigma_{22}$ 
($\log10$ m/s)$^2$") plt.legend(loc="upper right") plt.text(text_loc[0], text_loc[1], '(ii)', horizontalalignment='left', verticalalignment="top", transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box) plt.subplot(313) plt.plot(catalog["time"], catalog["cov_time_amp"], '.', markersize=3.0, label="Travel-time vs. Amplitude") plt.ylabel(r"$\Sigma_{12}$") plt.ylim([-0.5, 0.5]) plt.legend(loc="upper right") plt.text(text_loc[0], text_loc[1], '(iii)', horizontalalignment='left', verticalalignment="top", transform=plt.gca().transAxes, fontsize="large", fontweight="normal", bbox=box) plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m-%d:%H')) plt.gcf().autofmt_xdate() # plt.suptitle(r"Covariance Matrix ($\Sigma$) Coefficients") plt.tight_layout() plt.gcf().align_labels() plt.savefig(os.path.join(figure_dir, "covariance.png"), bbox_inches="tight", dpi=300) plt.savefig(os.path.join(figure_dir, "covariance.pdf"), bbox_inches="tight")
tests/GaMMA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/paul028/myJupyterNotebook/blob/master/CIFAR10_CNN_TPU.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="ue90VvMQV_UO" colab_type="text"
# **Image CLassifier trained using CIFAR10 dataset using 3-layer Convolutional Neural Network**
#
# 1. This model was trained Using TPU from google Colab
# 2. Implements Data Augmentation
# 3. Implements Regularization Technique
#    * Dropout
#    * Kernel Regularizer
#    * Batch Normalization

# + [markdown] id="Fvb-PvEpXQub" colab_type="text"
# 1. Import all the Necessary Libraries

# + id="b3PGh4qDX0Ay" colab_type="code" colab={}
#import tensorflow as tf
from tensorflow.keras.models import Sequential
from keras.utils import np_utils
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization, Conv2D, MaxPooling2D, Input
from tensorflow.keras.datasets import cifar10
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import LearningRateScheduler
import matplotlib.pyplot as plt
# BUG FIX: `from scipy.misc import toimage` removed — `toimage` was
# deprecated in SciPy 1.0 and deleted in SciPy 1.3, so the import fails on
# any modern environment.  The preview below rescales manually instead.
import numpy as np
import os
import tensorflow as tf

# + [markdown] id="LCh7QEQQX3Yg" colab_type="text"
# 2. Call the Address of the TPU

# + id="8hfEfr7EX5Yv" colab_type="code" colab={}
# This address identifies the TPU we'll use when configuring TensorFlow.
TPU_WORKER = 'grpc://' + os.environ['COLAB_TPU_ADDR']
tf.logging.set_verbosity(tf.logging.INFO)

# + [markdown] id="A7lrsjApYCVX" colab_type="text"
# 3. Prepare the Dataset

# + id="KqQTN52wYEAm" colab_type="code" colab={}
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')

# z-score: normalize with the training-set statistics
mean = np.mean(x_train, axis=(0, 1, 2, 3))
std = np.std(x_train, axis=(0, 1, 2, 3))
x_train = (x_train - mean) / (std + 1e-7)
x_test = (x_test - mean) / (std + 1e-7)

num_classes = len(np.unique(y_train))
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)

# + [markdown] id="zJuUBNvNYUqO" colab_type="text"
# 4. Dataset Augmentation

# + id="bwMt2nwLYYW-" colab_type="code" colab={}
#data augmentation
datagen = ImageDataGenerator(
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
)
datagen.fit(x_train)


def _as_display_image(img):
    """Min-max rescale a z-scored float image to [0, 1] for plt.imshow.

    Replaces the removed ``scipy.misc.toimage``, which applied the same
    per-image auto-scaling when converting to a displayable image.
    """
    img = img.reshape(32, 32, 3)
    lo = img.min()
    rng = img.max() - lo
    return (img - lo) / rng if rng else np.zeros_like(img)


for X_batch, y_batch in datagen.flow(x_train, y_train, batch_size=128):
    # Show 9 images
    for i in range(0, 9):
        plt.subplot(330 + 1 + i)
        plt.imshow(_as_display_image(X_batch[i]))
    # show the plot
    plt.show()
    break

# + [markdown] id="tnJb3y7lYktT" colab_type="text"
# 5. Initialize the Parameters

# + id="mo9H4nciYkHD" colab_type="code" colab={}
batch_size = 64
weight_decay = 1e-4  # L2 penalty applied to every conv kernel

# + [markdown] id="1ofxA3bMYtjK" colab_type="text"
# 6. Prepare the Model

# + id="EtWgVTMXYttK" colab_type="code" colab={}
# Three conv "stages" (32 -> 64 -> 128 filters), each: 2x(conv+BN) + pool + dropout.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay), input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))

model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))

model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))

model.summary()

opt_rms = tf.keras.optimizers.RMSprop(lr=0.001, decay=1e-6)
model.compile(loss='categorical_crossentropy', optimizer=opt_rms, metrics=['accuracy'])

# + [markdown] id="GWAaCKaGY9Ee" colab_type="text"
# 7. Define a Function for Changing Learning Rate

# + id="rQP_CR54Y-I_" colab_type="code" colab={}
def lr_schedule(epoch):
    """Step the learning rate down after epochs 75 and 100."""
    lrate = 0.001
    if epoch > 75:
        lrate = 0.0005
    if epoch > 100:
        lrate = 0.0003
    return lrate

# + [markdown] id="qbezGUlBZB-C" colab_type="text"
# 8. Convert the Model to TPU

# + id="5aLKAAexZFZj" colab_type="code" colab={}
# tf.contrib APIs are TF1-only; this notebook targets the Colab TF1 runtime.
tpu_model = tf.contrib.tpu.keras_to_tpu_model(
    model,
    strategy=tf.contrib.tpu.TPUDistributionStrategy(
        tf.contrib.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])))

# + [markdown] id="EEvt7ShjZM9i" colab_type="text"
# 9. Train the Network

# + id="KGDOm6buep_Z" colab_type="code" outputId="5d91107f-2508-4f8c-<PASSWORD>-dee<PASSWORD>" colab={"base_uri": "https://localhost:8080/", "height": 8418}
tpu_model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),\
                        steps_per_epoch=x_train.shape[0] // batch_size, epochs=125,\
                        verbose=1, validation_data=(x_test, y_test), callbacks=[LearningRateScheduler(lr_schedule)])

# + [markdown] id="evAzSVkWZhVr" colab_type="text"
# 10. Testing

# + id="khzFCOuvZo6L" colab_type="code" colab={}
scores = tpu_model.evaluate(x_test, y_test, batch_size=128, verbose=1)
print("\nTest result: %.3f loss: %.3f" % (scores[1]*100, scores[0]))

# + [markdown] id="EN7g0_SXX57Y" colab_type="text"
# **References**
#
# [[1] Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
#
# [[2] Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift ](https://arxiv.org/abs/1502.03167)
CIFAR10_CNN_TPU.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Name: <NAME>
# ### Matic number: 21120612445
# ### Email: <EMAIL>

# Reworked so each project is a pure, testable function; the interactive
# prompts and printed output are unchanged and run from main() only when the
# script is executed directly, so importing it has no side effects.


# ## Project 1
# * INPUT number a; IF a > 17 print 2*(a-17), ELSE print |a-17|
#
# ![image](images/project_1.jpg)
def project1(a):
    """Return double the excess over 17, or the absolute difference if a <= 17."""
    diff = a - 17
    return diff * 2 if a > 17 else abs(diff)


# ## Project 2
# * SUM three numbers; triple the sum when all three are equal
#
# ![image](images/project_2.jpg)
def project2(a, b, c):
    """Return a+b+c, tripled when the three numbers are all equal."""
    total = a + b + c
    return 3 * total if a == b == c else total


# ## Project 3
# * TRUE when a == b, or a+b == 5, or a-b == 5
#
# ![image](images/project_3.jpg)
def project3(a, b):
    """Return True when the numbers are equal or their sum/difference is 5."""
    return a == b or a + b == 5 or a - b == 5


# ## Project 4
# * Report maximum, minimum, and middle of three numbers
#   (middle = sum - max - min)
#
# ![image](images/project_4.jpg)
def project4(a, b, c):
    """Return (maximum, minimum, middle) of the three numbers."""
    values = (a, b, c)
    middle = a + b + c - max(values) - min(values)
    return max(values), min(values), middle


# ## Project 5
# * SUM the cubes of the positive integers smaller than a
#
# ![image](images/project_5.jpg)
def project5(a):
    """Return the sum of i**3 for every positive integer i < a."""
    return sum(i ** 3 for i in range(1, a))


def main():
    """Run the five interactive exercises with the original prompts/output."""
    # Project 1
    print(project1(int(input("input number:"))))

    # Project 2
    a = int(input("input 1st number:"))
    b = int(input("input 2nd number:"))
    c = int(input("input 3rd number:"))
    print(project2(a, b, c))

    # Project 3
    a = int(input("Input 1st number:"))
    b = int(input("Input 2nd number:"))
    print("True" if project3(a, b) else "False")

    # Project 4
    a = int(input("Input 1st number:"))
    b = int(input("Input 2nd number:"))
    c = int(input("input 3rd number:"))
    mx, mn, mid = project4(a, b, c)
    print("maximum = ", mx)
    print("minimum = ", mn)
    print("middle = ", mid)

    # Project 5
    a = int(input("Input a number:"))
    print("Sum of cubes smaller than the specified number: ", project5(a))


if __name__ == "__main__":
    main()
Week 3/class_project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Explore AIMed Dataset # # ### AIMed dataset preprocessing # # - Download from ftp://ftp.cs.utexas.edu/pub/mooney/bio-data/interactions.tar.gz" # # - Convert the raw dataset into XML for using instructions in http://mars.cs.utu.fi/PPICorpora/ # # ```python # convert_aimed.py -i aimed_interactions_input_dir -o aimed.xml # ``` # # Acknowledgements: # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Comparative Analysis of Five Protein-protein Interaction Corpora, LBM'07. 2007. # xmlfile = "aimed.xml" s3_bucket ="s3://aegovan-data/aimed/" import sys sys.path.append('./source') # + import pandas as pd pd.set_option('display.max_columns', None) pd.set_option('display.max_colwidth', 10000) pd.set_option('display.max_rows', 100) import matplotlib.pyplot as plt plt.rcParams.update({'font.size': 12}) # - # #### Covert to pandas from datatransformer.AimedXmlToDataFramePreprocessed import AimedXmlToDataFramePreprocessed df_processed = AimedXmlToDataFramePreprocessed()(xmlfile) df_processed.query("docid=='AIMed.d35'") df_processed.shape df_processed.head(n=2) unique_passages = df_processed["passageid"].unique() print("Unique sentences : " , len(unique_passages)) # ### Explore dataset - Preprocessed # + import matplotlib.pyplot as plt df_processed["isValid"].value_counts().plot.pie(autopct='%.2f',figsize=(4, 4)) plt.show() # - df_processed["isValid"].value_counts() data_file="AIMedFull_preprocessed.json" df_processed.to_json(data_file) # !aws s3 cp $data_file $s3_bucket # ## Split into train/test/validation - Preprocessed # + from sklearn.model_selection import train_test_split from sklearn.model_selection import train_test_split unique_docids = df_processed.docid.unique() stratified = [ df_processed.query("docid == '{}'".format(p))['isValid'].iloc[0] for p 
in unique_docids] traindocid, valdocid = train_test_split(unique_docids, test_size=.1, random_state=777, stratify=stratified) # + train = df_processed[df_processed['docid'].isin(traindocid)] val = df_processed[df_processed['docid'].isin(valdocid)] # + import matplotlib.pyplot as plt fig, ax = plt.subplots(1,2, figsize=(15,20)) ax[0].set_title('Train class distribution') train.isValid.value_counts().plot.pie(autopct='%.2f', ax=ax[0]) ax[1].set_title('Validation class distribution') val.isValid.value_counts().plot.pie(autopct='%.2f', ax=ax[1]) plt.show() # - data_train_file = "AIMedtrain_preprocessed.json" data_val_file = "AIMedval_preprocessed.json" train.to_json(data_train_file) val.to_json(data_val_file) # !aws s3 cp $data_train_file $s3_bucket # !aws s3 cp $data_val_file $s3_bucket val.isValid.value_counts() train.isValid.value_counts().plot.bar() word_count= train.apply(lambda r: len(r["passage"].split(" ")), axis=1) word_count.plot.hist() train.sample(n=20).to_json("Aimedsample_preprocessed.json") # ## Split into train/validation without looking at pubmed overlap - Preprocessed # + from sklearn.model_selection import train_test_split from sklearn.model_selection import train_test_split train_p, val_p = train_test_split(df_processed, test_size=.1, random_state=777, stratify=df_processed["isValid"]) # + import matplotlib.pyplot as plt fig, ax = plt.subplots(1,2, figsize=(15,20)) ax[0].set_title('Train class distribution') train_p.isValid.value_counts().plot.pie(autopct='%.2f', ax=ax[0]) ax[1].set_title('Validation class distribution') val_p.isValid.value_counts().plot.pie(autopct='%.2f', ax=ax[1]) plt.show() # + data_train_file_p = "AIMedtrain_pubmedoverlap_preprocessed.json" data_val_file_p="AIMedval_pubmedoverlap_preprocessed.json" train_p.to_json(data_train_file_p) val_p.to_json(data_val_file_p) # - # !aws s3 cp $data_train_file_p $s3_bucket # !aws s3 cp $data_val_file_p $s3_bucket val_p.isValid.value_counts() train_p.isValid.value_counts() 
train_p.isValid.value_counts().plot.bar() # ## No preprocessing from datatransformer.AimedXmlToDataFrame import AimedXmlToDataFrame df_not_processed = AimedXmlToDataFrame()(xmlfile) df_not_processed["isValid"].value_counts() df_not_processed.query("docid=='AIMed.d35'") # ### Explore dataset - no preprocess # + import matplotlib.pyplot as plt df_not_processed["isValid"].value_counts().plot.pie(autopct='%.2f',figsize=(4, 4)) plt.show() # - df_not_processed["isValid"].value_counts() data_file_not_processed = "AIMedFull.json" df_not_processed.to_json(data_file_not_processed) # !aws s3 cp $data_file_not_processed $s3_bucket # ## Split into train/test/validation - no preprocess # + from sklearn.model_selection import train_test_split from sklearn.model_selection import train_test_split unique_docids = df_not_processed.docid.unique() stratified = [ df_not_processed.query("docid == '{}'".format(p))['isValid'].iloc[0] for p in unique_docids] traindocid_np, valdocid_np = train_test_split(unique_docids, test_size=.1, random_state=777, stratify=stratified) # + train_notprocessed = df_not_processed[df_not_processed['docid'].isin(traindocid_np)] val_notprocessed = df_not_processed[df_not_processed['docid'].isin(valdocid_np)] # + import matplotlib.pyplot as plt fig, ax = plt.subplots(1,2, figsize=(15,20)) ax[0].set_title('Train class distribution') train_notprocessed.isValid.value_counts().plot.pie(autopct='%.2f', ax=ax[0]) ax[1].set_title('Validation class distribution') val_notprocessed.isValid.value_counts().plot.pie(autopct='%.2f', ax=ax[1]) plt.show() # - train_file_not_processed = "AIMedtrain.json" val_file_not_processed="AIMedval.json" train_notprocessed.to_json(train_file_not_processed) val_notprocessed.to_json(val_file_not_processed) # !aws s3 cp $train_file_not_processed $s3_bucket # !aws s3 cp $val_file_not_processed $s3_bucket val_notprocessed.isValid.value_counts() train_notprocessed.isValid.value_counts() train_notprocessed.isValid.value_counts().plot.bar() 
word_count= train_notprocessed.apply(lambda r: len(r["passage"].split(" ")), axis=1) word_count.plot.hist() train_notprocessed.sample(n=20).to_json("Aimedsample.json") assert train_notprocessed.shape[0] == train.shape[0] # ## Split into train/validation without looking at pubmed overlap - no preprocess # + from sklearn.model_selection import train_test_split from sklearn.model_selection import train_test_split train_np_p, val_np_p = train_test_split(df_not_processed, test_size=.1, random_state=777, stratify=df_not_processed["isValid"]) # + import matplotlib.pyplot as plt fig, ax = plt.subplots(1,2, figsize=(15,20)) ax[0].set_title('Train class distribution') train_np_p.isValid.value_counts().plot.pie(autopct='%.2f', ax=ax[0]) ax[1].set_title('Validation class distribution') val_np_p.isValid.value_counts().plot.pie(autopct='%.2f', ax=ax[1]) plt.show() # - train_np_p.to_json("AIMedtrain_pubmedoverlap.json") val_np_p.to_json("AIMedval_pubmedoverlap.json") train_np_p.isValid.value_counts().plot.bar()
AIMedDataExploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Converts an AmpGen D -> K pi pi pi amplitude model into GooFit source code,
# printing colored summaries of the decay lines along the way.

# + [markdown] slideshow={"slide_type": "slide"}
# Just in case someone is still stuck on Python 2:
# -

from __future__ import print_function

# DecayLanguage imports

# + slideshow={"slide_type": "slide"}
from decaylanguage.particle import Particle, SpinType
from decaylanguage.decay.goofit import GooFitChain, SF_4Body
# -

# Pretty colors

from plumbum import colors

# Force colored output even when stdout is not a terminal.
colors.use_color = 2

# Parse the AmpGen model file into GooFit decay-chain lines and the list of
# particle states appearing in the model.
lines, all_states = GooFitChain.read_ampgen('../models/DtoKpipipi_v2.txt')

# ### Look at an example line

lines[0]

# Group the decay lines by spin configuration; print each group's spin
# factors (taken from the group's first line) and then its member lines.
for seen_factor in {p.spindetails() for p in lines}:
    my_lines = [p for p in lines if p.spindetails() == seen_factor]
    print(colors.bold | seen_factor, ":", *my_lines[0].spinfactors)
    for line in my_lines:
        colors.blue.print(' ', line)

# One colored row per SpinType listing every known particle of that spin.
for spintype, c in zip(SpinType, colors):
    ps = [c | format(str(p), '11') for p in GooFitChain.all_particles if p.spin_type == spintype]
    print(c & colors.bold | "{:>12}:".format(spintype.name), *ps)

# Summary table: index, decay line, spin-factor count, and orbital angular
# momentum L with its allowed [min-max] range.
for n, line in enumerate(lines):
    print(colors.bold | '{:2}'.format(n),
          '{:<70}'.format(str(line)),
          colors.bold & colors.blue | 'spinfactors:', colors.blue | str(len(line.spinfactors)),
          colors.bold & colors.magenta | 'L:',
          colors.magenta | '{0} [{1[0]}-{1[1]}]'.format(line.L, line.L_range()))

# + [markdown] slideshow={"slide_type": "slide"}
# ### We can make the GooFit Intro code:

# +
colors.bold.print('All discovered spin configurations:')
for line in sorted({line.spindetails() for line in lines}):
    print(line)

# +
colors.bold.print('All known spin configurations:')
for e in SF_4Body:
    print(e.name)
# -

print(GooFitChain.make_intro(all_states))

colors.green.print(' // Parameters')
print(GooFitChain.make_pars())

# + [markdown] slideshow={"slide_type": "subslide"}
# ### And the lines can be turned into code, as well:
# -

colors.green.print(' // Lines')
for n, line in enumerate(lines):
    colors.green.print(' // Line', n)
    # all_states[1:] skips the parent state; each line becomes a GooFit snippet.
    print(line.to_goofit(all_states[1:]), end='\n\n\n')
notebooks/AmpGen2GooFit D2K3p.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: proj_env # language: python # name: proj_env # --- from fastquant import get_stock_data import numpy as np import pandas as pd import matplotlib.pyplot as plt import math from sklearn.ensemble import RandomForestRegressor from scipy.ndimage.filters import gaussian_filter sigma = 1 d = 5 def data_prep(data): return data['close'].pct_change().iloc[1:].values training_data = get_stock_data('GME','2000-01-01','2021-01-01') testing_data = get_stock_data('GME','2021-01-01','2021-05-11') train_data = training_data['close'].values train_obs = data_prep(training_data) train_obs = gaussian_filter(train_obs, sigma=sigma) fig, ax = plt.subplots(figsize=(15,5)) ax.set_title('SPY') time = range(len(train_obs)) ax.plot(time,train_obs,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='train') ax.set_xlabel('time') ax.set_ylabel('stock price ($)') ax.set_xticks(np.arange(0,len(train_obs)+10,50)) ax.set_xlim(0,len(train_obs)+10) ax.xaxis.grid(True,ls='--') ax.yaxis.grid(True,ls='--') ax.legend() plt.show() # + # build the x as the observation from (O_i,...,O_i+d) # y is O_i+d x_train, y_train = [],[] for i in range(d, len(train_obs)): x_train.append(train_obs[i-d:i]) y_train.append(train_obs[i]) x_train,y_train = np.array(x_train),np.array(y_train) y_train = np.reshape(y_train, (*y_train.shape,1)) # - x_train regr = RandomForestRegressor(n_estimators=1000, max_depth=25, max_samples=150, random_state=0) regr.fit(x_train, y_train) preds = regr.predict(x_train) fig, ax = plt.subplots(figsize=(15,5)) ax.set_title('SPY') time = range(len(preds)) ax.plot(time,y_train,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual') ax.plot(time,preds,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='preds') ax.set_xlabel('time') ax.set_ylabel('stock price 
($)') ax.set_xticks(np.arange(0,len(preds)+10,10)) ax.set_xlim(0,len(preds)+10) ax.xaxis.grid(True,ls='--') ax.yaxis.grid(True,ls='--') ax.legend() plt.show() # + test_data = testing_data['close'].values last = training_data.iloc[-1].to_dict() row = pd.DataFrame(last, index=[0]) row['dt'] = None testing_data = testing_data.reset_index() testing_data = pd.concat([row,testing_data], ignore_index=True) test_obs = data_prep(testing_data) test_labels = test_obs.copy() print(len(test_obs)) print(len(test_labels)) test_obs = gaussian_filter(test_obs, sigma=sigma) test_obs = np.concatenate((train_obs[-d:], test_obs), axis=0) # build the x as the observation from (O_i,...,O_i+d) # y is O_i+d x_test, y_test = [],[] index = 0 for i in range(d, len(test_obs)): x_test.append(test_obs[i-d:i]) y_test.append(test_labels[index]) index += 1 x_test,y_test = np.array(x_test),np.array(y_test) y_test = np.reshape(y_test, (*y_test.shape,1)) # - print(len(test_obs)) print(len(test_data)) preds = regr.predict(x_test) len(preds) fig, ax = plt.subplots(figsize=(15,5)) ax.set_title('SPY') time = range(len(preds)) ax.plot(time,y_test,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual') ax.plot(time,preds,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='preds') ax.set_xlabel('time') ax.set_ylabel('stock price ($)') ax.set_xticks(np.arange(0,len(preds)+10,10)) ax.set_xlim(0,len(preds)+10) ax.xaxis.grid(True,ls='--') ax.yaxis.grid(True,ls='--') ax.legend() plt.show() pred_close = [] closes = testing_data['close'].values for i,pred in enumerate(preds): pred_close.append(pred*closes[i-1]+closes[i-1]) truth = testing_data['close'].values[1:] fig, ax = plt.subplots(figsize=(15,5)) ax.set_title('SPY') time = range(len(preds)) ax.plot(time,truth,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual') ax.plot(time,pred_close,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='preds') 
ax.set_xlabel('time') ax.set_ylabel('stock price ($)') ax.set_xticks(np.arange(0,len(preds)+10,10)) ax.set_xlim(0,len(preds)+10) ax.xaxis.grid(True,ls='--') ax.yaxis.grid(True,ls='--') ax.legend() plt.show() from sklearn.ensemble import AdaBoostRegressor regr = AdaBoostRegressor(n_estimators=250, random_state=0) regr.fit(x_train, y_train) preds = regr.predict(x_train) fig, ax = plt.subplots(figsize=(15,5)) ax.set_title('SPY') time = range(len(preds)) ax.plot(time,y_train,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual') ax.plot(time,preds,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='preds') ax.set_xlabel('time') ax.set_ylabel('stock price ($)') ax.set_xticks(np.arange(0,len(preds)+10,10)) ax.set_xlim(0,len(preds)+10) ax.xaxis.grid(True,ls='--') ax.yaxis.grid(True,ls='--') ax.legend() plt.show() preds = regr.predict(x_test) fig, ax = plt.subplots(figsize=(15,5)) ax.set_title('SPY') time = range(len(preds)) ax.plot(time,y_test,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual') ax.plot(time,preds,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='preds') ax.set_xlabel('time') ax.set_ylabel('stock price ($)') ax.set_xticks(np.arange(0,len(preds)+10,10)) ax.set_xlim(0,len(preds)+10) ax.xaxis.grid(True,ls='--') ax.yaxis.grid(True,ls='--') ax.legend() plt.show() pred_close = [] closes = testing_data['close'].values for i,pred in enumerate(preds): pred_close.append(pred*closes[i-1]+closes[i-1]) truth = testing_data['close'].values[1:] fig, ax = plt.subplots(figsize=(15,5)) ax.set_title('SPY') time = range(len(preds)) ax.plot(time,truth,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual') ax.plot(time,pred_close,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='preds') ax.set_xlabel('time') ax.set_ylabel('stock price ($)') ax.set_xticks(np.arange(0,len(preds)+10,10)) ax.set_xlim(0,len(preds)+10) 
ax.xaxis.grid(True,ls='--') ax.yaxis.grid(True,ls='--') ax.legend() plt.show() from sklearn.ensemble import GradientBoostingRegressor regr = GradientBoostingRegressor(n_estimators=250, loss='huber', learning_rate=0.1, subsample=0.9, max_depth=10, random_state=0) regr.fit(x_train, y_train) preds = regr.predict(x_train) fig, ax = plt.subplots(figsize=(15,5)) ax.set_title('SPY') time = range(len(preds)) ax.plot(time,y_train,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual') ax.plot(time,preds,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='preds') ax.set_xlabel('time') ax.set_ylabel('stock price ($)') ax.set_xticks(np.arange(0,len(preds)+10,10)) ax.set_xlim(0,len(preds)+10) ax.xaxis.grid(True,ls='--') ax.yaxis.grid(True,ls='--') ax.legend() plt.show() preds = regr.predict(x_test) regr.score(x_test,y_test) fig, ax = plt.subplots(figsize=(15,5)) ax.set_title('SPY') time = range(len(preds)) ax.plot(time,y_test,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual') ax.plot(time,preds,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='preds') ax.set_xlabel('time') ax.set_ylabel('stock price ($)') ax.set_xticks(np.arange(0,len(preds)+10,10)) ax.set_xlim(0,len(preds)+10) ax.xaxis.grid(True,ls='--') ax.yaxis.grid(True,ls='--') ax.legend() plt.show() len(testing_data) len(preds) pred_close = [] closes = testing_data['close'].values opens = testing_data['open'].values[1:] for i,pred in enumerate(preds): if i == 0: pred_close.append(pred*training_data['close'].values[-1]+training_data['close'].values[-1]) else: pred_close.append(pred*closes[i-1]+closes[i-1]) truth = testing_data['close'].values[1:] fig, ax = plt.subplots(figsize=(15,5)) ax.set_title('SPY') time = range(len(preds)) ax.plot(time,truth,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual') 
#ax.plot(time,opens,color='tab:green',marker='s',markersize=2,linestyle='-',linewidth=1,label='opens') ax.plot(time,pred_close,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='preds') ax.set_xlabel('time') ax.set_ylabel('stock price ($)') ax.set_xticks(np.arange(0,len(preds)+10,10)) ax.set_xlim(0,len(preds)+10) ax.xaxis.grid(True,ls='--') ax.yaxis.grid(True,ls='--') ax.legend() plt.show()
frac_change_forecasting/regressors/regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Imports import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import re from sklearn import metrics # import cvxopt # <- installation via conda recommended from collections import defaultdict from tqdm import tqdm from sklearn.feature_extraction.text import CountVectorizer from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression import nltk import scipy.optimize as sopt import scipy.stats as sstats import csv from scipy.linalg import solve_triangular # # Motivation # Given LOB (Limit Order Book) data and certain points in time our goal is to predict, whether the price will increase or decrease. <br> # We will be using LOB data from London stock market, collected for September 2013. <br> # Main method used is Logistic regression. <br> # # Data description # Every row of our data represents all active ask and bid orders in some point in time. Row can be described as follows: # # $date/time$ $'BID'$ $p_{b1}$ $w_{b1}$ $p_{b2}$ $w_{b2}$ ... $p_{bn}$ $w_{bn}$ $'ASK'$ $p_{a1}$ $w_{a1}$ $p_{a2}$ $w_{a2}$ ... $p_{am}$ $w_{am}$, # where $p_b$, $w_b$ are prices and size of bid order and $p_a$, $w_a$ are prices and sizes of ask order. Prices $p_x$ are sorted ascending. <br> # # LOB data is often represented as 3-element tuples $(p_x,w_x,t_x)$, where $p_x,w_x,t_x$ represent price, size and time of $xth$ order and $w_x$ is greater than zero for ask order. # # In our case it is convenient to represent the data as a list of pairs, where first element of each pair is bid orders list and second one is ask orders lists. 
# <br>
#
# More formally let $$data = D$$ and for given time ${i}$, $${D_i} = ({BID_i}, {ASK_i})$$ $$BID_{ix} = ({p_x}, {w_x})$$ $$ASK_{ix} = ({p_x}, {w_x})$$ for some index $x$. <br>
# Moreover bid and ask lists contain $(p_x, w_x)$ pairs, where $w_x > 0$ for all orders.
#
# We consider orders from $8:30$ to $16:30$ to eliminate abnormal trading behaviour that can occur shortly after the opening auction or shortly before closing auction.

# # Data loading

def load_data(path, start_time=83000000, stop_time=163000000):
    """Load LOB snapshots from a tab-delimited file.

    Each row is expected to look like:
        "<date> <time>"  'BID'  p w  p w ...  'ASK'  p w  p w ...

    Parameters
    ----------
    path : str
        Path to the snapshot file.
    start_time, stop_time : int
        Inclusive time-of-day bounds (integer HHMMSSmmm); rows whose
        time falls outside [start_time, stop_time] are skipped.

    Returns
    -------
    list of (BID_list, ASK_list) tuples, where each inner list holds
    (price, size) pairs as floats.
    """
    X = []
    with open(path, newline='') as file:
        csv_reader = csv.reader(file, delimiter='\t')
        for row in csv_reader:
            date, time = map(int, row[0].split(' '))
            if time < start_time or time > stop_time:
                continue
            line = 2  # row[1] is the 'BID' marker; (price, size) pairs start at index 2
            ASK_list, BID_list = [], []
            while line < len(row):
                if row[line] == 'ASK':
                    break
                p, w = map(float, row[line: line + 2])
                BID_list.append((p, w))
                line += 2
            line += 1  # step over the 'ASK' marker itself
            while line < len(row):
                p, w = map(float, row[line: line + 2])
                ASK_list.append((p, w))
                line += 2
            X.append((BID_list, ASK_list))
    return X

# Raw string avoids the invalid escape sequences (\P, \O) that a plain
# string literal would warn about; the path value is unchanged.
path = r"C:\Projekt_ED\OrderBookSnapshots.csv"
data = load_data(path)

len(data)

# # Data visualization
# ## Whole data
# (One can see that if ask and bid prices intersect the transaction can be made.)
# + BID, ASK = [], [] for t in data: BID_list = t[0] ASK_list = t[1] BID += BID_list ASK += ASK_list BID = np.array(BID) ASK = np.array(ASK) # - plt.figure(figsize=(15, 5)) plt.scatter(BID[:, 0], BID[:, 1], c='green', alpha=0.6, edgecolors='black', label='BID', s=60) plt.scatter(ASK[:, 0], ASK[:, 1], c='red', alpha=0.6, edgecolors='black', label='ASK', s=60) plt.legend(loc='lower left') # ## In some fixed time interval # ${t}=100$ # + BID, ASK = data[100][0], data[100][1] BID, ASK = np.array(BID), np.array(ASK) plt.figure(figsize=(15, 5)) plt.bar(BID[:, 0], BID[:, 1], width=0.3, color='green', alpha=0.6, label='BID') plt.bar(ASK[:, 0], ASK[:, 1], width=0.3, color='red', alpha=0.6, label='ASK') plt.vlines(x=3030.5, ymin=0, ymax=10000, label='mid price', linestyles='dashed') plt.legend(loc='upper left') plt.xlim(3026, 3035) plt.ylim(0, 6000) # - # # Data process functions # At given time $t$, the bid price $b(t)$ is the highest stated price among active buy orders, # $$b(t) = \max_{x \in BIDlist(t)} p_x $$ # and the ask price $a(t)$ is the lowest stated price among active sell orders, # $$a(t) = \min_{x \in ASKlist(t)} p_x $$ # The mid price at time $t$ is # $$m(t) = \frac{a(t)+b(t)}{2} $$ # # The bid size $n_b(t)$ is total size of active buy orders with price equal to bid price # $$n_b(t) = \sum_{x \in BIDlist(t) | px = b(t)} w_x $$ # and ask size $n_b(t)$ is total size of active sell orders with price equal to ask price # $$n_a(t) = \sum_{x \in ASKlist(t) | px = a(t)} w_x $$ # # At a given time $t$, the queue imbalance $I(t)$ is normalized difference between $n_b(t)$ and $n_a(t)$ # $$I(t) = \frac{n_b(t) - n_a(t)}{n_b(t) + n_a(t)} $$ # # # We can expend those definitions considering k highest (lowest) bid (ask) prices. 
# <center>$b_k(t) = k-th$ highest price $\in BIDlist(t)$</center> # # <center>$a_k(t) = k-th$ lowest price $\in ASKlist(t)$</center> # # <center>$n_{k,b}(t) = \sum_{x \in BIDlist(t) | px \geqslant b_k(t)} w_x $</center> # # <center>$n_{k,a}(t) = \sum_{x \in ASKlist(t) | px \leqslant a_k(t)} w_x $</center> # # At a given time $t$, the $k-th$ queue imbalance $I_k(t)$ is normalized difference between $n_{k,b}(t)$ and $n_{k,b}(t)$ # <center>$I_k(t) = \frac{n_{k,b}(t) - n_{k,a}(t)}{n_{k,b}(t) + n_{k,a}(t)} $</center> # def bid_price(data,t): return data[t][0][-1][0] def ask_price(data,t): return data[t][1][0][0] def mid_price(data,t): return (bid_price(data,t) + ask_price(data,t))/2 def bid_size(data,t): return data[t][0][-1][1] def ask_size(data,t): return data[t][1][0][1] def queue_imbalance(data,t): nb = bid_size(data,t) na = ask_size(data,t) return (nb-na)/(nb+na) def queue_imbalance_k(data,t,k=2): sb = 0 sa = 0 for i in range(k): sb += data[t][0][-(i+1)][1] sa += data[t][1][i][1] return (sb-sa)/(sb+sa) # ## Midprices # If we plot how midprices changed over the time we will get typical auction value graph. <br> # One can see how hard it is to predict, whether the price goes up or down. # + M_x = [] for i in range(len(data)): M_x.append(mid_price(data, i)) plt.figure(figsize=(15,5)) plt.plot(range(len(data)), M_x) plt.ylabel('midprice') plt.xlabel('time') plt.show() # - # # Target defining # First in order to obtain binary targets we consider only moments when the price does change. <br> # Thus let us define vector of those moments: # $$T = [t_x | m(t_x^0) \neq m(t_{x-1}^0)],$$ where $t_x^0$ are all times included in dataset.<br> # Size of this vector equals $N = |T| $. <br> # # Now targets are defined as 1 if price increases, 0 otherwise. I.e. 
# $$ # target_x = # \begin{cases} # 1 & \text{ if } \ m(t_{x+1}) > m(t_{x}) \\ # 0 & \text{ if } \ m(t_{x+1}) < m(t_{x}) # \end{cases} # $$ # # Furthermore we shift $T$ forwards by setting ${T_0}$ as 0, <br> # because we want to model ${target_x}$ that happend after moment ${t_x}$. def get_time_and_target(data): T = [0] target = [] for t in range(1, len(data)): t_1 = T[-1] mt = mid_price(data, t) mt_1 = mid_price(data, t_1) if mt != mt_1: T.append(t) if mt > mt_1: target.append(1) else: target.append(0) return np.array(T[:-1]), np.array(target) T, target = get_time_and_target(data) vals,counts = np.unique(target,return_counts=True) for i,v in enumerate(vals): print(f'Number of data with target = {v}: {counts[i]}') print(f'Ratio of target = {vals[0]} for train data: {counts[0] / counts.sum()}') # # Data matrix definition # Now we can define our data matrix. # $$ # \begin{bmatrix} # I_1(t_0) & I_2(t_0) &I_3(t_0) & \dots & I_K(t_0) \\ # I_1(t_1) & I_2(t_1) &I_3(t_1) & \dots & I_K(t_1) \\ # \vdots & \vdots & \vdots & \ddots & \vdots \\ # I_1(t_N) & I_2(t_N) &I_3(t_N) & \dots & I_K(t_N) \\ # \end{bmatrix} # $$ # We can notice, that for $K=1$ our data matrix is equal to: # $$ # \begin{bmatrix} # I(t_0) \\ # I(t_1) \\ # \vdots \\ # I(t_N) \\ # \end{bmatrix} # $$ K = 2 X = np.array([[queue_imbalance_k(data,t,k) for k in range(1, K+1)] for t in T]) X.dtype # ## Train test split # We split the data using 80% as train and 20% as test. X_train, X_test, y_train, y_test = train_test_split( X, target, test_size=0.2, random_state=42, shuffle=False) # ## Baseline # We define baseline accuracy for LOB data, so later we can compare it with our model. 
# Class balance on the train split; the majority-class share is the
# baseline accuracy any classifier has to beat.
vals_train,counts_train = np.unique(y_train,return_counts=True)
for i,v in enumerate(vals_train):
    print(f'Number of train data with target = {v}: {counts_train[i]}')
    print(f'Baseline for target = {v} for train data: {counts_train[i] / counts_train.sum()}')

print()

# Same per-class counts and baseline for the test split.
vals_test,counts_test = np.unique(y_test,return_counts=True)
for i,v in enumerate(vals_test):
    print(f'Number of test data with target = {v}: {counts_test[i]}')
    print(f'Baseline for target = {v} for train data: {counts_test[i] / counts_test.sum()}')


def print_score(preds,Y,name):
    # Print accuracy and the confusion matrix of predictions `preds`
    # against labels `Y` under the heading `name`; returns
    # (confusion matrix, total sample count, accuracy).
    print(name)
    acc = np.mean(preds == Y)
    print(f"Acc: {acc}")
    # Note: preds is passed as confusion_matrix's first argument, so
    # rows are indexed by the prediction and columns by the true label.
    M = metrics.confusion_matrix(preds, Y)
    N = np.sum(M)
    print('\nConfusion matrix:')
    print(M)
    print(f'\nTrue negative, (price goes down): {M[0][0]}')
    print(f'True positive, (price goes up): {M[1][1]}')
    print(f'False negative: {M[0][1]}')
    print(f'False positive: {M[1][0]}')
    return M,N,acc

# # Logistic regression
# Our goal is to predict if $m_{t+1} > m_t$ using data vector
# $$
# \begin{bmatrix}
# I_1(t) & I_2(t) &I_3(t) & \dots & I_K(t) \\
# \end{bmatrix}
# $$
# We use logistic regression.
# That way we can calculate probability of the sample $x$ belonging to class 1.
# $$p(y=1|x) = \sigma(\theta^Tx) = \frac{1}{1 + e^{-\theta^Tx}}$$ # # We can observe that: # $$ p(y=y^{(i)}|x^{(i)};\Theta) = \sigma(\Theta^Tx)^{y^{(i)}}(1-\sigma(\Theta^Tx))^{(1-y^{(i)})}$$ # # Therefore the negative log likelihood ($nll$) is:$$ # \begin{split} # nll(\Theta) &= -\sum_{i=1}^{N} y^{(i)} \log \sigma(\Theta^Tx) + (1-y^{(i)})\log(1-\sigma(\Theta^Tx)) = \\ # &= -\sum_{i=1}^{N}y^{(i)}\log p(y=1|x^{(i)}; \Theta) + (1-y^{(i)})\log p(y=0|x^{(i)}; \Theta) # \end{split} # $$ # # So we are searching for $\theta$: # $$\theta = arg\,min_{\theta} \ nll(\theta) $$ # # We can further consider logistic regression with regularization, where:$$ # \begin{split} # nll(\Theta) &= -\sum_{i=1}^{N}y^{(i)}\log p(y=1|x^{(i)}; \Theta) + (1-y^{(i)})\log p(y=0|x^{(i)}; \Theta) + \frac{\lambda}{2} \sum_{i}\theta_{i}^{2} # \end{split} # $$ # # There are a few ways to find $\theta$. First we will consider Newtod-Raphson Method and L-BFGS-B solver, then we will compare results with sklearn LogisticRegression. # Newton's method is an iterative method for finding the roots of a differentiable function $F$, which are solutions to the equation $F(x) = 0$. For give start approximation $x_n$ we can calculate better approximation of the root: # $$x_{n+1} = x_n - \frac{f(x)}{f'(x)} $$ # # We can use this method to find root of $F'$, where is local optimum of $F$. # # For given approximation $x_n$ we can calculate better approximation of local optimum: # $$x_{n+1} = x_n - \gamma [f''(x_n)]^{-1} f'(x_n) $$ # $$\text{where} \ 0<\gamma<1,$$ # $$f'(x) = \nabla f(x) \in \mathbb {R} ^{d}$$ # $$ f''(x)=\nabla ^{2}f(x)=H_{f}(x) \in \mathbb {R} ^{d\times d} $$ # $$H_{f}(x) \ \text{is Hessian matrix and} \ \gamma \ \text{is step size.}$$ # ## L-BFGS-B solver # L-BFGS-B solver tries to find optimum of $f$ function using $\nabla f(x)$. 
class Logistic_Regression:
    """Binary logistic regression with L2 regularization.

    Theta includes a bias term: a column of ones is prepended to X in
    both fit() and predict(). Two solvers are supported:
    'l_bfgs_b' (SciPy's fmin_l_bfgs_b, restarted solver_calls times)
    and 'newton' (hand-rolled Newton-Raphson iteration).
    `lambda_` is the L2 penalty strength.
    """

    def __init__(self, max_iter=500, solver_calls=5, lambda_=0.5, Theta=None,
                 solver='l_bfgs_b', debug=False):
        self.Theta = Theta
        self.solver_calls = solver_calls
        self.max_iter = max_iter
        self.solver = solver
        self.debug = debug
        self.lambda_ = lambda_

    def __sigmoid(self,x):
        # Elementwise logistic function 1 / (1 + e^-x).
        return 1 / (1 + np.exp(-x))

    def __gradient(self,x,y,Theta):
        # Ascent direction of the data log-likelihood: X^T (y - sigma(X Theta)).
        # NOTE(review): the penalty enters as +lambda*Theta here, while the
        # matching term for this direction would be -lambda*Theta; combined
        # with the additive Newton update in fit() the data term descends the
        # nll but the regularization sign looks inverted -- verify.
        SZ = self.__sigmoid(np.dot(Theta,x.T))
        return np.dot(x.T, (y-SZ).T) + self.lambda_ * Theta

    def __hessian(self,x,y,Theta):
        # Hessian of the regularized nll:
        # X^T diag(sigma * (1 - sigma)) X + lambda * I  (positive definite).
        SZ = self.__sigmoid(np.dot(Theta,x.T))
        hess = np.dot(x.T,x * (SZ).reshape(-1,1) * (1 - SZ).reshape(-1,1))
        hess += np.eye(hess.shape[0]) * self.lambda_
        return hess

    def __logreg_loss(self, Theta, X, Y):
        # Objective and gradient callback for fmin_l_bfgs_b.
        # Returns (nll, grad) where nll is the regularized negative
        # log-likelihood of labels Y given features X.
        Theta = Theta.astype(np.float64)
        X = X.astype(np.float64)
        Y = Y.astype(np.float64)
        if self.debug: print(f"Loss calculating... ",end="")
        Z = np.dot(Theta,X.T)
        if self.debug: print(f" Z done... ",end="")
        SZ = self.__sigmoid(Z)
        Y_ = Y[:,np.newaxis]
        # The 1e-50 terms guard log2 against exact 0/1 probabilities.
        # NOTE(review): nll is computed in log base 2 while `grad` below is
        # the natural-log gradient (they differ by a factor of ln 2);
        # L-BFGS-B still converges, but objective and gradient are
        # inconsistent with each other -- confirm intended.
        nll = -np.sum((Y_*np.log2(SZ+1e-50) + (1-Y_)*np.log2(1-SZ+1e-50)))
        nll += (self.lambda_/2) * np.sum(Theta**2)
        if self.debug: print(f" nll done... ",end="")
        grad = np.dot(X.T, (SZ - Y).T)
        grad = grad.reshape(Theta.shape) + self.lambda_ * Theta
        if self.debug: print(f" grad done...  done ")
        return nll, grad

    def fit(self,X,y):
        # Estimate self.Theta from features X (N x d) and binary labels y.
        if self.solver == 'l_bfgs_b':
            # Warm-start from self.Theta when provided, else all-ones;
            # the solver is restarted solver_calls times from its own result.
            Theta = self.Theta
            if Theta is None:
                Theta = np.ones(X.shape[1]+1)
            X_with_ones = np.hstack((np.ones((X.shape[0],1)),X))
            for i in tqdm(range(self.solver_calls), desc='Calculating Theta', position=0):
                Theta = sopt.fmin_l_bfgs_b(lambda th: self.__logreg_loss(th, X_with_ones, y), Theta, maxiter=self.max_iter)[0]
            self.Theta = Theta
        elif self.solver == 'newton':
            # Newton-Raphson: Theta <- Theta + H^{-1} g, iterated until the
            # gradient is approximately zero or max_iter is reached.
            X_with_ones = np.hstack((np.ones((X.shape[0],1)),X))
            Theta = np.ones(X.shape[1]+1)
            g = self.__gradient(X_with_ones,y,Theta)
            i = 0
            while not np.all(np.isclose(g, 0, atol=0.00001)) and i < self.max_iter:
                hess_inv = np.linalg.inv(self.__hessian(X_with_ones, y, Theta))
                Theta = np.add(Theta, np.dot(hess_inv, g))
                g = self.__gradient(X_with_ones, y, Theta)
                i += 1
            self.Theta = Theta
        else:
            print(f'Wrong solver!: {self.solver}')

    def predict(self, X, threshold=0.5):
        # Class-1 prediction: sigma(Theta^T x) >= threshold (boolean array).
        X_with_ones = np.hstack((np.ones((X.shape[0],1)),X))
        preds = self.__sigmoid(np.dot(self.Theta, X_with_ones.T)) >= threshold
        # preds = np.dot(self.Theta, X_with_ones.T) >= 0
        return preds

LR_solver = Logistic_Regression(solver='l_bfgs_b')
LR_solver.fit(X_train,y_train)

preds_train_solver = LR_solver.predict(X_train)
M,N,acc = print_score(preds_train_solver,y_train, 'Train data, L-BFGS-B solver, lambda=0.5')

preds_test_solver = LR_solver.predict(X_test)
M,N,acc = print_score(preds_test_solver,y_test, 'Test data, L-BFGS-B solver, lambda=0.5')

# ## Newton-Raphson Method

LR_newton = Logistic_Regression(solver='newton')
LR_newton.fit(X_train, y_train)

preds_train_newton = LR_newton.predict(X_train, threshold=0.5)
M,N,acc = print_score(preds_train_newton,y_train, 'Train data, Newton method, lambda=0.5')

preds_test_newton = LR_newton.predict(X_test)
M,N,acc = print_score(preds_test_newton,y_test, 'Test data, Newton method, lambda=0.5')

# ## Sklearn
# We also consider Sklearn implementation of Logistic Regression with L-BFGS-B solver.
LR_sklearn = LogisticRegression(solver='lbfgs') LR_sklearn.fit(X_train,y_train) preds_train_sklearn = LR_sklearn.predict(X_train) print_score(preds_train_sklearn,y_train, 'Train data, sklearn LR, C=1.0') preds_test_sklearn = LR_sklearn.predict(X_test) print_score(preds_test_sklearn,y_test, 'Test data, sklearn LR, C=1.0') # ## Testing our regression for different Ks # + def plot_bar(X, Y1, Y2, title, x_title, width=0.02, a=0, b=-1): def autolabel(rects): for rect in rects: height = rect.get_height() ax.annotate('{:.2f}%'.format(height * 100), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3), # 3 points vertical offset textcoords="offset points", ha='center', va='bottom') fig, ax = plt.subplots(figsize=(15,5)) rects1 = ax.bar(X[a: b] - width/2, Y1[a: b], width, label='Train') rects2 = ax.bar(X[a: b] + width/2, Y2[a: b], width, label='Test') ax.set_ylabel('Accuracy') ax.set_xlabel(x_title) ax.set_xticks(X[a: b]) ax.set_ylim([0, 0.7]) ax.set_title(title) ax.legend(loc='lower right') autolabel(rects1) autolabel(rects2) def plot(X, Y1, Y2, title, x_title): plt.plot(X, Y1, label='Train') plt.plot(X, Y2, label='Test') plt.title(title) plt.legend(loc='lower left') plt.xlabel(x_title) plt.ylabel('Accuracy') # + train_c, test_c = [], [] k_list = np.arange(1, 11) for K in k_list: X = np.array([[queue_imbalance_k(data,t,k) for k in range(1,K+1)] for t in T]) X_train, X_test, y_train, y_test = train_test_split( X, target, test_size=0.2, random_state=42,shuffle=False) LR = Logistic_Regression(solver='newton') LR.fit(X_train, y_train) preds_train = LR.predict(X_train) train_c.append(np.mean(preds_train == y_train)) preds_test = LR.predict(X_test) test_c.append(np.mean(preds_test == y_test)) train_c = np.array(train_c) test_c = np.array(test_c) # - plot(X=k_list, Y1=train_c, Y2=test_c, title='Accuracy for different Ks', \ x_title='K') plot_bar(X=k_list, Y1=train_c, Y2=test_c, title='Accuracy for different Ks', \ x_title='K', width=0.4, b=len(k_list) // 2) 
plot_bar(X=k_list, Y1=train_c, Y2=test_c, title='Accuracy for different Ks', \ x_title='K', width=0.4, a=len(k_list) // 2, b=len(k_list)) # chosing best K best_k = k_list[np.argmax(test_c)] print(f'Best K: {best_k}') # ## Testing for different Cs # + train_c, test_c = [], [] C_list = np.r_[np.linspace(0.01,1,9), np.linspace(1,10,9)] X = np.array([[queue_imbalance_k(data,t,k) for k in range(1, best_k+1)] for t in T]) X_train, X_test, y_train, y_test = train_test_split( X, target, test_size=0.2, random_state=42,shuffle=False) for C in C_list: LR = Logistic_Regression(solver='newton', lambda_=C) LR.fit(X_train, y_train) preds_train = LR.predict(X_train) train_c.append(np.mean(preds_train == y_train)) preds_test = LR.predict(X_test) test_c.append(np.mean(preds_test == y_test)) train_c = np.array(train_c) test_c = np.array(test_c) # - plot(X=C_list, Y1=train_c, Y2=test_c, title='Accuracy for different Cs',\ x_title='C') plot_bar(X=C_list, Y1=train_c, Y2=test_c, title='Accuracy for different Cs', \ x_title='C', width=0.06, b=len(C_list) // 2) plot_bar(X=C_list, Y1=train_c, Y2=test_c, title='Accuracy for different Cs', \ x_title='C', width=0.5, a=len(C_list) // 2, b=-1) # chose best C best_c = C_list[np.argmax(test_c)] print(f'Best c: {best_c}') # ## Testing different thresholds # + # we will also save FP and TP rate for later train_c, test_c = [], [] FPR, TPR, TNR = [], [], [] threshold = np.linspace(0, 1, 25) X = np.array([[queue_imbalance_k(data,t,k) for k in range(1, best_k+1)] for t in T]) X_train, X_test, y_train, y_test = train_test_split( X, target, test_size=0.2, random_state=42,shuffle=False) for th in threshold: LR = Logistic_Regression(solver='newton', lambda_=best_c) LR.fit(X_train, y_train) preds_train = LR.predict(X_train, threshold=th) train_c.append(np.mean(preds_train == y_train)) preds_test = LR.predict(X_test, threshold=th) test_c.append(np.mean(preds_test == y_test)) M = metrics.confusion_matrix(preds_test, y_test) TP, FP = M[1][1], M[1][0] TN, FN = 
M[0][0], M[0][1] FPR.append(FP / (FP + TN + 1e-50)) TPR.append(TP / (TP + FN + 1e-50)) TNR.append(TN / (TN + FP + 1e-50)) train_c = np.array(train_c) test_c = np.array(test_c) # - plot(X=threshold, Y1=train_c, Y2=test_c, title='Accuracy for different thresholds', \ x_title='threshold') best_accuracy=np.max(test_c) print(f'Best accuracy: {best_accuracy} for K: {best_k}, lambda: {best_c}, \ treshold: {threshold[np.argmax(test_c)]}') plot_bar(X=threshold, Y1=train_c, Y2=test_c, title='Accuracy for different thresholds', \ x_title='threshold', width=0.02, a=len(threshold) // 2 - 3, b=len(threshold) // 2 + 4) # # Receiver operating characteristic # ROC curve, is a graphical plot that illustrates the diagnostic ability of a binary classifier system as its discrimination threshold is varied. <br> # The ROC curve is created by plotting the true positive rate (TPR) against the false positive rate (FPR) at various threshold settings. <br> # # To assess the predictive power of our logistic regressions for performing binary <br> # and probabilistic classification, we compare their output to that of a simple null <br> # model in which we assume that I provides no useful information for predicting <br> # the direction of mid-price movements, such that for all $I$: <br> # # $$y(I) = \frac{1}{2}$$ # # In words, our null model predicts that the probability of an upward or downward price movement is always 50%, irrespective of the queue imbalance. <br> # We calculate area under the curve by a number of trapezoidal approximations. I.e. sum of triangles and rectangles. <br> # def area_under_roc(TPR, FPR): ans = 0 for k in range(1, len(TPR)): triange = abs(FPR[k] - FPR[k-1]) * (TPR[k] - TPR[k-1]) / 2 rectangle = abs((FPR[k] - FPR[k-1]) * min(TPR[k], TPR[k-1])) ans += triange + rectangle return ans # # Results # ## Sensitivity (true positive rate) and Specificity (true negative rate) # Measures the proportion of actual positives/negatives that are correctly identified as such. 
# + plt.figure(num=None, figsize=(7, 5), dpi=80, facecolor='w', edgecolor='k') plt.plot(TNR, TPR, c='orange', label="Logistic Regression") plt.plot([0,1],[1,0], linestyle='--', label="Null Hypothesis") plt.xlabel('Specificity (TNR)') plt.ylabel('Sensitivity (TPR)') plt.title('Sensitivity (TPR) and Specificity (TNR)') plt.legend(loc='lower left') plt.show() print(f'Area under the curve: {area_under_roc(TNR, TPR)}') # - # ## ROC curve: False alarm (FPR) and Sensitivity (TPR) # + plt.figure(num=None, figsize=(7, 5), dpi=80, facecolor='w', edgecolor='k') plt.plot(FPR, TPR, c='orange', label="Logistic Regression") plt.plot([0,1],[0,1], linestyle='--', label="Null Hypothesis") plt.xlabel('False alarm (FPR)') plt.ylabel('Sensitivity (TPR)') plt.title('False alarm (FPR) and Sensitivity (TPR)') plt.legend(loc='upper left') plt.show() print(f'Area under the curve: {area_under_roc(TNR, TPR)}') # - # ## Summary # Baseline accuracy was around 51.6% for test data. <br> # After tuning our Logistic Regression parameters we got results similar to sklearn implementation. # Our model scores 55.88% on test, which is considerably better having in mind that predicting stock market is quite hard task.
LOB_project.ipynb
#!/usr/bin/env python
# ---
# jupyter:
#   jupytext:
#     cell_metadata_filter: -all
#     formats: ipynb,py
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tfrl-cookbook
#     language: python
#     name: tfrl-cookbook
# ---

# Dueling, Double DQN agent training script
# Chapter 3, TensorFlow 2 Reinforcement Learning Cookbook | <NAME>

import argparse
import os
import random
from collections import deque
from datetime import datetime

import gym
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Add, Dense, Input, Lambda
from tensorflow.keras.optimizers import Adam

tf.keras.backend.set_floatx("float64")

# Hyper-parameters; parse_args([]) keeps argparse usable inside a notebook.
parser = argparse.ArgumentParser(prog="TFRL-Cookbook-Ch3-DuelingDoubleDQN")
parser.add_argument("--env", default="CartPole-v0")
parser.add_argument("--lr", type=float, default=0.005)
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument("--gamma", type=float, default=0.95)
parser.add_argument("--eps", type=float, default=1.0)
parser.add_argument("--eps_decay", type=float, default=0.995)
parser.add_argument("--eps_min", type=float, default=0.01)
parser.add_argument("--logdir", default="logs")
args = parser.parse_args([])

logdir = os.path.join(
    args.logdir, parser.prog, args.env, datetime.now().strftime("%Y%m%d-%H%M%S")
)
print(f"Saving training logs to:{logdir}")
writer = tf.summary.create_file_writer(logdir)


class ReplayBuffer:
    """Fixed-capacity FIFO store of (state, action, reward, next_state, done)."""

    def __init__(self, capacity=10000):
        # deque(maxlen=...) evicts the oldest transition automatically.
        self.buffer = deque(maxlen=capacity)

    def store(self, state, action, reward, next_state, done):
        """Append one transition to the buffer."""
        self.buffer.append([state, action, reward, next_state, done])

    def sample(self):
        """Return a uniform random minibatch of ``args.batch_size`` transitions.

        Caller must ensure size() >= args.batch_size; random.sample raises
        ValueError otherwise.
        """
        sample = random.sample(self.buffer, args.batch_size)
        states, actions, rewards, next_states, done = map(np.asarray, zip(*sample))
        states = np.array(states).reshape(args.batch_size, -1)
        next_states = np.array(next_states).reshape(args.batch_size, -1)
        return states, actions, rewards, next_states, done

    def size(self):
        """Number of transitions currently stored."""
        return len(self.buffer)


class DuelingDQN:
    """Dueling Q-network with epsilon-greedy action selection."""

    def __init__(self, state_dim, action_dim):  # param name fixed ("aciton_dim" typo)
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.epsilon = args.eps  # decayed on every get_action() call
        self.model = self.nn_model()

    def nn_model(self):
        """Build Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)).

        Bug fix: the original simply added the V and A streams, which leaves
        them unidentifiable; dueling DQN subtracts the mean advantage
        (Wang et al., 2016) before combining.
        """
        state_input = Input((self.state_dim,))
        fc1 = Dense(32, activation="relu")(state_input)
        fc2 = Dense(16, activation="relu")(fc1)
        value_output = Dense(1)(fc2)
        advantage_output = Dense(self.action_dim)(fc2)
        # Center the advantage stream across the action axis.
        advantage_centered = Lambda(
            lambda a: a - tf.reduce_mean(a, axis=1, keepdims=True)
        )(advantage_output)
        output = Add()([value_output, advantage_centered])
        model = tf.keras.Model(state_input, output)
        model.compile(loss="mse", optimizer=Adam(args.lr))
        return model

    def predict(self, state):
        """Return Q-values for a batch of states."""
        return self.model.predict(state)

    def get_action(self, state):
        """Epsilon-greedy action; epsilon decays toward ``args.eps_min``."""
        state = np.reshape(state, [1, self.state_dim])
        self.epsilon *= args.eps_decay
        self.epsilon = max(self.epsilon, args.eps_min)
        if np.random.random() < self.epsilon:
            return random.randint(0, self.action_dim - 1)
        # Only run the (comparatively expensive) forward pass when exploiting.
        q_value = self.predict(state)[0]
        return np.argmax(q_value)

    def train(self, states, targets):
        """One epoch of supervised fitting toward the TD targets."""
        self.model.fit(states, targets, epochs=1)


class Agent:
    """Double DQN agent: the online network selects the next action, the
    target network evaluates it."""

    def __init__(self, env):
        self.env = env
        self.state_dim = self.env.observation_space.shape[0]
        self.action_dim = self.env.action_space.n

        self.model = DuelingDQN(self.state_dim, self.action_dim)
        self.target_model = DuelingDQN(self.state_dim, self.action_dim)
        self.update_target()  # start both networks with identical weights

        self.buffer = ReplayBuffer()

    def update_target(self):
        """Copy online-network weights into the target network."""
        weights = self.model.model.get_weights()
        self.target_model.model.set_weights(weights)

    def replay_experience(self):
        """Run 10 minibatch updates using the double-DQN target."""
        for _ in range(10):
            states, actions, rewards, next_states, done = self.buffer.sample()
            targets = self.model.predict(states)
            # Double DQN: argmax from the online net, value from the target net.
            next_q_values = self.target_model.predict(next_states)[
                range(args.batch_size),
                np.argmax(self.model.predict(next_states), axis=1),
            ]
            targets[range(args.batch_size), actions] = (
                rewards + (1 - done) * next_q_values * args.gamma
            )
            self.model.train(states, targets)

    def train(self, max_episodes=1000):
        """Run training episodes, logging episode reward to TensorBoard."""
        with writer.as_default():
            for ep in range(max_episodes):
                done, episode_reward = False, 0
                observation = self.env.reset()
                while not done:
                    action = self.model.get_action(observation)
                    next_observation, reward, done, _ = self.env.step(action)
                    # Rewards are scaled down for learning stability; the
                    # logged episode_reward keeps the raw scale.
                    self.buffer.store(
                        observation, action, reward * 0.01, next_observation, done
                    )
                    episode_reward += reward
                    observation = next_observation
                if self.buffer.size() >= args.batch_size:
                    self.replay_experience()
                self.update_target()
                print(f"Episode#{ep} Reward:{episode_reward}")
                tf.summary.scalar("episode_reward", episode_reward, step=ep)


if __name__ == "__main__":
    env = gym.make(args.env)  # bug fix: was hard-coded "CartPole-v0", ignoring --env
    agent = Agent(env)
    agent.train(max_episodes=2)  # Increase max_episodes value
Chapter03/3_dueling_double_dqn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="8f719ff4-001" colab_type="text"
# #1. Install Dependencies
# First install the libraries needed to execute recipes, this only needs to be done once, then click play.
#

# + id="8f719ff4-002" colab_type="code"
# !pip install git+https://github.com/google/starthinker

# + [markdown] id="8f719ff4-003" colab_type="text"
# #2. Get Cloud Project ID
# To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
#

# + id="8f719ff4-004" colab_type="code"
# Billing/project context used by the StarThinker Configuration below.
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'

print("Cloud Project Set To: %s" % CLOUD_PROJECT)

# + [markdown] id="8f719ff4-005" colab_type="text"
# #3. Get Client Credentials
# To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
#

# + id="8f719ff4-006" colab_type="code"
# OAuth client credentials JSON (installed-application flow).
CLIENT_CREDENTIALS = 'PASTE CLIENT CREDENTIALS HERE'

print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)

# + [markdown] id="8f719ff4-007" colab_type="text"
# #4. Enter Say Hello Parameters
# Recipe template for say hello.
# 1. This should be called for testing only.
# Modify the values below for your use case, can be done multiple times, then click play.
#

# + id="8f719ff4-008" colab_type="code"
# User-editable values; json_set_fields() substitutes these into the
# matching {'field': ...} placeholders inside TASKS below.
FIELDS = {
  'auth_read': 'user',  # Credentials used for reading data.
  'say_first': 'Hello Once',  # Type in a greeting.
  'say_second': 'Hello Twice',  # Type in a greeting.
  'error': '',  # Optional error for testing.
  'sleep': 0,  # Seconds to sleep.
}

print("Parameters Set To: %s" % FIELDS)

# + [markdown] id="8f719ff4-009" colab_type="text"
# #5. Execute Say Hello
# This does NOT need to be modified unless you are changing the recipe, click play.
#

# + id="8f719ff4-010" colab_type="code"
from starthinker.util.configuration import Configuration
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields

USER_CREDENTIALS = '/content/user.json'

# Two sequential 'hello' tasks; each {'field': ...} dict is a placeholder
# resolved from FIELDS by json_set_fields() before execution.
TASKS = [
  {
    'hello': {
      'auth': 'user',
      'say': {'field': {'name': 'say_first','kind': 'string','order': 1,'default': 'Hello Once','description': 'Type in a greeting.'}},
      'error': {'field': {'name': 'error','kind': 'string','order': 3,'default': '','description': 'Optional error for testing.'}},
      'sleep': {'field': {'name': 'sleep','kind': 'integer','order': 4,'default': 0,'description': 'Seconds to sleep.'}}
    }
  },
  {
    'hello': {
      'auth': 'user',
      'say': {'field': {'name': 'say_second','kind': 'string','order': 1,'default': 'Hello Twice','description': 'Type in a greeting.'}},
      'sleep': {'field': {'name': 'sleep','kind': 'integer','order': 4,'default': 0,'description': 'Seconds to sleep.'}}
    }
  }
]

# Resolve the placeholders with the user-entered FIELDS, then run the recipe.
json_set_fields(TASKS, FIELDS)

execute(Configuration(project=CLOUD_PROJECT, client=CLIENT_CREDENTIALS, user=USER_CREDENTIALS, verbose=True), TASKS, force=True)
colabs/hello.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# NumPy could do this, but a hand-rolled version is instructive.

# $f(x)=2x+1$
def f(x):
    """Linear function f(x) = 2x + 1."""
    return 2 * x + 1


# $g(x)=(x-1)(x-3)$
def g(x):
    """Quadratic g(x) = (x - 1)(x - 3)."""
    return (x - 1) * (x - 3)


# $\displaystyle \sum_{ i = 2 }^{ 4 } f(i)=21$
def sigma(func, frm, to):
    """Return the sum of func(i) for i = frm..to, both endpoints inclusive.

    Uses the builtin sum() over a generator instead of a manual
    accumulator loop (also drops the stray semicolon the original had).
    """
    return sum(func(i) for i in range(frm, to + 1))


sigma(f, 2, 4)

# $\displaystyle \sum_{ i = 1 }^{ 3 } g(i)=-1$
sigma(g, 1, 3)

# Let's compute $\displaystyle \sum_{ i = 1 }^{ 5 } \frac{ 1 }{ 2^{i} }=\frac{31}{32}$

# $h(x)=\displaystyle \frac{ 1 }{ 2^{x} }$
def h(x):
    """h(x) = 1 / 2**x."""
    return 1 / 2 ** x


# $\displaystyle \sum_{ i = 1 }^{ 5 } h(i)=\frac{31}{32}$
sigma(h, 1, 5)

31 / 32

sum(1 / 2 ** n for n in range(1, 5 + 1))  # This works too!!
sigma function.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + slideshow={"slide_type": "skip"} from IPython.core.display import HTML def css_styling(): styles = open("./styles/custom.css", "r").read() return HTML(styles) css_styling() # - # Open Jupyter notebook: # <br> Start >> Programs >> Programming >> Anaconda3 >> JupyterNotebook # <br>(Start >> すべてのプログラム >> Programming >> Anaconda3 >> JupyterNotebook) # In Jupyter notebook, select the tab with the contents list of the interactive textbook: # # Open __Seminar 1__ by clicking on __1_Data_types_and_operators__. # + [markdown] slideshow={"slide_type": "slide"} # <h1>Data Types and Simple Arithmetic Operators</h1> # # <h1>Lesson Goal</h1> # # Compose and solve simple mathematical problems using Python. # # # + [markdown] slideshow={"slide_type": "slide"} # <h1>Objectives</h1> # # - Use Python as a calculator. # - Express mathematical and logic operations correctly. # - Learn to use different "types" of variable. # # We will finish by learning how to create a local copy of the interactive textbook on your personal computer that you will use to complete your homework. # + [markdown] slideshow={"slide_type": "slide"} # Why we are studying this: # # - To do basic algebra in Python. # # - To use programming to solve engineering problems that you will encounter in your other classes. # # + [markdown] slideshow={"slide_type": "slide"} # Lesson structure: # - Learn new skills together: # - __Demonstration__ on slides. # - __Completing examples__ in textbooks. # - __Feedback answers__ (verbally / whiteboards) # - Practise alone: __Completing review excercises__. # - Skills Review: __Updating your online git repository__. # - New skills: Updating your online git repository __from home__. # - __Summary__. 
# # + [markdown] slideshow={"slide_type": "slide"} # Each time you complete a section of your textbook, please wait to feedback the answer before moving on. # # Let’s start by practising how you will fill in your textbooks and feedback answers. # # __Basic Arithmetic Operators...__ # - # <a id='AlgebraicOperators'></a> # + [markdown] slideshow={"slide_type": "slide"} # <h2>Simple Operators</h2> # # We can use Python like a calculator. # # __Simple arithmetical operators:__ # # $+$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Addition <br> # $-$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Subtraction <br> # $*$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Multiplication <br> # $/$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Division <br> # $//$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Floor division <br> # $\%$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Modulus <br> # $**$ &nbsp; &nbsp; &nbsp; &nbsp; Exponent <br> # + [markdown] slideshow={"slide_type": "slide"} # <h3>Algebraic Operators</h3> # Express the following simple expressions using python code. <br> # Click on the cell to type in it. <br> # Press "Shift" + "Enter" to run the cell. # # $3 + 8$ # # + slideshow={"slide_type": "-"} # + [markdown] slideshow={"slide_type": "slide"} # $2 - 4$ # + slideshow={"slide_type": "-"} # + [markdown] slideshow={"slide_type": "slide"} # Use the list of mathematical operators in your textbook to write the expressions using Python. # # __STOP__ when you have completed the the expression $2^{3}$. # # We will review your answers before moving on to Section 2. # + [markdown] slideshow={"slide_type": "slide"} # $6 \times 4$ # + slideshow={"slide_type": "-"} # + [markdown] slideshow={"slide_type": "slide"} # $ 12 \div 5$ # + slideshow={"slide_type": "-"} # + [markdown] slideshow={"slide_type": "-"} # $12 \div 5$ without any decimal points or remainders. 
# + slideshow={"slide_type": "-"} # + [markdown] slideshow={"slide_type": "-"} # The remainder when $12 \div 5$ # - # + [markdown] slideshow={"slide_type": "slide"} # $2^{3}$ # - # + [markdown] slideshow={"slide_type": "slide"} # ### Operator precedence # # __Operator precedence:__ The order in which operations are performed when there are multiple operations in an expression # # e.g. multiplication before addition. # # - # Python follows the usual mathematical rules for precedence. # # > 1. Parentheses &nbsp; e.g. $(2+4)$ # 1. Exponents &nbsp; &nbsp; e.g. $2^2$ # 1. Multiplication, Division, Floor Division and Modulus (left to right) # 1. Addition and Subtraction (left to right) # - The expression should __evaluate correctly__. # - The expression should be __easily readable__. # # __Easily Readable__ # Simple enough for someone else reading the code to understand. # # It is possible to write __code__ that is correct, but might be difficult for someone (including you!) to check. # + [markdown] slideshow={"slide_type": "slide"} # #### Correct Evaluation # # A common example: # # $$ # \frac{10}{2 \times 50} = 0.1 # $$ # - 10 / 2 * 50 # is incorrect. # # # + [markdown] slideshow={"slide_type": "slide"} # Multiplication and division have the same precedence. # # The expression is evaluated 'left-to-right'. # # The correct result is acheived by using brackets &nbsp; $()$, as you would when using a calculator. 
# # # + [markdown] slideshow={"slide_type": "slide"} # $$ # \frac{10}{2 \times 50} = 0.1 # $$ # # __How would you enter this using a calculator to get the correct order of precedence?__ # - # + [markdown] slideshow={"slide_type": "slide"} # #### Readability # # An example that __evaluates__ the following expression correctly: # # $$ # 2^{3} \cdot 4 = 32 # $$ # # but is __not easily readable__: # - 2**3*4 # + [markdown] slideshow={"slide_type": "slide"} # $$ # 2^{3} \cdot 4 = 32 # $$ # # A better (__more readable__) expression: # - (2**3)*4 # + [markdown] slideshow={"slide_type": "slide"} # It is best practise to use spaces between characters to make your code more readable. # # You will be marked on readbility in your assessment. # # Start developing good habits now! # + slideshow={"slide_type": "-"} (2**3)*4 #(2**3) * 4 # + [markdown] slideshow={"slide_type": "slide"} # ## Variables and Assignment # # We can easily solve the equations so far using a calculator. # # Let's look at some special operations that Python allows us to do. # # # # # + [markdown] slideshow={"slide_type": "slide"} # What if we want to evaluate the same expression multiple times, changing the numerical constants each time? # # Example: # >$x^{y} \cdot z = $ <br> # # >$2^{3} \cdot 4 = $ <br> # $4^{5} \cdot 3 = $ <br> # $6^{2} \cdot 2 =$ &nbsp; ... # # # # # # # + [markdown] slideshow={"slide_type": "slide"} # What if we want to use the value of the expression in a subsequent computation? # # Example: # # >$a = b + c$ # # >$d = a + b$ # + [markdown] slideshow={"slide_type": "slide"} # In both these cases programming can improve the speed and ease of computation by using *assignment*. # + [markdown] slideshow={"slide_type": "slide"} # ### Assigning Variables # # When we compute something, we usually want to __store__ the result. # # This allows us to use it in subsequent computations. # # *Variables* are what we use to store something. 
# + slideshow={"slide_type": "slide"} c = 10 print(c) # - # Above, the variable `c` is used to 'store' the value `10`. # # The function `print` is used to display the value of a variable. # # (We will learn what functions are and how we use them later). # # # + [markdown] slideshow={"slide_type": "slide"} # To compute $c = a + b$ , where $a = 2$ and $b = 11$: # - a = 2 b = 11 c = a + b # On each line the expression on the right-hand side of the assignment operator '`=`' is evaluated and then stored as the variable on the left-hand side. print(c) # + [markdown] slideshow={"slide_type": "slide"} # If we want to change the value of $a$ to $4$ and recompute the sum, replace `a = 2` with `a = 4` and execute the code. # # __Try this yourself__. # # Change the value of a or b. # # Re-run the cell to update the value. # # (Click on the cell to type in it. <br> # Press "Shift" + "Enter" to run the cell.) # # Then run the `print(c)` block to view the new value. # + [markdown] slideshow={"slide_type": "slide"} # __In the cell below find $y$ when__: # <br>$y=ax^2+bx+c$, # <br>$a=1$ # <br>$b=1$ # <br>$c=-6$ # <br>$x=-2$ # # When you have finished, hold up your answer on your whiteboard. # + # create variables a, b, c and x # e.g. a = 1 #type: print (y) to reveal the answer # + [markdown] slideshow={"slide_type": "slide"} # What value did you get for y # + [markdown] slideshow={"slide_type": "slide"} # Answer: $ y = -4 $ # + [markdown] slideshow={"slide_type": "slide"} # Now change the value of $x$ so that $x = 0$ and re-run the cell to update the value. # # What value did you get for y this time? # + [markdown] slideshow={"slide_type": "slide"} # Answer = $ y = -6 $ # + [markdown] slideshow={"slide_type": "slide"} # ### Augmented Assignment # # The case where the assigned value depends on a previous value of the variable. 
# # Example: # - a = 2 b = 11 a = a + b print(a) # + [markdown] slideshow={"slide_type": "-"} # This type of expression is not a valid algebraic statement since '`a`' appears on both sides of '`=`'. # # However, is very common in computer programming. # # # # + [markdown] slideshow={"slide_type": "slide"} # __How it works:__ # # > `a = a + b` # # 1. The expression on the right-hand side is evaluated (the values assigned to `a` and `b` are summed). # 2. The result is assigned to the left-hand side (to the variable `a`). # # - # <a id='Shortcuts'></a> # + [markdown] slideshow={"slide_type": "slide"} # ### Shortcuts # - # Augmented assignments can be written in short form. # # For __addition__: # # `a = a + b` &nbsp;&nbsp; &nbsp; can be written &nbsp;&nbsp;&nbsp; `a += b` # + # Long-hand addition a = 2 b = 11 a = a + b print(a) # Short-hand addition a = 2 b = 11 a += b print(a) # + [markdown] slideshow={"slide_type": "slide"} # For __subtraction__: # # `a = a - b` &nbsp;&nbsp; &nbsp; can be written &nbsp;&nbsp;&nbsp; `a -= b` # # + # Long-hand subtraction a = 1 b = 4 print(a) # Short-hand subtraction a = 1 b = 4 print(a) # + [markdown] slideshow={"slide_type": "slide"} # The <a href='#AlgebraicOperators'>basic algebraic operators</a> can all be manipulated in the same way to produce a short form of augmented assigment. # # Complete the cells below to include the __short form__ of the expression and `print(a)` to check your answers match. 
# + [markdown] slideshow={"slide_type": "slide"} # __Multiplication__ # + # Long-hand multiplication a = 10 c = 2 a = c*a print(a) # Short-hand multiplication a = 10 c = 2 # + [markdown] slideshow={"slide_type": "slide"} # __Division__ # + # Long-hand division a = 1 a = a/4 print(a) # Short-hand division a = 1 # + [markdown] slideshow={"slide_type": "slide"} # __Floor Division__ # + # Long-hand floor division a = 12 a = a//5 print(a) # Short-hand floor division a = 12 # + [markdown] slideshow={"slide_type": "slide"} # __Modulus__ # + # Long-hand modulus a = 12 c = 5 a = a % c print(a) # Short-hand modulus a = 12 c = 5 # + [markdown] slideshow={"slide_type": "slide"} # __Exponent__ # + # Long-hand exponent a = 3 c = 2 a = a ** c print(a) # Short-hand exponent a = 3 c = 2 # + [markdown] slideshow={"slide_type": "slide"} # ##### Note: The sentences beginning with "#" in the cell are called comments. # These are not computed as part of the program but are there for humans to read to help understand what the code does. # + [markdown] slideshow={"slide_type": "slide"} # ## Naming Variables # __It is good practice to use meaningful variable names. __ # # e.g. using '`x`' for time, and '`t`' for position is likely to cause confusion. # # You will be marked on readbility in your assessment. # # Start developing good habits now! # + [markdown] slideshow={"slide_type": "slide"} # Problems with poorly considered variable names: # # 1. You're much more likely to make errors. # 1. It can be difficult to remember what the program does. # 1. It can be difficult for others to understand and use your program. # # + [markdown] slideshow={"slide_type": "slide"} # __Different languages have different rules__ for what characters can be used in variable names. # # In Python variable names can use letters and digits, but cannot start with a digit. # # e.g. 
# # `data5 = 3` &nbsp; &nbsp; &nbsp; $\checkmark$ # # `5data = 3` &nbsp; &nbsp; &nbsp; $\times$ # + [markdown] slideshow={"slide_type": "slide"} # __Python is a case-sensitive language__ # # e.g. the variables '`A`' and '`a`' are different. # # # + [markdown] slideshow={"slide_type": "slide"} # __Languages have *reserved keywords*__ that cannot be used as variable names as they are used for other purposes. # # The reserved keywords in Python are: # # `['False', 'None', 'True', 'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'nonlocal', 'not', 'or', 'pass', 'raise', 'return', 'try', 'while', 'with', 'yield']` # + [markdown] slideshow={"slide_type": "slide"} # Reserved words are colored bold green when you type them in the Notebook so you can see if one is being used. # # # + [markdown] slideshow={"slide_type": "slide"} # If you try to assign something to a reserved keyword, you will get an error e.g. it is not possible to create a variable with the name __`for`__: # - for = 12 # + [markdown] slideshow={"slide_type": "slide"} # __Sometimes it is useful to have variable names that are made up of two words.__ # # A convention is to separate the words in the variable name using an underscore '`_`'. # # e.g. a variable name for storing the number of days: # ```python # num_days = 10 # ``` # + [markdown] slideshow={"slide_type": "slide"} # Suggest a variable name for each of the following quantities and hold it up on your whiteboard. # # __temperature__ # # __height__ # # __depth of hole__ # # __class__ # # - # + [markdown] slideshow={"slide_type": "slide"} # ## Comparing Variables Using Booleans # # __Boolean:__A type of variable that can take on one of two values - true or false. # # # + [markdown] slideshow={"slide_type": "slide"} # One way to visualise how a Boolean works is consider the answer when we make a comparison... 
# - # <a id='ComparisonOperators'></a> # + [markdown] slideshow={"slide_type": "slide"} # ### Comparison Operators # # __Comparison Operator:__An operator that is used to compare the values of two variables. # # __Commonly used comparison operators:__ # # $==$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Equality <br> # $!=$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Inequality <br> # $>$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Greater than <br> # $<$ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; Less than <br> # $>=$ &nbsp; &nbsp; &nbsp; &nbsp; Greater than or equal to <br> # $<=$ &nbsp; &nbsp; &nbsp; &nbsp; Less than or equal to <br> # # # # + [markdown] slideshow={"slide_type": "slide"} # __Example:__ Comparing variables a and b using comparison operators returns a boolean variable: # + a = 10.0 b = 9.9 # Check if a is less than b. print("Is a less than b?") print(a < b) # Check if a is more than b. print("Is a greater than b?") print() # - # ##### Note: We can print words by placing them between quotation marks "......". # The collection of words between the marks is called a *string*. # # A string is a type of *variable*. We will learn about other types of variable shortly. # # + [markdown] slideshow={"slide_type": "slide"} # __Complete the cell in your textbook by writing the correct comparison operator in each set of empty brackets.__ # + a = 14 b = -9 c = 14 # Check if a is equal to b print("Is a equal to b?") print() # Check if a is equal to c print("Is a equal to c?") print() # Check if a is not equal to c print("Is a not equal to c?") print() # Check if a is less than or equal to b print("Is a less than or equal to b?") print() # Check if a is less than or equal to c print("Is a less than or equal to c?") print() # Check if two colours are the same colour0 = 'blue' colour1 = 'green' print("Is colour0 the same as colour1?") print() # + [markdown] slideshow={"slide_type": "slide"} # ### Logical Operators # # The comparisons we have looked at so far consider two variables. 
# # *Logical operators*: # # > ```python # and # or # not # ``` # # allow us to make multiple comparisons at the same time. # # # # # + [markdown] slideshow={"slide_type": "slide"} # The code # ```python # X and Y # ``` # will evaluate to `True` if statement `X` *and* statement `Y` are both true. # # Otherwise will evaluate to `False`. # # # + [markdown] slideshow={"slide_type": "slide"} # The code # ```python # X or Y # ``` # will evaluate to `True` if statement `X` *or* statement `Y` is true. # # Otherwise will evaluate to `False`. # + [markdown] slideshow={"slide_type": "slide"} # __Examples:__ # # $10 < 9$ &nbsp; &nbsp;&nbsp; &nbsp; is false # # $15 < 20$ &nbsp; &nbsp; is true # + #print(10 < 9 and 15 < 20) # + #print(10 < 9 or 15 < 20) # + [markdown] slideshow={"slide_type": "slide"} # Guess the answer (`True` or `False`) by writing it on your whiteboard: # + # print(1 < 2 and 3 < 4) # + # print(1 < 2 or 4 < 3) # + # print(1 < 2 and 3 < 4) # + [markdown] slideshow={"slide_type": "slide"} # In Python, the 'not' operator negates a statement, e.g.: # - a = 12 b = 7 #print(a < b) #print(not a < b) # + [markdown] slideshow={"slide_type": "slide"} # In your textbook you will find an example of a simple computer program that uses comparison operators. # # Based on the current time of day, the program answers two questions: # # # >__Is it lunchtime?__ # # >`True` # # # if it is lunch time. # # >__Is it time for work?__ # # >`True` # # if it is within working hours. 
# #

# + slideshow={"slide_type": "slide"}
time = 13.05  # current time
work_starts = 8.00  # time work starts
work_ends = 17.00  # time work ends
lunch_starts = 13.00  # time lunch starts
lunch_ends = 14.00  # time lunch ends

# variable lunchtime is True or False
lunchtime = time >= lunch_starts and time < lunch_ends

# variable work_time is True or False
# Bug fix: this previously read `time < work_starts or time >= work_ends`,
# which is True OUTSIDE working hours — contradicting the slide text below,
# which says the answer is True "if it is within working hours".
work_time = time >= work_starts and time < work_ends

print("Is it lunchtime?")
print(lunchtime)

print("Is it time for work?")
print(work_time)

# + [markdown] slideshow={"slide_type": "slide"}
# Based on the current time of day, the program answers two questions:
#
# >__Is it lunchtime?__
#
# >`True`
#
#
# if it is lunch time.
#
# >__Is it time for work?__
#
# >`True`
#
# if it is within working hours.

# + [markdown] slideshow={"slide_type": "slide"}
# You can see that if we change the time, the program output changes.

# + [markdown] slideshow={"slide_type": "slide"}
# __Try changing the value of variable `time`__ to a value that is:
# - before work
# - during work
# - during lunchtime
# - after work
#
# Each time you change the value of `time` re-run the cell to check if the answer is as you expect; lunchtime, work-time or neither.

# + [markdown] slideshow={"slide_type": "slide"}
# Note that the comparison operators (`>=`, `<=`, `<` and `>`) are evaluated before the Boolean operators (`and`, `or`).
#
# ### Operator Precedence
#
# > 1. Parentheses
# 1. Exponents
# 1. Multiplication, Division, Floor Division and Modulus (left to right)
# 1. Addition and Subtraction (left to right)
# 1. Comparison Operators (left to right)
# 1. Boolean not
# 1. Boolean and
# 1. Boolean or

# +
a = 3 + 1 < 4 or 3 * 1 < 4
a = ((3 + 1) < 4) or ((3 * 1) < 4)
# -

# Both expressions show the same equation but the second is more __readable__.

# + [markdown] slideshow={"slide_type": "slide"}
# ## Types
# All variables have a 'type', which indicates what the variable is, e.g. a number, a string of characters, etc.
# # # + [markdown] slideshow={"slide_type": "slide"} # Type is important because it determines: # - how a variable is stored # - how it behaves when we perform operations on it # - how it interacts with other variables. # # e.g.multiplication of two real numbers is different from multiplication of two complex numbers. # + [markdown] slideshow={"slide_type": "slide"} # ### Introspection # # We can check a variable's type using *introspection*. # # To check the type of a variable we use the function `type`. # + x = True print(type(x)) a = "1.0" print(type(a)) # - # Complete the cell in your interactive textbook to find the `type` of `a` when it is written as shown below: # + a = 1 a = 1.0 # - # What is the first type? What is the second type? # # Did anyone get a different answer? # + [markdown] slideshow={"slide_type": "slide"} # Note that `a = 1` and `a = 1.0` are different types! # # - __bool__ means __Boolean__ variable. # - __str__ means __string__ variable. # - __int__ means __integer__ variable. # - __float__ means __floating point__ variable. # # This distinction is very important for numerical computations. # # We will look at the meaning of these different types next... # + [markdown] slideshow={"slide_type": "notes"} # Explain the importance of the position of print statements when augmenting variables. # + [markdown] slideshow={"slide_type": "slide"} # ### Booleans # # A type of variable that can take on one of two values - true or false. This is the simplest type. # + slideshow={"slide_type": "slide"} a = True b = False # test will = True if a or b = True test = a or b print(test) print(type(test)) # + [markdown] slideshow={"slide_type": "slide"} # ##### Note: We can use a single instance of the print function to display multiple pieces of information if we sperate them by commas. # # e.g. `print(item_1, item_2)` # # - print(test, type(test)) # + [markdown] slideshow={"slide_type": "slide"} # __Re-cap: what does a evaluate to? 
(`True` or `False`)__ # + slideshow={"slide_type": "-"} a = (5 < 6 or 7 > 8) #print(a) # + [markdown] slideshow={"slide_type": "slide"} # <a id='Strings'></a> # # ### Strings # # A string is a collection of characters. # # + [markdown] slideshow={"slide_type": "slide"} # A string is created by placing the characters between quotation marks. # # You may use single or double quotation marks; either is fine e.g. # # my_string = 'This is a string.' # # or # # my_string = "This is a string." # + [markdown] slideshow={"slide_type": "slide"} # __Example:__ Assign a string to a variable, display the string, and then check its type: # + my_string = "This is a string." print(my_string) print(type(my_string)) # + [markdown] slideshow={"slide_type": "slide"} # We can perform many different operations on strings. # # __Example__: Extract a *single* character as a new string: # # > *__NOTE:__ Python counts from 0.* # + my_string = "This is a string." # Store the 3rd character of `my_string` as a new variable s = my_string[2] # Print print(s) # Check type print(type(s)) # - # The number that describes the position of a character is called the *index*. # # What is the character at index 4? # # What is the index of character r? my_string = "This is a string." # + [markdown] slideshow={"slide_type": "notes"} # This shows that we count spaces as characters. # + [markdown] slideshow={"slide_type": "slide"} # __Try it yourself__. # # `my_string = "This is a string."` # # In the cell provided in your textbook: # # - store the 5th character as a new variable # - print the new variable # - check that it is a string # + # Store the 6th character as a new variable # Print the new variable # Check the type of the new variable # + [markdown] slideshow={"slide_type": "slide"} # We can extract a *range of* characters as a new string by specifiying the index to __start__ at and the index to __stop__ at: # # + slideshow={"slide_type": "slide"} my_string = "This is a string." 
# Store the first 6 characters s = my_string[0:6] # print print(s) # check type print(type(s)) # - # $$ # \underbrace{ # \underbrace{t}_{\text{0}} \ # \underbrace{h}_{\text{1}}\ # \underbrace{i}_{\text{2}}\ # \underbrace{s}_{\text{3}}\ # \underbrace{}_{\text{4}}\ # \underbrace{i}_{\text{5}}\ # }_{\text{s}} # \underbrace{s}_{\text{6}}\ # \underbrace{}_{\text{7}}\ # \underbrace{a}_{\text{8}}\ # \underbrace{}_{\text{9}}\ # \underbrace{s}_{\text{10}}\ # \underbrace{t}_{\text{11}}\ # \underbrace{r}_{\text{12}}\ # \underbrace{i}_{\text{13}}\ # \underbrace{n}_{\text{14}} \ # \underbrace{g}_{\text{15}} \ # \underbrace{.}_{\text{16}} \ # $$ # __Note:__ # - The space between the first and second word is counted as the 5th character. # - The "stop" value is not included in the range. # + slideshow={"slide_type": "slide"} # Store the last 4 characters and print s = my_string[-4:] print(s) # - # $$ # my\_string = # \underbrace{t}_{\text{-17}} \ # \underbrace{h}_{\text{-16}}\ # \underbrace{i}_{\text{-15}}\ # \underbrace{s}_{\text{-14}}\ # \underbrace{}_{\text{-13}}\ # \underbrace{i}_{\text{-12}}\ # \underbrace{s}_{\text{-11}}\ # \underbrace{}_{\text{-10}}\ # \underbrace{a}_{\text{-9}}\ # \underbrace{}_{\text{-8}}\ # \underbrace{s}_{\text{-7}}\ # \underbrace{t}_{\text{-6}}\ # \underbrace{r}_{\text{-5}}\ # \underbrace{ # \underbrace{i}_{\text{-4}}\ # \underbrace{n}_{\text{-3}} \ # \underbrace{g}_{\text{-2}} \ # \underbrace{.}_{\text{-1}} \ # }_{\text{s}} # $$ # __Note:__ # - The second value in this range is empty. # - This means the range ends at the end of the string. 
# __Try it yourself.__ # # In the cell provided in your textbook: # # - store the last 6 characters # - print your new variable # # # + slideshow={"slide_type": "skip"} # Store the last 6 characters as a new variable # Print the new varaible # - # In the next cell provided: # - store 6 characters, starting with the 2nd character; "his is" # - print your new variable # + # Store 6 characters, starting with "h" # Print the new varaible # - # Is there an alternative way of extracting the same string? # + [markdown] slideshow={"slide_type": "slide"} # __Example:__ Add strings together. # + start = "Py" end = "thon" word = start + end print(word) # + [markdown] slideshow={"slide_type": "slide"} # __Example:__ Add a section of a string to a section of another string: # + start = "Pythagorus" end = "marathon" word = start[:2] + end[-4:] print(word) # - # __Note__: We can use a blank space __or__ a 0 to index the first character; either is OK. # + [markdown] slideshow={"slide_type": "slide"} # __Try it yourself:__ # In the cell in your textbook add the variables `start` and `end` to make a sentence. # + start = "<NAME>" end = "Hemma" # Add start and end to make a new variable and print it # + [markdown] slideshow={"slide_type": "slide"} # Notice that we need to add a space to seperate the words "is" and "Hemma". # We do this using a pair of quotation marks, seperated by a space. # - sentence = start + " " + end #print(sentence) # + [markdown] slideshow={"slide_type": "slide"} # ### Numeric types # # Numeric types are particlarly important when solving scientific and engineering problems. # # # + [markdown] slideshow={"slide_type": "slide"} # Python 3 has three numerical types: # # - integers (`int`) # - floating point numbers (`float`) # - complex numbers (`complex`) # # __Integers:__Whole numbers. 
<br> # __Floating point:__Numbers with a decimal place.<br> # __Complex numbers:__Numbers with a real and imaginary part.<br> # + [markdown] slideshow={"slide_type": "slide"} # Python determines the type of a number from the way we input it. # # e.g. It will decide that a number is an `int` if we assign a number with no decimal place: # # # # # + [markdown] slideshow={"slide_type": "slide"} # __Try it for yourself__ # # In the cell provided in your textbook: # # - Create a variable with the value 3.1 # - Print the variable type # - Create a variable with the value 2 # - Print the variable type # + # Create a variable with the value 3.1 # Print the variable type # Create a variable with the value 2 # Print the variable type # + [markdown] slideshow={"slide_type": "slide"} # What type is the first variable? # # What type is the second variable? # - # __How could you re-write the number 2 so that Python makes it a float?__ # # Try changing the way 2 is written and run the cell again to check that the variable type has changed. # + [markdown] slideshow={"slide_type": "slide"} # ### Integers # # - Integers (`int`) are whole numbers. # - They can be postive or negative. # - Integers should be used when a value can only take on a whole number <br> e.g. the year, or the number of students following this course. # + [markdown] slideshow={"slide_type": "slide"} # ### Floating point # # Most engineering calculations involve numbers that cannot be represented as integers. # # Numbers that have a decimal point are automatically stored using the `float` type. # # A number is automatically classed as a float: # - if it has a decimal point # - if it is written using scientific notation (i.e. using e or E - either is fine) # # # + [markdown] slideshow={"slide_type": "slide"} # <a id='ScientificNotation'></a> # #### Scientific Notation # # In scientific notation, the letter e (or E) symbolises the power of ten in the exponent. 
# # For example: # # $$ # 10.45e2 = 10.45 \times 10^{2} = 1045 # $$ # + [markdown] slideshow={"slide_type": "slide"} # Examples using scientific notation. # + a = 2e0 print(a, type(a)) b = 2e3 print(b) c = 2.1E3 print(c) # + [markdown] slideshow={"slide_type": "slide"} # __Try it yourself__ # # In the cell provided in your textbook: # # - create a floating point variable for each number shown using scientific notation. # - print each variable to check it matches the number given in the comment. # + # Create a variable with value 62 # Print the variable # Create a variable with value 35,000 # Print the variable # Are there any other ways you could have expressed this? # - # What alternative ways can be used to express 35,000? # + [markdown] slideshow={"slide_type": "slide"} # ### Complex numbers # # Complex numbers have real and imaginary parts. # # We can declare a complex number in Python by adding `j` or `J` after the complex part of the number: # # &nbsp; &nbsp; __Standard mathematical notation.__ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;__Python notation__ # # &nbsp; &nbsp; &nbsp; &nbsp; # $ a = \underbrace{3}_{\text{real part}} + \underbrace{4j}_{\text{imaginary part}} $ &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp; # `a = 3 + 4j` &nbsp; &nbsp; __or__ &nbsp; &nbsp; `a = 3 + 4J` # - b = 4 - 3j print(b, type(b)) # + [markdown] slideshow={"slide_type": "slide"} # <a id='Casting'></a> # # ## Type Conversions (Casting) # # We often want to change between types. # # Sometimes we need to make sure two variables have the same type in order to perform an operation on them. # # Sometimes we recieve data of a type that is not directly usable by the program. # # This is called *type conversion* or *type casting*. 
# # + [markdown] slideshow={"slide_type": "slide"} # ### Automatic Type Conversion # # If we add two integers, the results will be an integer: # + a = 4 # int b = 15 # int c = a + b #print(c, type(c)) # + [markdown] slideshow={"slide_type": "slide"} # However, if we add an int and a float, the result will be a float: # + a = 4 # int b = 15.0 # float c = a + b #print(c, type(c)) # + [markdown] slideshow={"slide_type": "slide"} # If we divide two integers, the result will be a `float`: # + a = 16 # int b = 4 # int c = a/b #print(c, type(c)) # + [markdown] slideshow={"slide_type": "slide"} # When dividing two integers with floor division (or 'integer division') using `//`, the result will be an `int` e.g. # + a = 16 # int b = 3 # int c = a//b #print(c, type(c)) # + [markdown] slideshow={"slide_type": "slide"} # In general: # - operations that mix an `int` and `float` will generate a `float`. # - operations that mix an `int` or a `float` with `complex` will generate a `complex` type. # # # If in doubt, use `type` to check. # + [markdown] slideshow={"slide_type": "slide"} # ### Explicit Type Conversion # # We can explicitly change (or *cast*) the type. # # To cast variable a as a different type, write the name of the type, followed by the variable to convert in brackets. # # __Example: Cast from an int to a float:__ # + a = 1 #a = float(a) print(a, type(a)) # + slideshow={"slide_type": "slide"} # If we use a new variable name the original value is unchanged. b = float(a) #print(a, type(a)) #print(b, type(b)) # + # If we use the orignal name, the variable is updated. a = float(a) #print(a, type(a)) # + [markdown] slideshow={"slide_type": "slide"} # __Try it yourself.__ # # In the cell provided: # # - cast variable `a` from a float back to an int. 
# - print variable `a` and its type to check your answer # + # cast a as an int # print a and its type # + [markdown] slideshow={"slide_type": "slide"} # ##### Note: Take care when casting as the value of the variable may change as well as the type. # # To demonstrate this we will complete a short exercise together... # # # + [markdown] slideshow={"slide_type": "slide"} # In the cell provided in your textbook: # 1. cast `i` as an `int` and print `i`. # 1. cast `i` back to a `float` and print `i`. # + i = 1.3 # float print(i, type(i)) # cast i as an int and print it # cast i back to a float and print it # + [markdown] slideshow={"slide_type": "slide"} # What has happened to the original value of `i`? # # Note that rounding is applied when converting from a `float` to an `int`; the values after the decimal point are discarded. # # This type of rounding is called 'round towards zero' or 'truncation'. # + [markdown] slideshow={"slide_type": "slide"} # A common task is converting numerical types to-and-from strings. # # Examples: # - Reading a number from a file where it appears as as a string # - User input might be given as a string. # # __Example: Cast from a float to a string:__ # # + a = 1.023 b = str(a) #print(b, type(b)) # + [markdown] slideshow={"slide_type": "slide"} # __Example: Cast from a string to a float:__ # # It is important to cast string numbers as either `int`s or `float`s for them to perform correctly in algebraic expressions. # # Consider the example below: # + a = "15.07" b = "18.07" print("As string numbers:") print("15.07 + 18.07 = ", a + b) print("When cast from string to float:") print("15.07 + 18.07 = ", float(a) + float(b)) # - # Note from the cell above that numbers expressed as strings can be cast as floats *within* algebraic expressions. # + [markdown] slideshow={"slide_type": "slide"} # Only numerical values can be cast as numerical types. # e.g. 
Trying to cast the string `four` as an integer causes an error:
# -

f = float("four")

# + [markdown] slideshow={"slide_type": "slide"}
# __Complete the review exercises in your textbook.__
#
# We will stop 10 minutes before the end of the seminar to:
#
# - update your online git repository
# - summarise what we have learnt today
#
#

# + [markdown] slideshow={"slide_type": "slide"}
# ## Review Exercises
# Here are a series of short engineering problems for you to practise each of the new Python skills that you have learnt today.
#

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Review Exercise: Gravitational Potential
# The gravitational potential, $V$, of a particle of mass $m$ at a distance $r$ from a body of mass $M$, is:
#
# $$
# V = \frac{G M m}{r}
# $$
#
# In the cell below, solve for $V$ when:
#
# $G$ = *gravitational constant* $= 6.674 \times 10^{-11}$ Nm$^{2}$kg$^{-2}$.
#
# $M = 1.65 \times 10^{12}$ kg
#
# $m = 6.1 \times 10^2$ kg
#
# $r = 7.0 \times 10^3$ m
#
# Assign variables for $G, M, m$ and $r$ before solving.
# <br>Express the numbers using __scientific notation__.
#
# <a href='#ScientificNotation'>Jump to Scientific Notation</a>

# +
# Gravitational Potential

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Review Exercise: Fahrenheit to Celsius
#
# Degrees Fahrenheit ($T_f$) are converted to degrees Celsius ($T_c$) using the formula:
#
# $$
# T_c = 5(T_f - 32)/9
# $$
#
# In the cell below, write a program to convert 78 degrees Fahrenheit to degrees Celsius and print the result.
#
# Write your program such that you can easily change the input temperature in Fahrenheit and re-calculate the answer.
# + # Convert degrees Fahrenheit to degrees Celsius # + [markdown] slideshow={"slide_type": "subslide"} # ### Review Exercise: Volume of a Cone # The volume of a cone is: # $$ # V = \frac{1}{3}(base \ area)\times(perpendicular \ height) # $$ # # ![title](img/cone.png) # # In the cell below, find the internal volume of a cone of internal dimensions: # # base radius, $r = 5cm$ # # perpendicular height, $h = 15cm$ # # Assign variables for $r$ and $h$ before solving. # + pi = 3.142 # Internal volume # + [markdown] slideshow={"slide_type": "subslide"} # The cone is held upside down and filled with liquid. # # The liquid is then transferred to a hollow cylinder. # # Base radius of cylinder, $r_c = 4cm$. # # <img src="img/cone-cyl.gif" alt="Drawing" style="width: 200px;"/> # # The volume of liquid in the cylinder is: # # $V = (base \ area)\times(height \ of \ liquid)$ # # # In the cell below, find the height of the liquid in the cylinder? # # Assign a variables for $r_c$ before solving. # - # H = height of liquid in the cylinder # The total height of the cyclinder, $H_{tot}$ is 10cm. # # In the cell below, use a __comparison operator__ to show if the height of the liquid, $H$, is more than half the total height of the cylinder. # # <a href='#ComparisonOperators'>Jump to Comparison Operators</a> # + # Is the height of the liquid more than half the total height of the cylinder? # - # Lastly, go back and change the radius of the __cone__ to 2.5cm. # # Re-run the cells to observe how you can quickly re-run calculations using different initial values. # + [markdown] slideshow={"slide_type": "subslide"} # ### Review Exercise: Manipulating Strings # <a href='#Strings'>Jump to Strings</a> # # __(A)__ # In the cell below, print a new string whose: # - first 3 letters are the last 3 letters of `a` # - last 3 letters are the first 3 letters of `b` # - a = "orangutans" b = "werewolves" # __(B)__ # # In the cell below, use `c` to make a new string that says: `programming`. 
c = "programme" # __(C)__ # # In the cell below, __cast__ `d` and `e` as a different type so that: # # `f` = (numerical value of `d`) + (numerical value of `e`) # $$ # # using standard arithmetic. # # <a href='#Casting'>Jump to Type Conversion (Casting)'</a> # # Print `f` d = "3.12" e = "7.41" # Use __shortcut notation__ to update the value of `f`. # The new value of f should equal the __remainder (or modulus)__ when f is divided by 3. # # <a href='#Shortcuts'>Jump to Shortcuts</a> # + # What is the remainder (modulus) when f is divided by 3 # - # In the cell below, change the type of the variable f to ab integer. # # <a href='#Casting'>Jump to Type Conversion (Casting)'</a> # + # f expressed as an integer # + [markdown] slideshow={"slide_type": "slide"} # # Summary # # - We can perform simple *arithmetic operations* in Python (+, -, $\times$, $\div$.....) # - We can *assign* values to variables. # - Expressions containing multiple operators obey precedence when executing operations. # # # # + [markdown] slideshow={"slide_type": "slide"} # # Summary # - *Comparison operators* (==, !=, <, >....) compare two variables. # - The outcome of a comparison is a *Boolean* (True or False) value. # - *Logical operators* (`and`, `or`) compares the outcomes of two comparison operations. # - The outcome of a logical operation is a *Boolean* (True or False) value. # - The logical `not` operator returns the inverse Boolean value of a comparison. # + [markdown] slideshow={"slide_type": "slide"} # - Every variable has a type (`int`, `float`, `string`....). # - A type is automatically assigned when a variable is created. # - Python's `type()` function can be used to determine the type of a variable. # - The data type of a variable can be converted by casting (`int()`, `float()`....) # + [markdown] slideshow={"slide_type": "slide"} # # Homework # # 1. __CLONE__ your online GitHub repository to your personal computer (if you have not done so in-class today). 
# <br>(Refer to supplementary material: __S1_Introduction_to_Version_Control.ipynb__, Creating a Local Repository on your Personal Computer) # 1. __COMPLETE__ any unfinished Review Exercises. # 1. __PUSH__ the changes you make at home to your online repository # <br>(Refer to supplementary material: __S1_Introduction_to_Version_Control.ipynb__, Syncronising Repositories, Pushing Changes to an Online Repository # ). # - # # Next Seminar # # If possible, please bring your personal computer to class. # # We are going to complete an excercise: __pulling changes made in-class to the local repository on your personal computer.__ # # If you cannot bring your personal computer with you, you can practise using a laptop provided in class, but you wil need to repeat the steps at home in your own time. # + [markdown] slideshow={"slide_type": "slide"} # # Updating your Git repository # # You have made several changes to your interactive textbook. # # The final thing we are going to do is add these changes to your online repository so that: # - I can check your progress # - You can access the changes from outside of the university server. # # > Save your work. # > <br>`git add -A` # > <br>`git commit -m "A short message describing changes"` # > <br>`git push origin master` # # <br>Refer to supplementary material: __S1_Introduction_to_Version_Control.ipynb__ # - # # Updating your Git repository from home. # # We will finish by learning how to: # - Create a local repository on your personal computer, containing the chnages you have made today. # - Add the changes you make at home (homework: review exercises) to the online Git Repository. # # In Jupyter notebook, open: __S1_Introduction_to_Version_Control__. # # Navigate to section: __Creating a Local Repository on your Personal Computer.__
1_Data_types_and_operators.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

# Load the monthly local-area unemployment statistics.
unemployment = pd.read_csv('Local_Area_Unemployment_Statistics__Beginning_1976.csv')

# Peek at the first ten rows.
unemployment[:10]

# Build a proper datetime column (first of each month) from the Year/Month columns.
month_starts = unemployment.apply(lambda row: pd.Timestamp('%d-%d-01' % (row.Year, row.Month)), axis=1)

with_dates = pd.concat((unemployment, pd.DataFrame({'date': month_starts})), axis=1)

# Plot the New York State labor force, employed, and unemployed counts over time.
ny_state = with_dates[with_dates.Area == 'New York State'].sort_values(by='date')
for series_name in ('Labor Force', 'Employed', 'Unemployed'):
    plt.plot(ny_state.date, ny_state[series_name], label=series_name)
plt.legend(loc='best')
plt.xlabel('date')
plt.ylabel('Number of People')
Section 2/Video 2.2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

from science import *
import pandas

# Whitespace-delimited text files; raw strings avoid invalid-escape warnings
# for the regex separator on recent Python versions.
temperature_data = pandas.read_csv('temperatures.txt', sep=r'\s*')  # cleaned version

sunspot_data = pandas.read_csv('spot_num.txt', sep=r'\s*')

# +
# Annual mean temperature anomaly; the J-D column is in hundredths of a degree.
x = temperature_data['Year']
y = temperature_data['J-D'] / 100.0
plot(x, y, '-o')
ylabel('Temperature Anomaly')
xlabel('Year')
# -

# Monthly sunspot number; convert year + month to a fractional year.
x = sunspot_data['YEAR'] + (sunspot_data['MON'] - 1) / 12.0
y = sunspot_data['SSN']
plot(x, y, '-o')
xlabel('year')
ylabel('sunspot number')

# +
# Overlay the temperature anomaly and a smoothed sunspot number on twin y-axes.
ax1 = gca()
x = temperature_data['Year']
y = temperature_data['J-D'] / 100.0
plot(x, y, '-o')
ylabel('Temperature')

x = sunspot_data['YEAR'] + (sunspot_data['MON'] - 1) / 12.0
y = sunspot_data['SSN']
# pandas.rolling_mean() was removed in pandas 0.23; the equivalent modern call
# is Series.rolling(window).mean(), which also NaN-pads the first window-1 points.
y2 = y.rolling(150).mean()

ax2 = gca().twinx()
plot(x, y2, 'r-')
ylabel('Sunspot Number')
ax2.set_ylim([20, 120])

ax1.set_xlim([1880, 2013])
ax2.set_xlim([1880, 2013])

# +
# pandas.Series.rolling?
# -
examples/Temperature and Sunspots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Python Fundamentals Workshop # ***Part 1 & 2*** # <img src="https://www.python.org/static/community_logos/python-logo-master-v3-TM.png" title="Python Logo"/> # The main source of materials is the [official wiki page of Python](https://wiki.python.org/moin/BeginnersGuide/Programmers) (subsequently [this tutorial](https://python.land/python-tutorial)) and [Python cheatsheet](https://www.pythoncheatsheet.org/). # # The Zen of Python # Try `import this` to learn about some advocated principles by Python's gurus. import this # --- # # Commenting in Python # You can insert your comments in Python which will not be interpreted by it at all. # Why? # - Document what you are doing # - Debug # - Reminders: use `# TODO add another function here` # - Etc. # + # This is a single line comment """ This is a longer multiline comment""" # Or maybe # This multiline # comment print(""" This is a longer multiline comment""") # - # --- # # Simple Operators # We have seven simple operators in Python: # # | # | Operator | Semantics | Example | # | --- | --- | --- | --- | # | 1 | `**` | Exponent | `2 ** 3 = 8` | # | 2 | `%` | Modulus/Remainder | `22 % 8 = 6` | # | 3 | `//` | Integer division | `22 // 8 = 2` | # | 4 | `/` | Division | `22 / 8 = 2.75` | # | 5 | `*` | Multiplication | `3 * 3 = 9` | # | 6 | `-` | Subtraction | `5 - 2 = 3` | # | 7 | `+` | Addition | `2 + 2 = 4` | # | - | `=` | Assignment | - | # # **What do these `#` numbers mean though?** Let's try to evaluate the following **expressions**: (6 + 6) / 2 2 * 3 * 2 / 2 + 2 2 * 3 + 2 * 3 2 * 3 + 2 ** 2 * 3 2 * 3 + 2 ** (2 * 3) # **Operators mean different things with different data types, or might not work!!** "Hello python!" * 3 # So it's *absolutely important* to know your data types. 
Thus, let's learn more about `variables` in Python! # Assignment can be augmented thus: # # |Operator|Equivalent| # |---|---| # |var += 1 | var = var + 1| # |var -= 1 | var = var - 1| # |var *= 1 | var = var * 1| # |var /= 1 | var = var / 1| # |var %= 1 | var = var % 1| 4 % 2 5 % 2 var = 10 var += 5 var var %= 3 var # --- # # Variables # Variables are significant quantities that you want to save and use **later** in your code. Typical example would be **calculations** you make. # # Every variable in python must have **a name**. Names must be: # - Starting with a letter, or an underscore `_`! We cannot start a variable with a number like `1_best_var_ever` # - Containing only letters, numbers and `_`, so **one word** # → **Additional encouraged rules**: # - Never name a variable with a reserved word (coloured usually)! like `print` or `def` # - Sometimes you will get a syntax error, sometimes you'd have very bad results! # - Always use meaningful names, even if long # - Variable names are case sensetive! So better use lower case always as a general rule, however: # - Follow the naming and coding conventions of Python (found in `PEP 8` guide) from the start to get used to it. See them in [PEP 8 -- Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/). var = 4 print(var) my beautiful var = 5 my_beautiful_var = 5 print(my_beautiful_var) _var = 4 print(_var) 1st_var = 4 _var = 4 def = 4 # ## Types of Simple Variables in Python # We don't have to specify the types as we see above! However, we need to understand them and identify them. # # We have booleans (`bool`), integers (`int`), floats (`float`) and strings (`str`). We can test for their types using `type(.)`. var = True type(var) var = False type(var) var = 5 type(var) var = -5.5 type(var) var = "hello" type(var) var = "4" type(var) var * 3 var = '4' type(var) # Double or single quotes is your choice, but Python prefers `'` over `"`. 
# # Let's try to define a var with the value: `I'm happy`: var = 'I'm happy' var var = "I'm happy" var var = "I like 'AMD'" var var = 'I\'m happy' var # ### Knowing your variables types makes a lot of difference # Especially if you are reading data from text files or CSV files! 4 + 6 "4" + "6" 5 * 3 "5" * 3 5 * "3" 2 ** 3 2 ** "3" # Note the difference in how the output is formulated, which helps you debug and develop your code: type(2) type("2") type('2') type(2.0) # ### Moving between variable types # Sometimes we want to convert a variable from type to type. # When you want to convert a variable from string to integer, you can use the data type keyword as function: var = 5.0 var str(var) str(int(var)) var = "6" float(var) var = "-6" int(var) float(var) var = "-6.787232" float(var) int(var) int(float(var)) var = "6.x" int(var) var = 5 > 4 type(var) var int(var) int(False) var = True + 4 print(var) type(var) var = 0 bool(0) var = 1 bool(1) var = 3 bool(var) var = 3.4 bool(var) var = 0 bool(0) bool(-1) var = None type(var) bool(var) # **So:** # - Anything greater other than zero evaluates to `True` boolean value in Python # - Zeros and `None` evaluate to `False`. `None` **is when our variable is defined, but has no value in it**, i.e. not initialised → very useful check! # - Any comparison is a boolean in itself → important to understand # ### String variables are cool # String variables in Python have many useful functions and perks. # - They can span multiple lines # - They can include special characters (indicators of special cases), like: # - `\n` the new line special character # - `\t` the tab charachter # - `\` the escape character if preceds a special case, to deactivate special cases of what follows # - They have two useful kinds: raw strings and formatted strings var = """Here is my first line, And my second, and what about a third one?! why not!""" print(var) var = "Here is line1,\nAnd line 2,\nAnd three!" 
print(var) var var = """Name:\tRafi Origin:\tEarth""" print(var) var = """Name:\tRafi Origin:\\tEarth""" print(var) # As you see, we can deactivate the special functionality of `\` manually. This is sometimes tedious, like when defining filepaths on Windows: file_path = "D:\\repo\\python_fundamentals_workshop\\hello.txt" print(file_path) # That's when you use raw strings! Just an `r` before starting the string: file_path = r"D:\repo\python_fundamentals_workshop\hello.txt" print(file_path) # Finally, there are a lot of things to apply on string variables. Try to press `.` at the end of a string variable and check them up, or maybe see [this resource](https://www.w3schools.com/python/python_ref_string.asp). var = " This Is My String " var var.lower() var.upper() var.strip() var.strip().lower() # #### Formatting Strings using other Variables # There are many ways to print variable values within strings. Let's assume we want to print a greeting to someone. # We will use `input` to get the user's name from them. your_name = input("What is your name?") print("Welcome", your_name, "to our workshop!") # A neater and more efficient way is to use formatted strings! Like raw strings, you just put an `f` for formatted before the string: print(f"Welcome {your_name}!") print(f"Welcome {your_name*3}!") # --- # # More complex variables # What if we want a group of vairable that are linked to each other? # # Your **address** is one such kind of data: you have a post code, a city, a street and a house number. Till now, we would define it as: postcode = "A45 3SA" street = "Victoria road" house_number = 33 city = "Birmingham" # A bit messy. However, we can group these in a more complex data types: # - Tuple # - List # - Set # - Dictionary # # When to choose each one though? 
# # | Data Type | Allows Duplicates | Ordered | Can change its contents (mutable) | # | --- | --- | --- | --- | # | Tuple | Yes | Yes | No| # | List | Yes | Yes | Yes| # | Set | No| No| "Yes"| # # Dictionary is more specific: you store `key:value` pairs inside. # Keys must be a basic data type or something that can be used as key because **it doesn't change its internal contents**: like `int`, `str` or maybe `tuple` (In other words, keys **must be immutable**). # # Getting back to our address example, which one to choose? # - My address won't change # - The order matters # - Duplicates are OK # # → A Tuple! # Also: a tuple is faster and more memory efficient than lists, and it has fixed number of members. # ► *Do you have examples of when to use lists or sets?* # ## Tuples: define them with brackets `(` `)` address = (house_number, street, city, postcode) address address[2] # We can extract tuple's data in one shot! nbr, strt, cty, pcode = address print(f"""{nbr} {strt} {cty} {pcode}""") address[2] = "London" address.append("Earth") type(address) # ## Lists: define them with square brackets `[` `]` queue = ["Tom", "John", "Peter", "Luke", "Sam"] queue queue[3] queue[5] queue[-1] queue[-3] # Slicing with lists queue[:] queue[1:3] queue[2:] queue[:3] queue[2,3,4] # We can change the contents of the queue because it is mutable! queue.append("Aaron") queue # Let's imagine we have a new line of people at the door new_people = ["Bridget", "Valerie", "Mary"] queue.append(new_people) queue queue.append("Mary").append("Valerie") queue queue = ["Tom", "John", "Peter", "Luke", "Sam"] queue.extend(new_people) queue # Can use basic operations on lists queue = ["Tom", "John", "Peter", "Luke", "Sam"] queue + new_people # Since order matters, we want to be able to insert at specific location sometimes. 
We can then use `.insert()`: queue.insert(3, "Felicity") queue # Or even sort things queue.sort() queue queue.sort(reverse=True) queue # Or even remove things queue.remove("Peter") queue queue.remove("Dan") # **We can have multi-dimensional lists!** that is by *nesting* lists: matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] matrix matrix[1, 2] matrix[1][3] matrix[1][2] matrix[0].sort(reverse=True) matrix matrix[2][2] # # Sets: define them with `set([])` or with `{` `}` if not empty # The main feautre: **no duplicates**! We can perform set operations on sets, like union and intersection, which is very handy! Memberships are typically modelled with sets. # # Note: a set is **unordered**! so no indexes or order here husband_bag = {"apples", "orages", "eggs"} husband_bag husband_bag = set(["apples", "oranges", "eggs"]) husband_bag husband_bag.add("chocolate") husband_bag husband_bag.add("eggs") husband_bag husband_bag.update(["PS4", "Castle Vania", "apples"]) husband_bag husband_bag[0] wife_bag = {"milk", "meat", "eggs", "chocolate", "chocolate", "chocolate"} wife_bag # Let's find particular items of both of them: wife_bag.difference(husband_bag) husband_bag.difference(wife_bag) wife_bag.symmetric_difference(husband_bag) wife_bag ^ husband_bag # Has any of the two bought the same thing twice? let's avoid an argument and find out! wife_bag.intersection(husband_bag) # + husband_bag.remove("eggs") if len(wife_bag.intersection(husband_bag)) == 0: print("We're good now!") # - husband_bag.remove("eggs") husband_bag.add("eggs") husband_bag.discard("eggs") husband_bag husband_bag.discard("eggs") husband_bag # Let's have an overview of what the couple has bought: husband_bag.union(wife_bag) # + couples_bag = husband_bag.union(wife_bag) print(f"The couple bought {len(couples_bag)} items") # - # # Dictionaries: define them with `{` `}` # Very common and power data structure, wherever you have associative relations between a "key" and a "value": key → value(s). 
# Typical example: **telephone directory**! # Let's make one for BT, wher we have Tom, Mary and Rebecca as customers. bt_phone_book = {} print(bt_phone_book) print(type(bt_phone_book)) # Since its type is dict, there is a `dict()` function which we can use too, like other datatypes: bt_phone_book = dict([]) bt_phone_book # Since we know the keys, we can use `dict.fromkeys`: customers = ['Tom', 'Mary', 'Rebecca'] bt_phone_book = dict.fromkeys(customers, "0000-000-0000") bt_phone_book bt_phone_book = {"Tom": '013253434', "Mary": '734246344', "Rebecca": '533159372'} bt_phone_book # ## Dictionary Keys and Values or Items print("Keys:", bt_phone_book.keys()) print("Values:", bt_phone_book.values()) bt_phone_book["Tom"] bt_phone_book.get("Tom") bt_phone_book["Sam"] bt_phone_book.get("Sam") bt_phone_book["Sam"] = '72453' bt_phone_book del(bt_phone_book["Sam"]) bt_phone_book bt_phone_book.items() # What if a person has many phone numbers?? # Let's make a better phone book bt_phone_book = {"Tom": '013253434', "Mary": '734246344', "Rebecca": '533159372'} # Let's add a number to Rebecca: new_customers = {"Sam": [143423, 634345], "Dan": 632323} bt_phone_book.update(new_customers) bt_phone_book new_data = {"Sam": None} bt_phone_book.update(new_data) bt_phone_book # ### The `isinstance()` function # + my_list = [2, 6, "Tom", "John", 8, True] my_tupe = ("0044", "1521", "759123") # Feel free to craft your own even more complex data types to suit your needs! my_dict = {"country1": {"city1": ["street1", "street2"], "city2": "street3"}, "country2": {"city3": "street4"}} # - type(my_list) type(my_tupe) type(my_dict) type(my_list) == list type(my_tupe) == tuple # Safer and should be used always for headache-free execution (subtypes and OOP...) isinstance(my_list, list) isinstance(my_dict, tuple) isinstance("3", int) # ### The `len()` function len(my_list) len(my_tupe) my_dict len(my_dict) len(my_dict["country2"]) # Will you be able to pull this off? 
len(my_dict["country1"]["city2"]) # ### The `in` operator queue = ["Sam", "Tom", "Paul"] "Tom" in queue "Tom" not in queue bt_phone_book = {"Tom": '013253434', "Mary": '734246344', "Rebecca": '533159372'} "Mary" in bt_phone_book "Mary" in bt_phone_book.values() # --- # # Conditionals and comparisons # We have six comparison operators in Python: # |Operator|Meaning| # |---|---| # |>| greater than| # |<| smaller than| # |>=| greater than or equal to| # |<=| smaller than or equal to| # |**==**| is equal| # |!=| is not equal| # **Each comparison results in a boolean True or False, so it gives us a boolean values**. # # We use `if`, `else` and `elif` for complex comparison logic. **indentation is key to know which code lines belong under which condition expression!** var = 0 if var > 0: print(f"value {var} is positive!") var2 = "Yes!" elif var < 0: print(f"value {var} is negative!") var2 = "No!" else: print(f"value {var} is zero.") var2 = "Meh" print(var2, ".") if 2 < 3 < 4 < 5: print("School is good") else: print("They lied to us!") if 'a' >= 'z': print("hello") elif 'a' < 'z': print("bye") # + var = 5 if var = 0: print(f"This var is absolutely zero, and it's value is {var}") elif var != 0: print(f"{var} is not zilch at the end.") # - if False == 1: print("hello") elif False != 1: print("bye") # ## Taking it to the next level: `or`, `and` and `not` # `or`, `not` and `and` can be used to make complex comparisons like this: check = (2 + 2 == 4) and (5 * 5 == 25) and not (5 + 2 == 7) if check: print("hello") else: print("bye") raining = True windy = False warm = True play_outside = None # + if warm or not raining: play_outside = True elif raining and windy: play_outside = False elif warm and not windy: play_outside = True print(f"Play outside: {play_outside}") # - # What if a variable is unknown? 
# + var1 = None var2 = None if var1 == var2: print("zero") elif var1 != var2: print("not zero") # - # However, it is **always recommended** to use `is` when comparing to `None` because `None` is "unknown"/"unassigned" if var1 is var2: print("zero") else: print("not zero") # --- # # Loops # Let's try to print the names of people standing at the till: queue = ["Tom", "John", "Peter", "Luke", "Sam"] # Print all elements print(queue[0]) print(queue[1]) print(queue[2]) print(queue[3]) print(queue[4]) # Imagine that we have 100 customers... # # For tasks than need repetition, like going over a list or data, we should use loops. Loops are dynamic and convenient. # # We have two kinds of loops in Python: # - `for` loops # - `while` loops for customer in queue: print(customer) # We can enumerate the loops, or number them and maybe use these numbers if they are meaningful to us, with `enumerate`: for turn, customer in enumerate(queue): print(f"{turn} - {customer}") for turn, customer in enumerate(queue): print(f"# {turn+1} - {customer}") # **For (and while) loops can iterate over *iterable* objects**, i.e. they need to have elements to return one at a time. # How about this? for letter in "ABCDEFG": print(letter) for number in 2376: print(element) for counter in range(6): print(counter) matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] matrix for r, element in enumerate(matrix): print(f"row {r}:{element}") for r, row in enumerate(matrix): for c, column in enumerate(row): print(f"row{r}, column{c}: {matrix[r][c]}") # If I don't know how many times to iterate, or my loop depends on a guarding condition like user input, `while` can be used: end = 6 counter = 0 while counter < end: print(counter) counter += 1 search = ["innocent", "innocent", "guilty", "innocent", "innocent"]*100 search i = 0 while i < len(search) and search[i] != "guilty": print(search[i]) i += 1 num = 0 while num != -1: num = input("Enter a number to get its exponential! 
If you want to quite, enter -1.\n► ") print(f"{num}^2 = {num**2}") print("Bye!") # ## `break` and `continue` # `break` and `continue` are very handful for finely managing loops and enhancing the efficiency (speed and resilence to errors for example). # # - `break`: breaks the current loop and gets out of it to continue executing **what comes after the loop** # - `continue`: skips the current iteration or the loop and goes to the next **iteration** if any search = ["guilty", "innocent", "innocent", "guilty", "innocent", "innocent"]*100 search # Find if the list contains an innocent: has_innocents = False iterations_run = 0 for case in search: iterations_run += 1 if case == "innocent": has_innocents = True print(f"Innocents found is {has_innocents} in {iterations_run} iterations.") # Find if the list contains an innocent: has_innocents = False iterations_run = 0 for case in search: iterations_run += 1 if case == "innocent": has_innocents = True break; print(f"Innocents found is {has_innocents} in {iterations_run} iterations.") data = ["Falk", "Kim", "Uli", None, "Dani", "Charlie", None] for name in data: print(f"{name} is {len(name)} characters long.") for name in data: if name is None: continue print(f"{name} is {len(name)} characters long.") # # List Comprehensions # It is an advanced "aesthetic" concept, so understand loops perfectly before using these. # + nums = [1, 2, 3] doubles = [] for num in nums: doubles.append(num*2) doubles # - doubles = [num*2 for num in nums] doubles # + nums = [1, 2, 3, 4] even_doubles = [] for num in nums: if num % 2 == 0: even_doubles.append(num*2) even_doubles # - even_doubles = [x*2 for x in nums if x%2 == 0] even_doubles # --- # # I/O # Let's look at a quich example to read a CSV file in a slightly different way. # # `Pandas` is amazing at doing so, but some I/O experience can come a long way. 
import os os.getcwd() file_path = r".\resources\titanic.csv" # + import csv from pprint import pprint file_handler = open(file_path, mode="r") interpret_data = csv.DictReader(file_handler) for line in interpret_data: pprint(line) # CRUCIAL!! file_handler.close() print("\nIs the file closed properly??", file_handler.closed) # - # It is crucial to close the file! Safer is to use a **context manager** like `with`. # # They manage the memory and resources for you and they take care of cleaning the mess after their context ends; context = a block of code. # + with open(file_path, mode='r') as file_handler: interpret_data = csv.DictReader(file_handler) for line in interpret_data: print(f"{line['Name']}:\t{line['Survived']}") print("\nIs the file closed properly??", file_handler.closed) # + with open(file_path, mode='r') as file_handler: interpret_data = csv.reader(file_handler) # How many people do we have? print(f"We have {len(list(interpret_data))} people on the Titanic") for line in interpret_data: print(f"{line['Name']}:\t{line['Survived']}") print("\nIs the file closed properly??", file_handler.closed) # - # --- # # Functions: the lego pieces at last! define then with `def` # You have all used them! `print()` is a function. # # A function has: # - a name # - some parameters if needed, can be mandatory or optional with default values # - a return value: always, even `None` # # These three elements are sometimes known as **signatures**. # Let's define a function that checks if a number is even? **note the indentation!!** def is_even(number): check = (number % 2 == 0) return(check) is_even(3) # we call the function with the *argument* 3 is_even(6) type(is_even(6)) even = is_even(6) even is_even() # You can give your parameters default values so they will be optional. def is_even(number = 0): check = (number % 2 == 0) return(check) is_even() # You can be explicit about the types of parameters. 
is_even("r") def is_even(number:int = 0): check = (number % 2 == 0) return(check) is_even("r") # Let's define a function that calculates the equation $f(x) = 5x + 3$ def find(x: int): y = 5 *x + 3 return y print(find(2)) print(find(5)) for x in range(3): print(f"x = {x} → y = {find(x)}") # ## Scope Caveats # + var = 0 #.. #.. def fun(): print(f"Hello from fun and var is {var}") fun() var # + var = 0 #.. #.. def fun(): var = 2 print(f"Hello from fun and var is {var}") fun() var # + var = 0 def fun(): global var var = 2 print(f"Hello from fun and var is {var}") fun() var # + var = 0 def fun(myvar): myvar = 2 print(f"Hello from fun and var is {myvar}") fun(myvar=var) var # + def fun(): var2 = 2 print(f"Hello from fun and var2 is {var2}") fun() print(var2) # - # Can we use a function inside a function?? # ---
python_fundamentals.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # `ApJdataFrames` Muzic2012 # --- # `Title`: Substellar Objects in Nearby Young Clusters (SONYC). V. New Brown Dwarfs in ρ Ophiuchi # `Authors`: <NAME>. # # Data is from this paper: # http://iopscience.iop.org/0004-637X/744/2/134/ # + # %pylab inline import seaborn as sns sns.set_context("notebook", font_scale=1.5) #import warnings #warnings.filterwarnings("ignore") # - import pandas as pd # ## Table 1 - Parameters of ρ Oph M-type Objects Observed with FMOS/Subaru and SINFONI/VLT addr = "http://iopscience.iop.org/0004-637X/744/2/134/suppdata/apj408590t1_ascii.txt" names = ["ID","RA", "DEC", "ins", "A_V_phot","A_V","T_eff","SpT","Ref","Name"] tbl1 = pd.read_csv(addr, sep='\t', skiprows=10, skipfooter=8, index_col=False, engine='python', na_values=r" ... ", names = names) tbl1.T_eff = tbl1.T_eff.str.replace(">or=", '') tbl1 # ##Table 2. Spectral Types Calculated from the SINFONI Spectra addr = "http://iopscience.iop.org/0004-637X/744/2/134/suppdata/apj408590t2_ascii.txt" names = ["ID","SpT_HPI", "SpT_Q", "SpT_H_2O"] tbl2 = pd.read_csv(addr, sep='\t', skiprows=8, skipfooter=5, index_col=False, engine='python', na_values=r" ... ", names = names) tbl2 # Need to parse these strings if I'm going to use this data. # ## Save data. # !mkdir ../data/Muzic2012 tbl1.to_csv("../data/Muzic2012/tbl1.csv", sep='\t', index=False) # *Script finished.*
notebooks/Muzic2012.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Saving Data # Once you request data, Hydrofunctions can automatically save the JSON in a compact zip file. The next time that you re-run your request, the data are retrieved automatically from the local file. Using a data cache like this saves on internet traffic, speeds up your code, and prevents spamming the NWIS just because you are making minor changes to your code. As an alternative to zipped JSON, Hydrofunctions also makes it easy to use [Parquet](https://parquet.apache.org/), a compact file format for storing large datasets. Parquet is efficient: file sizes are small and can be read quickly. Parquet is great for large datasets, because it is possible to access parts of the file without reading the entire file. # # To save your data, simply provide a filename as a parameter to the NWIS object. If you supply a .parquet file extension, Hydrofunctions will save a parquet file; otherwise it will supply a .json.gz extension and save it in that format. import hydrofunctions as hf new = hf.NWIS('01585200', 'dv', start_date='2018-01-01', end_date='2019-01-01', file='save_example.json.gz') new # ## Automatic file reading & writing # # The first time that you make the request, hydrofunctions will save the incoming data into a new file, and you will get a message, `Saving data to filename`. # # The second time that you make the request, hydrofunctions will read the data from the file instead of requesting it, and you will get a message, `Reading data from filename`. new = hf.NWIS('01585200', 'dv', start_date='2018-01-01', end_date='2019-01-01', file='save_example.json.gz') new # In effect, the local file will act as a cache for your data, reducing your network traffic. 
# # ## Manual file reading & writing # It is also possible to force hydrofunctions to read or write a file by using the `NWIS.read()` and `NWIS.save()` methods. new.save('save_example.parquet') new.read('save_example.parquet') new
docs/notebooks/Saving_Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h2> Matrices: Tensor Product</h2> # # Tensor product is defined between any two matrices. The result is a new bigger matrix. # # Before giving its formal definition, we define it based on examples. # # We start with a simple case. # # <i>A vector is also a matrix. Therefore, tensor product can be defined between two vectors or between one vector and one matrix.</i> # <h3> Tensor product of two vectors </h3> # # We have two vectors: $ u = \myrvector{-2\\3} $ and $ v = \myrvector{1 \\ 2 \\ -3} $. # # The tensor product of $u$ and $ v $ is denoted by $ u \otimes v $. # # We may consider the tensor product as extending $ u $ by $ v $: # # $$ # u \otimes v = \myrvector{-2\\3} \otimes \myrvector{1 \\ 2 \\ -3} = # \myrvector{ -2 \cdot \myrvector{1 \\ 2 \\ -3} \\ 3 \cdot \myrvector{1 \\ 2 \\ -3} } = # \myrvector{ -2 \\ -4 \\ 6 \\ 3 \\ 6 \\ -9 }. # $$ # # Here, $ -2 $ in $ u $ is replaced with the vector $ (-2 \cdot v) $, and $ 3 $ in $ u $ is replaced with the vector $ 3 v $. # # Thus each entry of $ u $ is replaced by a 3-dimensional vector, and the dimension of the result vector is $ 6~(=2 \cdot 3) $. # # Algorithmically, each element in $ u $ is replaced by the multiplication of this element with the vector $ v $. # # Let's find $ v \otimes u $ in Python. # + # vector v v = [1,2,-3] # vector u u=[-2,3] vu = [] for i in range(len(v)): # each element of v will be replaced for j in range(len(u)): # the vector u will come here after multiplying with the entry there vu.append( v[i] * u[j] ) print("v=",v) print("u=",u) print("vu=",vu) # - # <h3> Task 1 </h3> # # Find $ u \otimes v $ and $ v \otimes u $ for the given vectors $ u = \myrvector{-2 \\ -1 \\ 0 \\ 1} $ and $ v = \myrvector{ 1 \\ 2 \\ 3 } $. 
# +
#
# your solution is here
#
u = [-2, -1, 0, 1]
v = [1, 2, 3]

# u-tensor-v: every entry of u is expanded into a copy of v scaled by that entry
uv = [ui * vj for ui in u for vj in v]
print("u-tensor-v is", uv)

# v-tensor-u: every entry of v is expanded into a copy of u scaled by that entry
vu = [vi * uj for vi in v for uj in u]
print("v-tensor-u is", vu)
# -

# <h3> Note:</h3>
#
# Tensor products are useful when we have a system composed of two (or more) sub-systems.
#
# Any new entry after tensoring represents a pair of entries, each of which comes from one of the sub-systems.
#
# We will see the usage of tensor products in the main tutorial.

# <h3> Tensor product of two matrices </h3>
#
# The definition is the same.
#
# Let's find $ M \otimes N $ and $ N \otimes M $ for the given matrices
# $
#     M = \mymatrix{rrr}{-1 & 0 & 1 \\ -2 & -1 & 2 \\ 1 & 2 & -2} ~~\mbox{and}~~
#     N = \mymatrix{rrr}{0 & 2 & 1 \\ 3 & -1 & -2 \\ -1 & 1 & 0}.
# $
#
# $ M \otimes N $: Each element of $ M $ will be replaced with the whole matrix $ N $ after multiplying with this element.
# # $$ # M \otimes N = # \mymatrix{rrr}{-1 & 0 & 1 \\ -2 & -1 & 2 \\ 1 & 2 & -2} \otimes \mymatrix{rrr}{0 & 2 & 1 \\ 3 & -1 & -2 \\ -1 & 1 & 0} # = # \mymatrix{rrr}{ -1 \mymatrix{rrr}{0 & 2 & 1 \\ 3 & -1 & -2 \\ -1 & 1 & 0} & 0 \mymatrix{rrr}{0 & 2 & 1 \\ 3 & -1 & -2 \\ -1 & 1 & 0} & 1 \mymatrix{rrr}{0 & 2 & 1 \\ 3 & -1 & -2 \\ -1 & 1 & 0} \\ # -2 \mymatrix{rrr}{0 & 2 & 1 \\ 3 & -1 & -2 \\ -1 & 1 & 0} & -1 \mymatrix{rrr}{0 & 2 & 1 \\ 3 & -1 & -2 \\ -1 & 1 & 0} & 2 \mymatrix{rrr}{0 & 2 & 1 \\ 3 & -1 & -2 \\ -1 & 1 & 0} \\ # 1 \mymatrix{rrr}{0 & 2 & 1 \\ 3 & -1 & -2 \\ -1 & 1 & 0} & 2 \mymatrix{rrr}{0 & 2 & 1 \\ 3 & -1 & -2 \\ -1 & 1 & 0} & -2 \mymatrix{rrr}{0 & 2 & 1 \\ 3 & -1 & -2 \\ -1 & 1 & 0}} # $$ # # Calculating by hand looks a boring task because of many repetitions. # # We do this once by hand (in mind), and then check the result by Python. # $$ # M \otimes N = \mymatrix{rrrrrrrrr}{ # 0 & -2 & -1 & 0 & 0 & 0 & 0 & 2 & 1 \\ # -3 & 1 & 2 & 0 & 0 & 0 & 3 & -1 & -2 \\ # 1 & -1 & 0 & 0 & 0 & 0 & -1 & 1 & 0 \\ # 0 & -4 & -2 & 0 & -2 & -1 & 0 & 4 & 2 \\ # -6 & 2 & 4 & -3 & 1 & 2 & 6 & -2 & -4 \\ # 2 & -2 & 0 & 1 & -1 & 0 & -2 & 2 & 0 \\ # 0 & 2 & 1 & 0 & 4 & 2 & 0 & -4 & -2 \\ # 3 & -1 & -2 & 6 & -2 & -4 & -6 & 2 & 4 \\ # -1 & 1 & 0 & -2 & 2 & 0 & 2 & -2 & 0 # } # $$ # # Now, we find the same matrix in Python. # # This time we use four nested for-loops. # + # matrix M M = [ [-1,0,1], [-2,-1,2], [1,2,-2] ] # matrix N N = [ [0,2,1], [3,-1,-2], [-1,1,0] ] # MN will be a (9x9)-dimensional matrix # prepare it as a zero matrix # this helps us to easily fill it MN=[] for i in range(9): MN.append([]) for j in range(9): MN[i].append(0) for i in range(3): # row of M for j in range(3): # column of M for k in range(3): # row of N for l in range(3): # column of N MN[i*3+k][3*j+l] = M[i][j] * N[k][l] print("M-tensor-N is") for i in range(9): print(MN[i]) # - # We find $ N \otimes M $ in Python. # # We use the same code by interchanging $ N $ and $ M $. 
# +
# matrices M and N were defined above

# NM (the matrix N-tensor-M) is started as a (9x9)-dimensional zero matrix
NM = [[0] * 9 for _ in range(9)]

# each entry N(i,j) contributes the 3x3 block N(i,j)*M at block position (i,j)
for i in range(3): # row of N
    for j in range(3): # column of N
        factor = N[i][j]
        for k in range(3): # row of M
            for l in range(3): # column of M
                NM[3 * i + k][3 * j + l] = factor * M[k][l]

print("N-tensor-M is")
for row in NM:
    print(row)
# -

# <h3> Task 2 </h3>
#
# Find $ A \otimes B $ for the given matrices
# $
#     A = \mymatrix{rrr}{-1 & 0 & 1 \\ -2 & -1 & 2} ~~\mbox{and}~~
#     B = \mymatrix{rr}{0 & 2 \\ 3 & -1 \\ -1 & 1 }.
# $

# +
#
# your solution is here
#
A = [
    [-1, 0, 1],
    [-2, -1, 2]
]
B = [
    [0, 2],
    [3, -1],
    [-1, 1]
]

print("A =")
for row in A:
    print(row)

print() # print a line
print("B =")
for row in B:
    print(row)

# AB (the matrix A-tensor-B) is started as a (6x6)-dimensional zero matrix
AB = [[0] * 6 for _ in range(6)]

# each entry A(i,j) contributes the 3x2 block A(i,j)*B at block position (i,j)
for i in range(2):
    for j in range(3):
        factor = A[i][j]
        for m in range(3):
            for n in range(2):
                AB[3 * i + m][2 * j + n] = factor * B[m][n]

print() # print a line
print("A-tensor-B =")
print() # print a line
for row in AB:
    print(row)
# -

# <h3> Task 3 </h3>
#
# Find $ B \otimes A $ for the given matrices
# $
#     A = \mymatrix{rrr}{-1 & 0 & 1 \\ -2 & -1 & 2} ~~\mbox{and}~~
#     B = \mymatrix{rr}{0 & 2 \\ 3 & -1 \\ -1 & 1 }.
# $
#
# You can use the code in your (our) solution for Task 2.
#
# But please be careful with the indices and range values, and how they are interchanged (!)
# +
#
# your solution is here
#
A = [
    [-1, 0, 1],
    [-2, -1, 2]
]
B = [
    [0, 2],
    [3, -1],
    [-1, 1]
]

print() # print a line
print("B =")
for row in B:
    print(row)
print("A =")
for row in A:
    print(row)

# BA (the matrix B-tensor-A) is started as a (6x6)-dimensional zero matrix
BA = [[0] * 6 for _ in range(6)]

# each entry B(i,j) contributes the 2x3 block B(i,j)*A at block position (i,j)
for i in range(3):
    for j in range(2):
        factor = B[i][j]
        for m in range(2):
            for n in range(3):
                BA[2 * i + m][3 * j + n] = factor * A[m][n]

print() # print a line
print("B-tensor-A =")
print() # print a line
for row in BA:
    print(row)
QWorld's Global Quantum Programming Workshop/Mathematics For Quantum Computing/4.Tensor Product.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.5 64-bit # language: python # name: python37564bit9c6ef5c60eed4b8ca23f96caa58fb4e1 # --- # **<NAME>** # # 20170062 # # KSTN <NAME> - K62 # # *Đại học Bách khoa Hà Nội* import processviz as pvz import sentry_sdk sentry_sdk.init("https://31be2fd911834411b1b58755d06e9ac2@sentry.io/2445393") # **Câu 1:** # # Đầu tiên, dễ thấy $(X_n)$ là một xích Markov. # # Ta tìm ma trận xác suất chuyển của xích: # $$ # P = \left(\begin{matrix}0& \frac{1}{2} & 0 & \frac{1}{2}\\\frac{1}{2} & 0 & \frac{1}{2} & 0 \\0& \frac{1}{2} & 0 & \frac{1}{2}\\\frac{1}{2} & 0 & \frac{1}{2} & 0\end{matrix}\right) # $$ # *a.* Dễ thấy xích Markov trên là tối giản. # # Các trang thái có chu kì là G1 = pvz.MarkovChain() G1.from_file('./assignment/assignment_4.2/input_1.csv') # *b.*Gọi 1 dãy các bước đi ngẫu nhiên của con châu chấu là $(x_0,...,x_{100})$. Dễ thây để con châu chấu trở lại $x_0$ sau đúng 100 bước thì cần có $x_0 = x_{100}$ và $x_i \ne x_0,\forall i = \overline{1,99}$. # # Ta có $x_0 = 0$. Điều này dẫn đến $x_1 = 2$. Tại thời điểm $2n$ thấy rằng $x_{2n} \in \{1,3\}$ và $x_{2n+1} = 2$. Do đó số trường hợp để con châu chấu quay trở lại điểm xuất phát sau 100 bước là $\frac{1}{2^{50}}$. # --- # # **Câu 2:** Gọi $(X_n)$ là trạng thái của đồng xu tại lần tung thứ $n, n \ge 0$. Hiển nhiên $(X_n)$ là một xích Markov. # # Xét $Y_n = (X_n, X_{n+1}, X_{n+2})$. Ta chứng minh $(Y_n)$ là một xích Markov. 
# # Thật vậy, ta có # $$ # \begin{aligned}P(Y_{n+1} = y_{n+1}|Y_{n} = y_n) &= P[(X_n, X_{n+1}, X_{n+2}) = (i_n, j_{n+1}, k_{n+2})|(X_{n-1}, X_n, X_{n+1}) = (i_{n-1}, j_{n}, k_{n+1})] \\&= P[X_n, X_{n+1}, X_{n+2} = i_n, j_{n+1}, k_{n+2}|X_{n-1}, X_n, X_{n+1} = i_{n-1}, j_{n}, k_{n+1}]\label{eq1}\end{aligned} # $$ # Lại có # $$ # \begin{aligned}&P(Y_{n+1} = y_{n+1}|Y_{n} = y_n,..,Y_0) \\&= P[(X_n, X_{n+1}, X_{n+2}) = (i_n, j_{n+1}, k_{n+2})|(X_{n-1}, X_n, X_{n+1}) = (i_{n-1}, j_{n}, k_{n+1}),...,(X_0, X_1, X_2) = (i_0, j_1, k_2)] \\&= P[X_n, X_{n+1}, X_{n+2} = i_n, j_{n+1}, k_{n+2}|X_{n-1}, X_n, X_{n+1} = i_{n-1}, j_{n}, k_{n+1},...,X_0 = i_0] \\& = \frac{P(X_{n+2} = i_{n+2},...,X_0 = i_0)}{P(X_{n+1},..,X_0 = i_0)} \\& = \frac{P[X_{n+2}, X_{n+1}, X_{n} = i_n, j_{n+1}, k_{n+2}|X_{n+1}, X_n, X_{n-1}]}{P[X_{n+1}, X_{n}, X_{n-1},...,X_0]} \\& = P[X_n, X_{n+1}, X_{n+2} = i_n, j_{n+1}, k_{n+2}|X_{n-1}, X_n, X_{n+1} = i_{n-1}, j_{n}, k_{n+1}]\label{eq2}\end{aligned} # $$ # Vậy từ đó ta có $(Y_n)$ là xích Markov. 
# # Không gian trạng thái $I = \{(x, y, z) \in \{S, N\}^3 \}$ # # Ma trận xác suất chuyển $P$ được xác định như sau: G2 = pvz.MarkovChain() G2.from_file('./assignment/assignment_4.2/input_2.csv') state, matrix = G2.get_mean_time(type='absoring') matrix # Vậy trung bình mất số lần tung như trên để đạt đến trạng thái $NSN$ # # --- # **Câu 3:** # # Không gian trạng thái `I = {CS_THONGTHUONG, CS_DACBIET,CS_TANGCUONG,DONG_HOP, KHOI_BENH}` # # Ma trận xác suất chuyển # # | CS_THONGTHUONG | CS_DACBIET | CS_TANGCUONG | DONG_HOP | KHOI_BENH | # | -------------- | ---------- | ------------ | -------- | --------- | # | 0.3 | 0.15 | 0 | 0 | 0.55 | # | 0.2 | 0.55 | 0.1 | 0.05 | 0.1 | # | 0.05 | 0.3 | 0.55 | 0.1 | 0 | # | 0 | 0 | 0 | 1 | 0 | # | 0 | 0 | 0 | 0 | 1 | # # Phân phối ban đầu # # | CS_THONGTHUONG | CS_DACBIET | CS_TANGCUONG | DONG_HOP | KHOI_BENH | # | -------------- | ---------- | ------------ | -------- | --------- | # | 0.6 | 0.3 | 0.1 | 0 | 0 | # *a.* Xác suất để một bệnh nhân ở phòng ICU sau 2 ngày khỏi bệnh là $P_{24}^{(2)}$ G3 = pvz.MarkovChain() G3.from_file('./assignment/assignment_4.2/input_3.csv') G3.matrix_at(2)[2][4] state, vector = G3.get_mean_time(target='CS_THONGTHUONG') sum(vector) G3.get_mean_time(source='CS_THONGTHUONG') # *b.* Xác suất một bệnh nhân ở phòng ICU liên tiếp $k$ ngày là $0.55^k$ # # Do đó số ngày trung bình để bệnh nhân đó tiếp tục phải ở lại chăm sóc tại phòng ICU là # $$ # E(X|) = \underset{n \to \infty}{lim}\sum_{k = 1}^{n}k*0.55^k # $$ # # Xét # $$ # \begin{aligned}f(x) & = \sum_{i = 1}^{n}x^i \\\Rightarrow f'(x) & = \sum_{i = 1}^{n-1}ix^{i-1} \\\Rightarrow xf'(x) & = \sum_{i = 1}^{n-1}ix^{i}\end{aligned} # $$ # Mặt khác lại có # $$ # \begin{aligned}f(x) & = \frac{1-x^{n+1}}{1-x} \\\Rightarrow f'(x) & = \frac{1-x^{n+1}}{(1-x)^2} - \frac{x^{n+1}(n+1)}{x(1-x)} \\\Rightarrow xf'(x) & = x\frac{1-x^{n+1}}{(1-x)^2} - \frac{x^{n+2}(n+1)}{x(1-x)}\end{aligned} # $$ # Do đó # $$ # \begin{aligned}E(X) & = \underset{n \to 
\infty}{lim}\left(0.3\frac{1-0.3^{n+1}}{(1-0.3)^2} - \frac{0.3^{n+2}(n+1)}{0.3(1-0.3)}\right) \\& = \frac{0.55}{1-(0.55)^2} = 2.72\end{aligned} # $$ # # --- # # **Câu 4:** # Xét ma trận xác suất chuyển $P$, không gian trạng thái $I$ và tập các trạng thái hút $S$. Ta có: # # $$ # P_{ii} = 1, \forall i \in S # $$ # # Thời gian trung bình được xác định như sau; # # $$ # \left\{\begin{matrix} # k_i = 0, \forall i \in S\\ # k_i = 1+\sum_{j \in I / S}{P_{ij}k_j} # \end{matrix}\right. # $$ # # Chuyển vế ta được # # $$ # \left\{\begin{matrix} # k_i = 0, \forall i \in S\\ # (1-P_{ii})k_i - \sum_{j \ne i \in I / S}{P_{ij}k_j} = 1 # \end{matrix}\right. # $$ # # Hay ta được hệ phương trình viết lại dưới dạng ma trận như sau: # # $$ # A_1k = b_1 # $$ # # trong đó $A_1 = I - P, b_1 = (b_{1i})$ thỏa mãn $b_{1i} = 0$ nếu $i \in S$ và $b_{1i} = 1$ nếu $i \notin S$. # # Dễ thấy rằng $A_1$ là một `singular matrix`, do đó ta sẽ loại bớt tất cả những hàng $i$ có giá trị bằng 0 và cột $i$ tương ứng để được ma trận $A$. # # Thực hiện tương tự với $b_1$, ta xóa hết tất cả các hàng có số $0$. 
# # Chương trình như sau: # # ```python # def get_mean_time(self, source=None, target=None, type='transient'): # try: # state = mt.get_transient_state(self.state, self.P) # matrix = mt.get_mean_time_transient(self.state, self.P) # if type == 'absoring': # return state, (mt.get_mean_time_absoring(self.state, self.P)).tolist() # elif type == 'transient': # if source == None and target == None: # return state, matrix # elif source == None: # return state, (np.transpose(matrix)).tolist()[state.index(target)] # elif target == None: # return state, (matrix[state.index(source)]).tolist() # else: # return state, matrix[state.index(source)][state.index(target)] # except: # return "Invalid" # ``` # # --- # *Ví dụ trong giáo trình của <NAME> - trang 14* # # Ví dụ xét xích Markov có không gian trạng thái `I = {1, 2, 3, 4}` và ma trận xác suất chuyển # # | 1 | 2 | 3 | 4 | # | ---- | ---- | ---- | ---- | # | 1 | 0 | 0 | 0 | # | 0.5 | 0 | 0.5 | 0 | # | 0 | 0.5 | 0 | 0.5 | # | 0 | 0 | 0 | 1 | # # Thời điểm chạm trung bình được tính như sau: G4 = pvz.MarkovChain() G4.from_stdin(state=['1','2','3','4'], data=[[0,0,0.5,0.5],[1,0,0.5,0],[0,1,0,0.5],[0,1,0,0]]) state, expected_time = G4.get_mean_time(type='absoring') expected_time G4.classify_state()
Assignment_4.2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # JSON File to DSS # # __Description__: Converts the JSON files generated by `PM-EventsTable.ipynb` or `distalEventsTable.ipynb` to a single DSS. # # __Input__: Folder containing the pluvial forcing JSON files. # # __Output__: A DSS file with the pluvial forcing data. # # --- # ## A. Load Libraries, Parameters, and Data: # ### Libraries: import sys sys.path.append('../../core') from hydromet import* import hydromet_JSON_to_DSS # ### Parameters: # #### Site specific: # + ## Filenames and paths: Project_Area = 'Sacramento' # Project area name Pluvial_Model = 'P03' # Pluvial model name forcing = '{}_{}_Forcing'.format(Project_Area, Pluvial_Model) root_dir = pl.Path(os.getcwd()) outputs_dir = root_dir/'Outputs' forcing_dir = outputs_dir/forcing bin_dir = root_dir.parent.parent/'bin' ## Options: display_print = True remove_temp_files = True # - # ## B. Convert JSON Files to DSS: hydromet_JSON_to_DSS.main(forcing_dir, outputs_dir, bin_dir, forcing, remove_temp_files = remove_temp_files, display_print = display_print) # --- # ## End
notebooks/pluvial/_void/JSON_to_DSS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.5.0 # language: julia # name: julia-0.5 # --- using DataFrames using TensorFlow using Distributions using Images data_set_folder = "/raided/datasets/sketches_png/png/" list_of_imgs = open(data_set_folder * "filelist.txt") img_paths = readlines(list_of_imgs) test_paths = Array(String, 2500) path_index = 71 count = 1 for i = 1:2500 test_paths[i] = img_paths[path_index] path_index += 1 count += 1 if count % 11 == 0 path_index += 70 count = 1 end end files_in_folder = readdir(data_set_folder) labels = filter(x->isdir(data_set_folder * x) == true, files_in_folder) x = TensorFlow.placeholder(Float32) y_ = TensorFlow.placeholder(Float32) W = get_variable("weights", [77841, 250], Float32) b = get_variable("bias", [250], Float32) y = nn.softmax(x*W + b) cross_entropy = reduce_mean(-reduce_sum(y_ .* log(y), reduction_indices=[2])) train_step = train.minimize(train.GradientDescentOptimizer(.00001), cross_entropy) saver = train.Saver() gpu_options = TensorFlow.tensorflow.GPUOptions(allow_growth=true, per_process_gpu_memory_fraction=0.4) config = TensorFlow.tensorflow.ConfigProto(log_device_placement=false, gpu_options=gpu_options) # println(config) sess = Session(config=config) train.restore(saver, sess, "4percAcc-allCats") # + type DataLoader cur_id::Int order::Vector{Int} end TrainLoader() = DataLoader(1, shuffle(1:17500)) TestLoader() = DataLoader(1, shuffle(1:2500)) # - function get_test(loader::DataLoader) x = zeros(Float32, 2500, 77841) y = zeros(Float32, 2500, 250) for i = 1:2500 path = test_paths[loader.order[loader.cur_id]][1:end-1] sketch_name = split(path, "/")[1] img = load(data_set_folder * path) img = restrict(restrict(img)) flatten_img = reshape(img, 77841) x[i, :] = flatten_img label = find((x -> x == sketch_name), labels)[1] y[i, label] = 1.0 loader.cur_id += 1 if loader.cur_id > 2500 
println("========================") println(sketch_name) break end end x, y end # + correct_prediction = indmax(y, 2) .== indmax(y_, 2) accuracy=reduce_mean(cast(correct_prediction, Float64)) test_loader = TestLoader() testx, testy = get_test(test_loader) println(run(sess, indmax(y, 2), Dict(x=>testx, y_=>testy))) println(run(sess, indmax(y_, 2), Dict(x=>testx, y_=>testy))) println(run(sess, accuracy, Dict(x=>testx, y_=>testy))) # - function sketch_input(path, sketch_name) x = zeros(Float32, 1, 77841) y = zeros(Float32, 1, 250) img = load(path) img = restrict(restrict(img)) flatten_img = reshape(img, 77841) x[1, :] = flatten_img label = find((x -> x == sketch_name), labels)[1] y[1, label] = 1.0 x, y end # + sketch_path = "/raided/datasets/sketches_png/png/bee/1371.png" sketch_label = "bee" test_img, test_label = sketch_input(sketch_path, sketch_label) what_it_thinks = labels[run(sess, indmax(y,2), Dict(x=>test_img, y_=>test_label))[1] + 1] println("Is it an " * what_it_thinks * "? Answer: ", (sketch_label == what_it_thinks), ".") # -
LoadingTest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/twloehfelm/SAR2020/blob/master/04%20-%20Segmentation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="SkHz0FVN6F8u" colab_type="text" # <table width="100%"> # <tr> # <td valign="top"><img src="https://cdn.ymaws.com/www.abdominalradiology.org/graphics/logo.jpg"/></td> # <td valign="middle" align="right"><h1>SAR 2020<br/>AI Masters Class</h1></td> # </tr> # <tr> # <td align="center" colspan=2><h1>Segmentation</h1></td> # </tr> # </table> # # + [markdown] id="2O-42dYnoR3Q" colab_type="text" # We are going to build a liver segmentation tool using Facebook's detectron2 object identification algorithm trained on 18 CTs from the Combined Healthy Abdominal Organ Segmentation (CHAOS) Challenge. We'll test it on 2 CTs from the same challenge that are not included in the training set. # <br/><br/><br/> # **References to the original dataset and related publications:** # # > <sub><sup><NAME>, <NAME>, <NAME>, <NAME>, <NAME>zer. CHAOS - Combined (CT-MR) Healthy Abdominal Organ Segmentation Challenge Data (Version v1.03) [Data set]. Apr. 2019. Zenodo. http://doi.org/10.5281/zenodo.3362844 </sup></sub> # # > <sub><sup><NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al. "CHAOS Challenge - Combined (CT-MR) Healthy Abdominal Organ Segmentation", arXiv pre-print, Jan. 2020. https://arxiv.org/abs/2001.06535</sup></sub> # # > <sub><sup><NAME>, <NAME>, <NAME>, Y.Şahin, <NAME>, <NAME>, et al. "Comparison of semi-automatic and deep learning-based automatic methods for liver segmentation in living liver transplant donors", Diagnostic and Interventional Radiology, vol. 26, pp. 
11–21, Jan. 2020. https://doi.org/10.5152/dir.2019.19025</sup></sub> # + [markdown] id="vM54r6jlKTII" colab_type="text" # # Install required packages # + id="9_FzH13EjseR" colab_type="code" colab={} # install dependencies: # !pip install pyyaml==5.1 import torch, torchvision print(torch.__version__, torch.cuda.is_available()) # !gcc --version # opencv is pre-installed on colab # - # install detectron2: (Colab has CUDA 10.1 + torch 1.7) # See https://detectron2.readthedocs.io/tutorials/install.html for instructions import torch assert torch.__version__.startswith("1.7") # !pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.7/index.html # exit(0) # After installation, you need to "restart runtime" in Colab. This line can also restart runtime # !pip3 install -q pydicom # !pip3 install -q pypng # + id="4Qg7zSVOulkb" colab_type="code" colab={} # Download liver seg training data to root directory # !wget -q https://www.dropbox.com/s/1tprn2uubhl29xt/chaos_train.zip # !unzip chaos_train.zip > /dev/null #-d /content/drive/My\ Drive/LiverSeg > /dev/null # + id="ZyAvNCJMmvFF" colab_type="code" colab={} ## You may need to restart your runtime prior to this, to let your installation take effect # Some basic setup: # Setup detectron2 logger import detectron2 from detectron2.utils.logger import setup_logger setup_logger() # import some common libraries import numpy as np import cv2 import random # import some common detectron2 utilities from detectron2 import model_zoo from detectron2.engine import DefaultPredictor, DefaultTrainer, default_argument_parser, default_setup, launch from detectron2.config import get_cfg from detectron2.utils.visualizer import Visualizer, ColorMode from detectron2.structures import BoxMode from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader, build_detection_test_loader from detectron2.data import transforms as T from detectron2.data import detection_utils as utils # Imports for 
# liver seg
import os
from pathlib import Path
import json
import pydicom
import pycocotools
import png
import matplotlib.pyplot as plt
import copy

# Detect whether we are running inside Google Colab.
try:
    import google.colab
    IN_COLAB = True
except ImportError:
    IN_COLAB = False

if IN_COLAB:
    # opencv is pre-installed on colab
    # colab cannot use the standard imshow due to some html/web limitation
    from google.colab.patches import cv2_imshow as imshow
else:
    # #!pip install opencv-python
    from matplotlib.pyplot import imshow

# %matplotlib inline

# + id="qi2amjKT9Trx" colab_type="code" colab={}
# Remove archive leftovers and Colab's sample data before working.
# !rm -rf ./__MACOSX
# !rm -rf ./chaos_train.zip
# !rm -rf ./sample_data

ROOT_PATH = Path('/content')
chaos = ROOT_PATH/'chaos_train'
pts_dir = chaos/'CT'  # one sub-directory per patient study
#testdir = ROOT_PATH/'test'

# + [markdown] id="b2bjrfb2LDeo" colab_type="text"
# # Prepare the dataset

# + id="JFR6B1Kh99Bb" colab_type="code" colab={}
# Create lists of DICOMs and segmentation files for the training and validation datasets
dicoms_train = list()
segs_train = list()
dicoms_val = list()
segs_val = list()

# Randomly split patients 90/10 into training/validation. 20 studies, so 18 train and 2 val
pts = [x for x in pts_dir.iterdir() if x.is_dir()]
random.seed(716)  # fixed seed so the split is reproducible across runs
random.shuffle(pts)
train_pts = pts[:18]
val_pts = pts[18:]

# For each patient, add DICOMs and segmentation files to the respective lists
# N.B. DICOM and seg file names must sort in the same order, so that
# dicoms_*[i] and segs_*[i] refer to the same slice.
for p in train_pts:
    dicoms_train += sorted([x for x in (p/'DICOM_anon').iterdir() if x.is_file()])
    segs_train += sorted([x for x in (p/'Ground').iterdir() if x.is_file()])
for p in val_pts:
    dicoms_val += sorted([x for x in (p/'DICOM_anon').iterdir() if x.is_file()])
    segs_val += sorted([x for x in (p/'Ground').iterdir() if x.is_file()])

# + id="yfsEnRXLkU2N" colab_type="code" colab={}
def mapper(dataset_dict):
    """Map one dataset dict into the input format expected by detectron2 models.

    Args:
        dataset_dict (dict): Metadata of one image, in detectron2 Dataset format.

    Returns:
        dict: a format that builtin models in detectron2 accept
    """
    # Implement a mapper, similar to the default DatasetMapper, but with your own customizations
    dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
    ds = pydicom.dcmread(dataset_dict["file_name"])
    image = ds.pixel_array
    # Convert pixel values to Hounsfield units
    image = image*ds.RescaleSlope + ds.RescaleIntercept
    # Photometric-only augmentation; the returned `transforms` object is still
    # applied to the annotations below so masks/boxes stay consistent.
    image, transforms = T.apply_transform_gens(
        [T.RandomBrightness(0.8, 1.2), T.RandomContrast(0.8, 1.2)], image)
    dataset_dict["image"] = torch.as_tensor(image.astype("float32"))

    annos = [
        utils.transform_instance_annotations(obj, transforms, image.shape[:2])
        for obj in dataset_dict.pop("annotations")
        if obj.get("iscrowd", 0) == 0
    ]
    instances = utils.annotations_to_instances(annos, image.shape[:2], mask_format='bitmask')
    dataset_dict["instances"] = utils.filter_empty_instances(instances)
    return dataset_dict


class LiverTrainer(DefaultTrainer):
    """DefaultTrainer that plugs the custom DICOM `mapper` into both data loaders."""

    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
        return build_detection_test_loader(cfg, dataset_name, mapper=mapper)

    @classmethod
    def build_train_loader(cls, cfg):
        return build_detection_train_loader(cfg, mapper=mapper)


# + id="c3TFNA-47FYP" colab_type="code" colab={}
def load_mask(image_path):
    """Load masks for the given image.

    Mask is a binary true/false map of the same size as the image.

    args:
        image_path = complete path to segmentation file
    returns:
        boolean array of the liver segmentation mask
    """
    mask = plt.imread(str(image_path))
    # `np.bool` was deprecated and removed in NumPy 1.24; the builtin `bool`
    # is the same dtype.
    return mask.astype(bool)


def bbox(img):
    """Generates a bounding box from a segmentation mask.

    Bounding box is given as coordinates of upper left and lower right corner.

    args:
        img = boolean array of a segmentation mask (or None when the slice
            has no ground-truth file)
    returns:
        (x_min, y_min, x_max, y_max) coordinates, or None if the mask is
        empty or missing
    """
    try:
        x, y = np.where(img)
    except ValueError:
        # np.where(None) returns a 1-tuple, so the 2-way unpack above raises
        # ValueError; treat a missing mask the same as an empty one.
        return None
    if x.size != 0:
        # np.where gives (rows, cols); reorder to XYXY pixel coordinates.
        bbox = y.min(), x.min(), y.max(), x.max()
    else:
        bbox = None
    return bbox


# from fastai2 medical imaging
def windowed(px, w, l):
    """Windows a pixel_array of Hounsfield units.

    Clips `px` IN PLACE to the window [l - w//2, l + w//2] and rescales the
    result to the [0, 1] range for display.

    args:
        px = pixel array in Hounsfield units
        w = window width (HU range)
        l = window level (center point)
    returns:
        pixel_array converted to the given window/level
    """
    # Some DICOMs store several window presets as a MultiValue; use the first.
    if type(w) == pydicom.multival.MultiValue:
        w = w[0]
    if type(l) == pydicom.multival.MultiValue:
        l = l[0]
    px_min = l - w//2
    px_max = l + w//2
    px[px < px_min] = px_min
    px[px > px_max] = px_max
    return (px - px_min) / (px_max - px_min)


# Used in the DatasetCatalog.register call
def get_liver_dicts(train_or_val):
    """Builds a dataset_dict for detectron2.

    args:
        train_or_val = string 'train' or 'val' indicating whether to return
            the training or validation dataset_dict
    returns:
        dataset_dict with each element of the training or validation dataset
    """
    if train_or_val == "train":
        dicoms = dicoms_train
        segs = segs_train
    elif train_or_val == "val":
        dicoms = dicoms_val
        segs = segs_val

    dataset_dicts = []
    for idx, v in enumerate(dicoms):
        record = {}
        filename = str(v)
        ds = pydicom.dcmread(filename)
        height, width = ds.Rows, ds.Columns

        # Minimum required fields for each element in the dict
        record["file_name"] = filename  # Full path to image file
        record["image_id"] = idx        # Index of file (unique serial number)
        record["height"] = height       # Image dimension
        record["width"] = width         # Image dimension

        try:
            mask = load_mask(str(segs[idx]))
        except IndexError:
            # Fewer segmentation files than DICOMs: this slice has no ground truth.
            mask = None

        # Add list of segmentation object(s)
        objs = []
        box = bbox(mask)  # compute once; it is both the guard and the value
        if box is not None:
            obj = {
                "bbox": box,
                "bbox_mode": BoxMode.XYXY_ABS,
                # Convert binary mask to RLE format
                "segmentation": pycocotools.mask.encode(np.asarray(mask, order="F")),
                "category_id": 0,
                # COCO-standard key; `mapper` filters on obj.get("iscrowd", 0),
                # so the previous "is_crowd" spelling was silently ignored.
                "iscrowd": 0
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts


# + id="RgYw3tuOAs2Q" colab_type="code" colab={}
# Clear existing DatasetCatalog and then register the training and validation datasets
DatasetCatalog.clear()
for d in ["train", "val"]:
    DatasetCatalog.register("liver_" + d, lambda d=d: get_liver_dicts(d))
    MetadataCatalog.get("liver_" + d).set(thing_classes=["liver"])
liver_metadata = MetadataCatalog.get("liver_train")

# + [markdown] id="6ljbWTX0Wi8E" colab_type="text"
# # Verify Data Loading
# To verify the data loading is correct, let's visualize the annotations of randomly selected samples in the training set:

# + id="Tp1Ft4x-Kx23" colab_type="code" colab={}
dataset_dicts = get_liver_dicts("train")

# + id="UkNbUzUOLYf0" colab_type="code" colab={}
# Choose three random images from the training dataset_dict and display image with mask overlay
for d in random.sample(dataset_dicts, 3):
    ds = pydicom.dcmread(d["file_name"])
    im = ds.pixel_array
    im = im*ds.RescaleSlope + ds.RescaleIntercept
    im = windowed(im, ds.WindowWidth, ds.WindowCenter)
    # Replicate the single channel to 3 channels and scale to 0-255 for display.
    im = np.stack((im,) * 3, -1)
    im = im*255
    v = Visualizer(im[:, :, ::-1],
                   metadata=liver_metadata,
                   scale=0.8
                   )
    v = v.draw_dataset_dict(d)
    imshow(v.get_image()[:, :, ::-1])

# + [markdown] colab_type="text" id="XW5EakiuV2Ar"
# # Train
#
# Now, let's fine-tune a coco-pretrained R50-FPN Mask R-CNN model on the liver dataset. It takes ~2 minutes to train 300 iterations on Colab.
# # + id="7unkuuiqLdqd" colab_type="code" colab={} cfg = get_cfg() cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")) cfg.DATASETS.TRAIN = ("liver_train",) cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS = False cfg.DATASETS.TEST = () cfg.DATALOADER.NUM_WORKERS = 2 cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml") # Let training initialize from model zoo # These three SOLVER parameters are probably the best places to start tweaking to modify performance cfg.SOLVER.IMS_PER_BATCH = 8 cfg.SOLVER.BASE_LR = 0.001 # Can experiment with differnt base learning rates cfg.SOLVER.MAX_ITER = 500 # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # faster, and good enough for this simple dataset (default: 512) cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1 # only has one class (liver) cfg.INPUT.FORMAT = "F" #32-bit single channel floating point pixels cfg.INPUT.MASK_FORMAT = "bitmask" # Needed to change this from the default "polygons" os.makedirs(cfg.OUTPUT_DIR, exist_ok=True) trainer = LiverTrainer(cfg) trainer.resume_or_load(resume=False) trainer.train() # + id="hBXeH8UXFcqU" colab_type="code" colab={} # Look at training curves in tensorboard: # %load_ext tensorboard # %tensorboard --logdir output # + id="h0JeEVP7MS8A" colab_type="code" colab={} # %reload_ext tensorboard # + [markdown] id="0e4vdDIOXyxF" colab_type="text" # # Inference & evaluation using the trained model # Now, let's run inference with the trained model on the validation dataset. 
# First, let's create a predictor using the model we just trained:

# + id="CYHrysR9MgV6" colab_type="code" colab={}
from detectron2.modeling import build_model
from detectron2.checkpoint import DetectionCheckpointer


class LiverPredictor:
    """
    Create a simple end-to-end predictor with the given config that runs on
    single device for a single input image.

    Compared to using the model directly, this class does the following additions:

    1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
    2. Take a single-channel float image (Hounsfield units, as produced by the
       training `mapper`) and wrap it into the model's expected input dict.
    3. Take one input image and produce a single output, instead of a batch.

    NOTE(review): adapted from detectron2's DefaultPredictor, whose docstring
    promises BGR conversion and test-time resizing. Neither happens here:
    `self.transform_gen` is built in `__init__` but never applied in
    `__call__`, and the image is passed through unchanged — confirm this is
    intended before reusing this class elsewhere.

    Attributes:
        metadata (Metadata): the metadata of the underlying dataset, obtained from
            cfg.DATASETS.TEST.

    Examples:

    .. code-block:: python

        pred = LiverPredictor(cfg)
        outputs = pred(hounsfield_image)
    """

    def __init__(self, cfg):
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        self.model.eval()
        self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])

        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)

        # NOTE(review): kept for parity with DefaultPredictor, but this resize
        # transform is never used in __call__ (no resizing is applied).
        self.transform_gen = T.ResizeShortestEdge(
            [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
        )

        self.input_format = cfg.INPUT.FORMAT

    def __call__(self, original_image):
        """
        Args:
            original_image (np.ndarray): a single channel image.

        Returns:
            predictions (dict): the output of the model for one image only.
            See :doc:`/tutorials/models` for details about the format.
        """
        with torch.no_grad():  # https://github.com/sphinx-doc/sphinx/issues/4258
            height, width = original_image.shape[:2]
            image = original_image
            image = torch.as_tensor(image.astype("float32"))

            inputs = {"image": image, "height": height, "width": width}
            predictions = self.model([inputs])[0]
            return predictions


# + id="Ya5nEuMELeq8" colab_type="code" colab={}
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set the testing threshold for this model
cfg.DATASETS.TEST = ("liver_val", )
predictor = LiverPredictor(cfg)

# + [markdown] id="qWq1XHfDWiXO" colab_type="text"
# Then, we randomly select several samples to visualize the prediction results.

# + id="U5LhISJqWXgM" colab_type="code" colab={}
dataset_dicts = get_liver_dicts("val")
for d in random.sample(dataset_dicts, 10):
    ds = pydicom.dcmread(d["file_name"])
    im = ds.pixel_array
    # The model sees raw Hounsfield units; windowing below is for display only.
    im = im*ds.RescaleSlope + ds.RescaleIntercept
    outputs = predictor(im)
    im = windowed(im, ds.WindowWidth, ds.WindowCenter)
    im = np.stack((im,) * 3, -1)
    im = im*255
    v = Visualizer(im[:, :, ::-1],
                   metadata=liver_metadata,
                   scale=0.8,
                   instance_mode=ColorMode.IMAGE
                   )
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    imshow(v.get_image()[:, :, ::-1])
04 - Segmentation.ipynb
// -*- coding: utf-8 -*- // --- // jupyter: // jupytext: // text_representation: // extension: .java // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Java // language: java // name: java // --- // + [markdown] slideshow={"slide_type": "slide"} // # Vererbung // // #### <NAME>, Departement Mathematik und Informatik, Universität Basel // + [markdown] slideshow={"slide_type": "slide"} // ### Klassen als eigene Datentypen // // Klassen lassen uns eigenes Vokabular definieren // * Können Konzepte aus Problemdomäne modellieren // * Beispiel aus Geometrie: Point, Vector, Line, ... // // + [markdown] slideshow={"slide_type": "slide"} // ### Beispiel: Punkte und Vektoren // - class Vector { double x; double y; Vector(double x, double y) { this.x = x; this.y = y; } } // + slideshow={"slide_type": "fragment"} class Point { double x; double y; Point(double x, double y) { this.x = x; this.y = y; } Point add(Vector v) { return new Point(this.x + v.x, this.y + v.y); } Vector minus(Point p2) { return new Vector(p2.x - this.x, p2.y - this.y); } } // + [markdown] slideshow={"slide_type": "slide"} // ### Typsystem // // Typsystem hilft Konzepte auseinanderzuhalten // // * Zwingt uns verschiedene Konzepte zu unterscheiden // * Verhindert viele Fehler beim Programmieren // // Beispiel: // // * Punkt != Vektor // + [markdown] slideshow={"slide_type": "slide"} // ### Beispiel: Punkte und Vektoren // // + Point p1 = new Point(1, 3); Point p2 = new Point(2, 4); //p1.add(v); // Funktioniert //p1.add(p2); // Funktioniert nicht Vector v = p1.minus(p2); // Funktioniert Point p = p2.minus(p1); // Funktioniert nicht // + [markdown] slideshow={"slide_type": "slide"} // ### Hierarchien von Konzepten // // Manche Konzepte können hierarchisch angeordnet werden // * Zuweisung in allgemeineres Konzept soll möglich sein // // ![class hierarchy](images/class-hierarchy.png) // + slideshow={"slide_type": "subslide"} Integer i = new Integer(5); 
Double d = new Double(3.5); Number n = d; // + [markdown] slideshow={"slide_type": "slide"} // ### Vererbung: Übersicht // // ##### Interfaces // * Garantiert, dass alle Subklassen dieselben Operationen implementieren // // ##### Abstrakte Klassen // * Klasse, welche Teile einer Implementation offenlässt // * Subklassen implementieren diese // // ##### Klassen // * Klasse ist normale Klasse // * Subklasse erweitert Konzept // // // + [markdown] slideshow={"slide_type": "slide"} // ### Interfaces // - // Grundidee: Gemeinsame Methoden aller Klassen werden definiert interface TurtleOps { void forward(int distance); void turnRight(double angle); void printPos(); // ... } // + [markdown] slideshow={"slide_type": "slide"} // ### Implementation 1 // - class Turtle implements TurtleOps { private double xPos; private double yPos; private double direction = 0; public void turnRight(double angle) { this.direction += angle; } public void forward(int distance) { xPos += Math.cos(this.direction) * distance; yPos += Math.sin(this.direction) * distance; } public void printPos() { System.out.println("(" + xPos + "," + yPos + ")"); } } // + [markdown] slideshow={"slide_type": "slide"} // ### Implementation 2 // + class TurtleRandomWalker implements TurtleOps { private double xPos; private double yPos; private Random rng = new Random(42); private double direction = 0; public void turnRight(double angle) { this.direction += angle; } public void forward(int distance) { xPos += Math.cos(rng.nextDouble() * 2 * Math.PI) * distance; yPos += Math.sin(rng.nextDouble() * 2 * Math.PI) * distance; } public void printPos() { System.out.println("(" + xPos + "," + yPos + ")"); } public void foo() {} } // + [markdown] slideshow={"slide_type": "slide"} // ### Interfaces als Datentyp // // * Interface kann als Datentyp benutzt werden. 
// * Zuweisung von allen Klassen die Interface implementieren möglich // - TurtleOps t1 = new Turtle(); TurtleOps t2 = new TurtleRandomWalker(); t2.foo(); // + [markdown] slideshow={"slide_type": "slide"} // ### Interfaces als Datentyp // - // Häufig bei Methodendeklarationen benutzt. // // * Abstrahiert konkrete Implementation. // + void animateTurtle(TurtleOps t, int numSteps) { for (int i = 0; i < numSteps; i++) { t.forward(1); t.printPos(); } } animateTurtle(new Turtle(), 10); // + [markdown] slideshow={"slide_type": "slide"} // ### Abstrakte Klassen // - // Einsatz: Klasse kann bis auf wenige Stellen implementiert werden // * Subklassen vervollständigen Implementation abstract class TurtleLike { double xPos; double yPos; double direction; Random rng = new Random(42); abstract public void forward(int distance); public void turnRight(double angle) { this.direction += angle; } public void printPos() { System.out.println("(" + xPos + "," + yPos + ")"); } } // + [markdown] slideshow={"slide_type": "slide"} // ### Konkrete Implementationen // - class Turtle extends TurtleLike { @Override public void forward(int distance) { xPos += Math.cos(this.direction) * distance; yPos += Math.sin(this.direction) * distance; } } class TurtleRandomWalker extends TurtleLike { @Override public void forward(int distance) { xPos += Math.cos(rng.nextDouble() * 2 * Math.PI) * distance; yPos += Math.sin(rng.nextDouble() * 2 * Math.PI) * distance; } } // + [markdown] slideshow={"slide_type": "slide"} // ### Verwendung als Datentyp // // * Abstrakte Klasse kann als Datentyp verwendet werden // * Zuweisung von allen Unterklassen möglich // - TurtleLike t = new Turtle(); t.forward(10); t.printPos(); // + [markdown] slideshow={"slide_type": "slide"} // ### Vererbung von Klassen // - // Einsatz: Erweiterung einer Klasse mit zusätzlicher Funktionalität class Turtle { double xPos; double yPos; double direction; public void forward(int distance) { System.out.println("forward in turtle"); xPos += 
Math.cos(this.direction) * distance; yPos += Math.sin(this.direction) * distance; } public void turnRight(double angle) { this.direction += angle; } public void printPos() { System.out.println("(" + xPos + "," + yPos + ")"); } } // + [markdown] slideshow={"slide_type": "slide"} // ### Erweiterung // - class TurtleWithColor extends Turtle { java.awt.Color color = java.awt.Color.BLACK; void setPenColor(java.awt.Color color) { this.color = color; } } TurtleWithColor t = new TurtleWithColor(); t.setPenColor(java.awt.Color.BLUE); t.forward(10); // + [markdown] slideshow={"slide_type": "slide"} // ### Terminologie // // ![oo-terminology](images/oo-terminology.png) // + [markdown] slideshow={"slide_type": "slide"} // ### Überschreiben von Methoden // // * Subklassen können Verhalten von Methoden durch *Überschreiben* ändern. // + class LazyTurtle extends Turtle { @Override public void forward(int distance) { System.out.println("Ich laufe langsam"); xPos += Math.cos(this.direction) * distance / 4; yPos += Math.sin(this.direction) * distance / 4; } } // + [markdown] slideshow={"slide_type": "slide"} // ### Nutzen der Superklassenimplementation // - // Das Keyword ```super``` erlaubt es auf die Superklassenimplementation zuzugreifen. class EagerTurtle extends Turtle { @Override public void forward(int distance) { System.out.println("Ich laufe doppelt so schnell"); super.forward(distance); super.forward(distance); } } EagerTurtle turtle = new EagerTurtle(); turtle.forward(10);
notebooks/Vererbung.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Large Dataset Model Building On All Joints import collections import pandas as pd import numpy as np import time import os import tensorflow as tf from tensorflow.keras.layers import * from tensorflow.keras.models import Model from tensorflow.keras.models import load_model from tensorflow.keras.utils import plot_model import matplotlib.pyplot as plt pd.set_option('display.max_columns', 500) os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE' tf.__version__ # ### Load Datasets and Clean # In this configuration the relevant data set should be loaded from the same folder as the notebook df = pd.read_csv('jackson_megadata_10_23.csv') # The data consists of timestamps from the two hardware devices and a diff between them. When the two hardware data streams were stitched together an effor was made to minimize this diff, but the driver configuration did not easily permit eliminating it. This information is included to understand the accuracy of the data, but will not be used during the training. # # The time data is followed by the 8 channels from the Myo, this data will be used as input features. # # This is followed by the 63 positional points from the Leap cameras. These will be used as labels. df.head() df = df.drop(labels=["Leap timestamp", "timestamp diff", "emg timestamp"], axis=1) df = df.drop(df.columns[0], axis=1) df.describe() feature_ar = df.loc[:, 'ch1':'ch8'].values label_ar = df.loc[:, 'Wrist x':].values feature_ar.shape label_ar.shape # From the above evaluation the initial untrained loss is around 2500. 
# Now train the model: # + seq_length = 32 def overlap_samples(seq_length, feats, labels): new_l = labels[seq_length - 1:] feat_list = [feats[i:i + seq_length] for i in range(feats.shape[0] - seq_length + 1)] new_f = np.array(feat_list) return new_f, new_l features, labels = overlap_samples(seq_length, feature_ar, label_ar) print(features.shape) print(labels.shape) # - # ### FC Architecture (Small) # + model_fc = tf.keras.models.Sequential() model_fc.add(LSTM(64, return_sequences=True, input_shape=(seq_length, 8))) model_fc.add(Dropout(0.5)) model_fc.add(LSTM(64)) model_fc.add(BatchNormalization()) model_fc.add(Dense(100, input_dim=64)) model_fc.add(Activation('relu')) model_fc.add(BatchNormalization()) model_fc.add(Dropout(0.5)) model_fc.add(Dense(64, input_dim=64)) model_fc.add(Activation('relu')) model_fc.add(Dropout(0.5)) model_fc.add(Dense(63, input_dim=64)) model_fc.compile(optimizer='Adam', loss='mse') history = model_fc.fit(features, labels, batch_size=2048, epochs=5, verbose=1, validation_split=0.2) # - model_fc.summary() # ### Visual Model Error # A similar visualizion is now done on this new more restricted model. These errors show 20% - 25% improvement on fingertip y position. plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.legend(['Train', 'Test']) preds = model_fc.predict(features) error = labels - preds abs_error = np.absolute(error) avg_error = np.mean(abs_error, axis=0) plt.figure(figsize=(15, 10)) plt.xticks(rotation=90) plt.ylabel('Prediction Error (mm)') bar = plt.bar(df.columns[8:], avg_error) for i in range(0,63,3): bar[i].set_color('coral') bar[i+1].set_color('olivedrab') plt.show() # ## Large FC Model Architecture # Here a much larger model architecture is implemented to understand if this will improve results on the larger dataset. This dataset is approx 650MB, thus a larger model with proportionally higher parameters is expected to be required. 
# + model_fc = tf.keras.models.Sequential() model_fc.add(LSTM(256, return_sequences=True, input_shape=(seq_length, 8))) model_fc.add(Dropout(0.5)) model_fc.add(LSTM(256, return_sequences=True)) model_fc.add(Dropout(0.5)) model_fc.add(LSTM(128)) model_fc.add(BatchNormalization()) model_fc.add(Dense(512, input_dim=128)) model_fc.add(Activation('relu')) model_fc.add(BatchNormalization()) model_fc.add(Dropout(0.5)) model_fc.add(Dense(512, input_dim=512)) model_fc.add(Activation('relu')) model_fc.add(BatchNormalization()) model_fc.add(Dropout(0.5)) model_fc.add(Dense(256, input_dim=512)) model_fc.add(Activation('relu')) model_fc.add(Dropout(0.3)) model_fc.add(Dense(63, input_dim=64)) model_fc.compile(optimizer='Adam', loss='mse') # - model_fc.summary() history = model_fc.fit(features, labels, batch_size=512, epochs=4, verbose=1, validation_split=0.2) plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.legend(['Train', 'Test']) preds = model_fc.predict(features) error = labels - preds abs_error = np.absolute(error) avg_error = np.mean(abs_error, axis=0) plt.figure(figsize=(15, 10)) plt.xticks(rotation=90) plt.ylabel('Prediction Error (mm)') bar = plt.bar(df.columns[8:], avg_error) for i in range(0,63,3): bar[i].set_color('coral') bar[i+1].set_color('olivedrab') plt.show() def r2_score(v_true, v_pred): ssres = np.sum(np.square(v_true - v_pred)) sstot = np.sum(np.square(v_true - np.mean(v_true))) return 1 - ssres / sstot r2_score(labels, preds) model_fc.save('FC_mega_dataset_all_joints_model.h5')
ml/Large_dataset_build_model_all_joints.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # OpenML - Machine Learning as a community # > A description of how OpenML fits into traditional ML practices # # - toc: true # - badges: true # - comments: true # - categories: [OpenML] # - image: images/fastpages_posts/openml.png # - author: <NAME> # [OpenML](https://www.openml.org/) is an online Machine Learning (ML) experiments database accessible to everyone for free. The core idea is to have a single repository of datasets and results of ML experiments on them. Despite having gained a lot of popularity in recent years, with a plethora of tools now available, the numerous ML experimentations continue to happen in silos and not necessarily as one whole shared community. # In this post, we shall try to get a brief glimpse of what OpenML offers and how it can fit our current Machine Learning practices. # # Let us jump straight at getting our hands dirty by building a simple machine learning model. If it is simplicity we are looking for, it has to be the Iris dataset that we shall work with. In the example script below, we are going to load the Iris dataset available with scikit-learn, use 10-fold cross-validation to evaluate a Random Forest of 10 trees. Sounds trivial enough and is indeed less than 10 lines of code. 
from sklearn import datasets from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score # Loading Iris dataset X, y = datasets.load_iris(return_X_y=True) print(X.shape, y.shape) # Initializing a Random Forest with # arbitrary hyperparameters # max_depth kept as 2 since Iris has # only 4 features clf = RandomForestClassifier(n_estimators=10, max_depth=2) scores = cross_val_score(clf, X, y, cv=5, scoring='accuracy') print("Mean score : {:.5f}".format(scores.mean())) # A simple script and we achieve a mean accuracy of **95.33%**. That was easy. It is really amazing how far we have come with ML tools that make it easy to get started. As a result, we have hundreds of thousands of people working with these tools every day. That inevitably leads to the reinvention of the wheel. The tasks that each individual ML practitioner performs often have significant overlaps and can be omitted by reusing what someone from the community has done already. At the end of the day, we didn't build a Random Forest model all the way from scratch. We gladly reused code written by generous folks from the community. The special attribute of our species is the ability to work as a collective wherein our combined intellect becomes larger than the individual sum of parts. Why not do the same for ML? I mean, can I see what other ML practitioners have done to get better scores on the Iris dataset? # # Answering this is one of the targets of this post. We shall subsequently explore if this can be done, with the help of [OpenML](https://www.openml.org/). However, first, we shall briefly familiarize ourselves with few terminologies and see how we can split the earlier example we saw into modular components. 
# ### OpenML Components # <figure> # <img src="../images/fastpages-posts/openml.png" alt="Image source"> # <figcaption></figcaption> # </figure> # Image source: <a href="https://medium.com/open-machine-learning/openml-1e0d43f0ae13">https://medium.com/open-machine-learning/openml-1e0d43f0ae13</a> # **Dataset**: OpenML houses over 2k+ active datasets for various regression, classification, clustering, survival analysis, stream processing tasks and more. Any user can upload a dataset. Once uploaded, the server computes certain meta-features on the dataset - *Number of classes*, *Number of missing values*, *Number of features*, etc. With respect to our earlier example, the following line is the equivalent of fetching a dataset from OpenML. X, y = datasets.load_iris(return_X_y=True) # **Task**: A task is linked to a specific dataset, defining what the target/dependent variable is. Also specifies evaluation measures such as - accuracy, precision, area under curve, etc. or the kind of estimation procedure to be used such as - 10-fold *cross-validation*, n% holdout set, etc. With respect to our earlier example, the *parameters* to the following function call capture the idea of a task. scores = cross_val_score(clf, X, y, cv=5, scoring='accuracy') # **Flow**: Describes the kind of modelling to be performed. It could be a flow or a series of steps, i.e., a scikit-learn pipeline. For now, we have used a simple Random Forest model which is the *flow* component here. clf = RandomForestClassifier(n_estimators=10, max_depth=2) # **Run**: Pairs a *flow* and task together which results in a *run*. The *run* has the predictions which are turned into *evaluations* by the server. This is effectively captured by the *execution* of the line: scores = cross_val_score(clf, X, y, cv=5, scoring='accuracy') # Now, this may appear a little obfuscating given that we are trying to compartmentalize a simple 10-line code which works just fine. 
However, if we take a few seconds to go through the 4 components explained above, we can see that it makes our *training of a Random Forest* on Iris a series of modular tasks. Modules are such a fundamental concept in Computer Science. They are like Lego blocks. Once we have modules, it means we can plug and play at ease. The code snippet below attempts to rewrite the earlier example using the ideas of the OpenML components described, to give a glimpse of what we can potentially gain during experimentations. from sklearn import datasets from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score # #### DATASET component # To load IRIS dataset as a dataset module/component def dataset(): X, y = datasets.load_iris(return_X_y=True) return X, y # #### TASK component # + # Tasks here define the number of cross-validation folds # and the scoring metric to be used for evaluation def task_1(f): X, y = dataset() # loads IRIS return cross_val_score(f, X, y, cv=5, scoring='accuracy') def task_2(f): X, y = dataset() # loads IRIS return cross_val_score(f, X, y, cv=15, scoring='balanced_accuracy') # - # #### FLOW component # + # Flows determine the modelling technique to be applied # Helps define a model irrespective of dataset or tasks def flow_1(): clf = RandomForestClassifier(n_estimators=10, max_depth=2) return clf def flow_2(): clf = SVC(gamma='auto', kernel='linear') return clf # - # #### RUN component # Runs essentially evaluates a task-flow pairing # and therefore in effect executs the modelling # of a dataset as per the task task definition def run(task, flow): return task(flow) # + # Results for Random Forest rf_task_1 = run(task_1, flow_1()) rf_task_2 = run(task_2, flow_1()) print("RF using task 1: {:<.5}; task 2: {:<.5}".format(rf_task_1.mean(), rf_task_2.mean())) # Results for SVM svm_task_1 = run(task_1, flow_2()) svm_task_2 = run(task_2, flow_2()) print("SVM using task 1: {:<.5}; task 2: 
{:<.5}".format(svm_task_1.mean(), svm_task_2.mean())) # - # We can, therefore, compose various different tasks, flows, which are independent operations. Runs can then pair any such task and flow to construct an ML *workflow* and return the evaluated scores. This approach can help us define such components one-time, and we can extend this for any combination of a dataset, model, and for any number of evaluations in the future. Imagine if the entire ML *community* defines such tasks and various simple to complicated flows that they use in their daily practice. We can build custom working ML pipeline and even get to compare performances of our techniques on the same *task* with others! OpenML aims exactly for that. In the next section of this post, we shall scratch the surface of OpenML to see if we can actually do with OpenML what it promises. # ### Using OpenML # OpenML-Python can be installed using *pip* or by [cloning the git repo](https://openml.github.io/openml-python/develop/contributing.html#installation) and installing the current development version. So shall we then install OpenML? ;) It will be beneficial if the code snippets are tried out as this post is read. A consolidated Jupyter notebook with all the code can be found [here](https://nbviewer.jupyter.org/github/Neeratyoy/openml-python/blob/blog/OpenML%20-%20Machine%20Learning%20as%20a%20community.ipynb). # # Now that we have OpenML, let us jump straight into figuring out how we can get the Iris dataset from there. We can always browse the[OpenML website](https://www.openml.org/) and search for Iris. That is the easy route. Let us get familiar with the programmatic approach and learn how to fish instead. The OpenML-Python API can be found [here](https://openml.github.io/openml-python/develop/api.html). # #### Retrieving Iris from OpenML # In the example below, we will list out all possible datasets available in OpenML. We can choose the output format. 
I'll go with *dataframe* so that we obtain a pandas DataFrame and can get a neat tabular representation to search and sort specific entries. # + import openml import numpy as np import pandas as pd # + # Fetching the list of all available datasets on OpenML d = openml.datasets.list_datasets(output_format='dataframe') print(d.shape) # Listing column names or attributes that OpenML offers for name in d.columns: print(name) # - print(d.head()) # The column names indicate that they contain the meta-information about each of the datasets, and at this instance, we have access to **2958** datasets as indicated by the shape of the dataframe. We shall try searching for 'iris' in the column *name* and also use the *version* column to sort the results. # Filtering dataset list to have 'iris' in the 'name' column # then sorting the list based on the 'version' d[d['name'].str.contains('iris')].sort_values(by='version').head() # Okay, so the iris dataset with the version as 1 has an ID of **61**. For verification, we can check the [website for dataset ID 61](https://www.openml.org/d/61). We can see that it is the original Iris dataset which is of interest to us - 3 classes of 50 instances, with 4 numeric features. However, we shall retrieve the same information, as promised, programmatically. iris = openml.datasets.get_dataset(61) iris iris.features print(iris.description) # With the appropriate dataset available, let us briefly go back to the terminologies we discussed earlier. We have only used the *dataset* component so far. The *dataset* component is closely tied with the task component. To reiterate, the task would describe *how* the dataset will be used. # #### Retrieving relevant tasks from OpenML # We shall firstly list all available tasks that work with the Iris dataset. However, we are only treating Iris as a supervised classification problem and hence will filter accordingly. Following which, we will collect only the task IDs of the tasks relevant to us. 
df = openml.tasks.list_tasks(data_id=61, output_format='dataframe') df.head() # Filtering only the Supervised Classification tasks on Iris df.query("task_type=='Supervised Classification'").head() # Collecting all relevant task_ids tasks = df.query("task_type=='Supervised Classification'")['tid'].to_numpy() print(len(tasks)) # That settles the *task* component too. Notice how for one *dataset* (61), we obtain 11 task IDs which are of interest to us. This should illustrate the *one-to-many* relationship that *dataset-task components* can have. We have 2 more components to explore - *flows*, *runs*. We could list out all possible flows and filter out the ones we want, i.e., Random Forest. However, let us instead fetch all the evaluations made on the Iris dataset using the 11 tasks we collected above. # # We shall subsequently work with the scikit-learn based task which has been uploaded/used the most. We shall then further filter out the list of evaluations from the selected task (task_id=59 in this case), depending on if Random Forest was used. 
# Listing all evaluations made on the 11 tasks collected above # with evaluation metric as 'predictive_accuracy' task_df = openml.evaluations.list_evaluations(function='predictive_accuracy', task=tasks, output_format='dataframe') task_df.head() # Filtering based on sklearn (scikit-learn) task_df = task_df[task_df['flow_name'].str.contains("sklearn")] task_df.head() # Counting frequency of the different tasks used to # solve Iris as a supervised classification using scikit-learn task_df['task_id'].value_counts() # Retrieving the most used task t = openml.tasks.get_task(59) t # Filtering for only task_id=59 task_df = task_df.query("task_id==59") # Filtering based on Random Forest task_rf = task_df[task_df['flow_name'].str.contains("RandomForest")] task_rf.head() # #### Retrieving top-performing models from OpenML # Since we are an ambitious bunch of ML practitioners who settle for nothing but the best, and also since most results will not be considered worth the effort if not matching or beating *state-of-the-art*, we shall aim for the best scores. We'll sort the filtered results we obtained based on the score or '*value*' and then extract the components from that run - *task* and *flow*. task_rf.sort_values(by='value', ascending=False).head() # Fetching the Random Forest flow with the best score f = openml.flows.get_flow(2629) f # Fetching the run with the best score for # Random Forest on Iris r = openml.runs.get_run(523926) r # Okay, let's take a pause and re-assess. From multiple users across the globe, who had uploaded runs to OpenML, for a Random Forest run on the Iris, the best score seen till now is **96.67%**. That is certainly better than the naive model we built at the beginning to achieve **95.33%**. We had used a basic 10-fold cross-validation to evaluate a Random Forest of 10 trees with a max depth of 2. Let us see, what the best run uses and if it differs from our approach. 
# The scoring metric used t.evaluation_measure # The methodology used for estimations t.estimation_procedure # The model used f.name # The model parameters for param in r.parameter_settings: name, value = param['oml:name'], param['oml:value'] print("{:<25} : {:<10}".format(name, value)) # As evident, our initial approach is different on two fronts. We didn't explicitly use stratified sampling for our cross-validation. While the Random Forest hyperparameters are slightly different too (*max_depth=None*). That definitely sounds like a *to-do*, however, there is no reason why we should restrict ourselves to Random Forests. Remember, we are aiming *big* here. Given the [number of OpenML users](https://www.openml.org/search?type=user), there must be somebody who got a better score on Iris with some other model. Let us then retrieve that information. Programmatically, of course. # # In summary, we are now going to sort the performance of all scikit-learn based models on Iris dataset as per the task definition with *task_id=59*. # Fetching top performances task_df.sort_values(by='value', ascending=False).head() # Fetching best performing flow f = openml.flows.get_flow(6048) f # + # Fetching best performing run r = openml.runs.get_run(2012943) # The model parameters for param in r.parameter_settings: name, value = param['oml:name'], param['oml:value'] print("{:<25} : {:<10}".format(name, value)) # - # The highest score obtained among the uploaded results is **98.67%** using a [variant of SVM](https://scikit-learn.org/stable/modules/generated/sklearn.svm.NuSVC.html#sklearn.svm.NuSVC). However, if we check the corresponding flow description, we see that it is using an old scikit-learn version (0.18.1) and therefore may not be possible to replicate the exact results. However, in order to improve from our score of 95.33%, we should try running a *nu-SVC* on the same problem and see where we stand. Let's go for it. Via OpenML, of course. 
# #### Running best performing flow on the required task import openml import numpy as np from sklearn.svm import NuSVC # Building the NuSVC model object with parameters found clf = NuSVC(cache_size=200, class_weight=None, coef0=0.0, decision_function_shape=None, degree=3, gamma='auto', kernel='linear', max_iter=-1, nu=0.3, probability=True, random_state=3, shrinking=True, tol=3.2419092644286417e-05, verbose=False) # Obtaining task used earlier t = openml.tasks.get_task(59) t # Running the model on the task # Internally, the model will be made into # an OpenML flow and we can choose to retrieve it r, f = openml.runs.run_model_on_task(model=clf, task=t, upload_flow=False, return_flow=True) f # To obtain the score (without uploading) ## r.publish() can be used to upload these results ## need to sign-in to https://www.openml.org/ score = [] evaluations = r.fold_evaluations['predictive_accuracy'][0] for key in evaluations: score.append(evaluations[key]) print(np.mean(score)) # Lo and behold! We hit the magic number. I personally would have never tried out NuSVC and would have stuck around tweaking hyperparameters of the Random Forest. This is a new discovery of sorts for sure. I wonder though if anybody has tried XGBoost on Iris? # # In any case, we can now upload the results of this run to OpenML using: r.publish() # One would need to sign-in to https://www.openml.org/ and generate their respective *apikey*. The results would then be available for everyone to view and who knows, you can have your name against the *best-ever* performance measured on the Iris dataset! # --- # This post was in no ways intended to be a be-all-end-all guide to OpenML. The primary goal was to help form an acquaintance with the OpenML terminologies, introduce the API, establish connections with the general ML practices, and give a sneak-peek into the potential benefits of working together as a *community*. 
For a better understanding of OpenML, please explore the [documentation](https://openml.github.io/openml-python/develop/usage.html#usage). If one desires to continue from the examples given in this post and explore further, kindly refer to the [API](https://openml.github.io/openml-python/develop/api.html). # # OpenML-Python is an open-source project and contributions from everyone in the form of Issues and Pull Requests are most welcome. Contribution to the OpenML community is in fact not limited to code contribution. Every single user can make the community richer by sharing data, experiments, results, using OpenML. # # As ML practitioners, we may be dependent on tools for our tasks. However, as a collective, we can juice out its potential to a larger extent. Let us together, make ML more transparent, more democratic! # --- # Special thanks to Heidi, Bilge, Sahithya, <NAME> for the ideas, feedback, and support. # --- # Related readings: # * [To get started with OpenML-Python](https://openml.github.io/openml-python/develop/) # * [OpenML-Python Github](https://github.com/openml/openml-python) # * [The OpenML website](https://www.openml.org/) # * [Miscellaneous reading on OpenML](https://openml.github.io/blog/) # * [To get in touch!](https://www.openml.org/contact)
_notebooks/2019-10-26-OpenML-Machine-Learning-as-a-community.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Assignment 4 # # Before working on this assignment please read these instructions fully. In the submission area, you will notice that you can click the link to **Preview the Grading** for each step of the assignment. This is the criteria that will be used for peer grading. Please familiarize yourself with the criteria before beginning the assignment. # # This assignment requires that you to find **at least** two datasets on the web which are related, and that you visualize these datasets to answer a question with the broad topic of **sports or athletics** (see below) for the region of **Brighton, Massachusetts, United States**, or **United States** more broadly. # # You can merge these datasets with data from different regions if you like! For instance, you might want to compare **Brighton, Massachusetts, United States** to Ann Arbor, USA. In that case at least one source file must be about **Brighton, Massachusetts, United States**. # # You are welcome to choose datasets at your discretion, but keep in mind **they will be shared with your peers**, so choose appropriate datasets. Sensitive, confidential, illicit, and proprietary materials are not good choices for datasets for this assignment. You are welcome to upload datasets of your own as well, and link to them using a third party repository such as github, bitbucket, pastebin, etc. Please be aware of the Coursera terms of service with respect to intellectual property. # # Also, you are welcome to preserve data in its original language, but for the purposes of grading you should provide english translations. You are welcome to provide multiple visuals in different languages if you would like! 
# # As this assignment is for the whole course, you must incorporate principles discussed in the first week, such as having as high data-ink ratio (Tufte) and aligning with Cairo’s principles of truth, beauty, function, and insight. # # Here are the assignment instructions: # # * State the region and the domain category that your data sets are about (e.g., **Brighton, Massachusetts, United States** and **sports or athletics**). # * You must state a question about the domain category and region that you identified as being interesting. # * You must provide at least two links to available datasets. These could be links to files such as CSV or Excel files, or links to websites which might have data in tabular form, such as Wikipedia pages. # * You must upload an image which addresses the research question you stated. In addition to addressing the question, this visual should follow Cairo's principles of truthfulness, functionality, beauty, and insightfulness. # * You must contribute a short (1-2 paragraph) written justification of how your visualization addresses your stated research question. # # What do we mean by **sports or athletics**? For this category we are interested in sporting events or athletics broadly, please feel free to creatively interpret the category when building your research question! # # ## Tips # * Wikipedia is an excellent source of data, and I strongly encourage you to explore it for new data sources. # * Many governments run open data initiatives at the city, region, and country levels, and these are wonderful resources for localized data sources. # * Several international agencies, such as the [United Nations](http://data.un.org/), the [World Bank](http://data.worldbank.org/), the [Global Open Data Index](http://index.okfn.org/place/) are other great places to look for data. # * This assignment requires you to convert and clean datafiles. 
Check out the discussion forums for tips on how to do this from various sources, and share your successes with your fellow students!
#
# ## Example
# Looking for an example? Here's what our course assistant put together for the **Ann Arbor, MI, USA** area using **sports and athletics** as the topic. [Example Solution File](./readonly/Assignment4_example.pdf)

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import cm
# %matplotlib inline

# Celtics data: derive a 4-digit 'Year' from the 'Season' label (e.g. "2018-19" -> "2018")
# and index by it.  (Loaded for completeness; the plots below use the Red Sox data.)
celtics=pd.read_excel("boston_celtics.xlsx")
celtics['Year']=celtics['Season'].apply(lambda x: x[:4])
celtics=celtics.set_index('Year')

# Red Sox season stats: compute win percentage from wins and losses.
redsox_st=pd.read_excel("boston_redsox_stats.xlsx")
redsox_st['wp']=redsox_st['W']/(redsox_st['W']+redsox_st['L'])
redsox_att=pd.read_excel("boston_redsox_attendance.xlsx")
#redsox_st.plot('wp','Finish', kind='scatter')
# Join stats with attendance on 'Year'; drop(0) removes the first (header/partial) row.
# After the merge, 'Finish_x' is the finish position from the stats file and
# 'Finish_y' the one from the attendance file -- TODO confirm against the sheets.
redsox=pd.merge(redsox_st,redsox_att,left_on='Year',right_on='Year').drop(0).set_index('Year')
new=redsox.groupby(['Finish_x']).mean()
sns.set()
# 2x2 grid of KDE plots: finish position (x) vs. four season statistics (y).
# Correlation values in the annotations were computed offline.
plt.figure(figsize=(15,15))
plt.subplot(221)
sns.set_style('white')
ax=sns.kdeplot(redsox['Finish_y'],redsox['wp'],shade=True,cmap='Reds')
plt.xlim(1,9)
plt.ylabel('Win Percentage')
plt.xlabel('')
plt.annotate('corr=-0.89',(5,0.7))
plt.subplot(222)
sns.set_style('white')
ax=sns.kdeplot(redsox['Finish_y'],redsox['R/G'],shade=True,cmap='Blues')
plt.xlim(1,9)
plt.ylabel('Runs Scored per Game')
plt.xlabel('')
plt.annotate('corr=-0.54',(5,6.7))
plt.subplot(223)
sns.set_style('white')
ax=sns.kdeplot(redsox['Finish_y'],redsox['E'],shade=True,cmap='Greens')
plt.xlim(1,9)
plt.ylabel('Errors Committed')
plt.xlabel('Finish Position')
plt.annotate('corr=0.3',(5,350))
plt.subplot(224)
sns.set_style('white')
ax=sns.kdeplot(redsox['Finish_y'],redsox['Attend/G'],shade=True,cmap='Greys')
plt.xlim(1,9)
plt.ylabel('Tickets Sold Per Home Game')
plt.xlabel('Finish Position')
plt.annotate('corr=-0.48',(5,45000))
ax.figure.suptitle('Relationship between finish position and various match statistics \n across years (1901-2019) for Boston Red Sox team')
plt.show()
# Alternative visualizations explored during development:
#sns.jointplot(redsox['Finish_y'],redsox['Attend/G'],kind='kde',alpha=0.5,space=0.3);
#sns.jointplot(new['Finish_y'],new['E'],alpha=0.5,space=0.3);
#sns.swarmplot(new['Finish_y'],new['wp']);
#sns.violinplot(redsox['Finish_y'],redsox['wp']);
#sns.swarmplot(redsox['Finish_y'],redsox['Attend/G']);
#new.plot('Finish_y','wp')
#new.plot('Finish_y','R/G')
#new.plot('Finish_y','CS')
#new.plot('Finish_y','BatAge')
#new.plot('Finish_y','Fld%')
# -
2_Boston Red Sox.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Problem Statement # # I need a command-line based python 3 script to Generate workout routines for me on my Ubuntu server. # # I need to be able to generate workout based on Categories or muscles (or at random) from the Categories.json and muscles.json files at this page: https://github.com/wger-project/wger/tree/master/wger/exercises/fixtures # # I need it to use the exercises.json file in that same page to generate the list. I need you to only reference exercises where "Language" equals 2. # # I need to be able to specify the number of different kind of exercises (1 - infinity) # # So for example, I run the script and it asks me which muscle groups or categories (and lists them) I want to hit. I list them (so like 1 - abs, 2 - calves, etc. And I enter 1 and 2). And then it asks how many exercises, and I put in a number. And then it generates a random workout for the day. # # In addition, I need another json of bodyweight exercises. They are on this website: https://www.stackhealthy.com/complete-list-of-crossfit-exercises/ and are under the "Bodyweight" section. (The list starts with 'Air Squats, Assisted Pull-up, Back extension, ...) # # I need to be able in the script mentioned above to also be able to specify the number of bodyweight exercises (so after saying I want to do something like abs and calves, then it will ask how many bodyweight exercises I want to do and I can input 0-whatever). Then it generates the workout of the day. 
# import json cat=json.loads(open("fixtures/categories.json").read()) mus=json.loads(open("fixtures/muscles.json").read()) exr=json.loads(open("fixtures/exercises.json").read()) bweight=json.loads(open("fixtures/bodyweight.json").read()) # made from https://www.stackhealthy.com/complete-list-of-crossfit-exercises/ # ### Categories Data len(cat) cat[0].keys() cat[0] for i in cat: print(i["pk"],i["fields"]["name"]) # ## Muslces Data len(mus) mus[0].keys() mus[0] for i in mus: print(i['pk'],i['fields']['name']) # ## Exercise Data exr[0].keys() exr[0] # Recommended to find language 2 in exercise dataset but sometime language key is missing error_index=[] k=0 for i in exr: try: v = i['fields']['language'] except: error_index.append(k) k+=1 len(error_index) exr[360]
data_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Copyright 2018 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # - # # Recognize and Count Objects in a Video # # An object detection classifier can be used to identify and locate objects in a static image. When using video, you can use the same approach to static detection individual frames. In this Jupyter Notebook, we'll use the [IBM PowerAI Vision](https://www.ibm.com/us-en/marketplace/ibm-powerai-vision) for object detection and [OpenCV Python API](https://opencv.org/) to process the video. # # Before running this notebook, you will need to train and deploy an object detection model. PowerAI Vision has auto-labeling to enhance your dataset for accuracy when using video input. After you train and deploy your model, set the `POWER_AI_VISION_API_URL` constant below to use your model for inference. # # Extracting frames and locating objects is easy with OpenCV and PowerAI Vision. The challenge is how to keep track of objects if you want to count them. As an object moves, you will need to be able to determine whether or not you have already counted the object. In this notebook, we'll use the OpenCV Tracking API to follow cars down the road while we run PowerAI Vision object detection on a sample of the frames. 
With tracking, we'll be able to avoid double counting without requiring a lot of code. # # | First Detected... | Followed Down the Road | # | :---: | :---: | # | ![detected](https://raw.githubusercontent.com/IBM/powerai-counting-cars/master/doc/source/images/output-frame_00011.jpg) | ![tracked](https://raw.githubusercontent.com/IBM/powerai-counting-cars/master/doc/source/images/output-frame_00128.jpg) | # # ## First setup some parameters # # ### Required setup! # # Your PowerAI Vision API URL for the model that you trained and deployed will need to be set here. # Set this URL using your PowerAI Vision host + /AIVision/api + your deployed web API URL. POWER_AI_VISION_API_URL = "https://ny1.ptopenlab.com/AIVision/api/dlapis/your-guid-here" # ### Optional configuration # # Here you can customize some settings to tune your results. # # > NOTE: The notebook uses sampling and cached results to speed things up for iterative development. If you change the video, you will need to run with `CLEAN = True` to delete and regenerate your cached frames and inference results! # + CLEAN = False # USE WITH CARE! 
Wipe out saved files when this is true (else reuse for speed) input_video_url = "https://ibm.box.com/shared/static/4r4tf2robabiqu36ocmykrzvojuse5k9.mp4" # The input video START_LINE = 0 # If start line is > 0, cars won't be added until below the line (try 200) FRAMES_DIR = "frames" # Output dir to hold/cache the original frames OUTPUT_DIR = "output" # Output dir to hold the annotated frames SAMPLING = 10 # Classify every n frames (use tracking in between) CONFIDENCE = 0.80 # Confidence threshold to filter iffy objects # OpenCV colors are (B, G, R) tuples -- RGB in reverse WHITE = (255, 255, 255) YELLOW = (66, 244, 238) GREEN = (80, 220, 60) LIGHT_CYAN = (255, 255, 224) DARK_BLUE = (139, 0, 0) GRAY = (128, 128, 128) # - # ## Install Python Requirements # !pip install opencv-python==3.4.0.12 # !pip install opencv-contrib-python==3.4.0.12 # !pip install requests==2.18.4 # !pip install pandas==0.22.0 # !pip install urllib3==1.22 # + import json import glob import math import os import shutil import cv2 from IPython.display import clear_output, Image, display import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) print("Warning: Certificates not verified!") # %matplotlib notebook # - # ## Download the video # This will download a small example video. # # !wget {input_video_url} input_video = input_video_url.split('/')[-1] # ## Create or clean the directories # Caching the frames and output directories allows the processing to continue where it left off. This is particularly useful when using a shared system with deployment time limits. This also allows you to quickly `Run all` when tweaking Python code that does not affect the inference. # # If you change the input video or just want a fresh start, you should `CLEAN` or change the directory names. 
# +
# When CLEAN is set, wipe the cached frame and output directories so everything
# is regenerated from scratch; then ensure both directories exist.
if CLEAN:
    if os.path.isdir(FRAMES_DIR):
        shutil.rmtree(FRAMES_DIR)
    if os.path.isdir(OUTPUT_DIR):
        shutil.rmtree(OUTPUT_DIR)

if not os.path.isdir(FRAMES_DIR):
    os.mkdir(FRAMES_DIR)
if not os.path.isdir(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
# -

# ## Parse and explode the video file into JPEGs
# Each frame is saved as an individual JPEG file for later use.

# +
if os.path.isfile(input_video):
    video_capture = cv2.VideoCapture(input_video)
else:
    raise Exception("File %s doesn't exist!" % input_video)

# CAP_PROP_FRAME_COUNT is only an estimate for some containers, hence "estimate".
total_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
print("Frame count estimate is %d" % total_frames)

num = 0
# Read until the current position reaches the reported frame count; both
# properties are re-queried each pass because the count can be approximate.
while video_capture.get(cv2.CAP_PROP_POS_FRAMES) < video_capture.get(cv2.CAP_PROP_FRAME_COUNT):
    success, image = video_capture.read()
    if success:
        num = int(video_capture.get(cv2.CAP_PROP_POS_FRAMES))
        print("Writing frame {num} of {total_frames}".format(
            num=num, total_frames=total_frames), end="\r")
        cv2.imwrite('{frames_dir}/frame_{num:05d}.jpg'.format(
            frames_dir=FRAMES_DIR, num=num), image)
    else:
        # TODO: If this happens, we need to add retry code
        # NOTE(review): this branch fires when read() fails, yet the message
        # says "Error writing" -- the wording is misleading; confirm and fix.
        raise Exception('Error writing frame_{num:05d}.jpg'.format(
            num=int(video_capture.get(cv2.CAP_PROP_POS_FRAMES))))
print("\nWrote {num} frames".format(num=num))

# Capture geometry/FPS used throughout the rest of the notebook.
FRAME_FPS = int(video_capture.get(cv2.CAP_PROP_FPS))
FRAME_WIDTH = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
FRAME_HEIGHT = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
ROI_YMAX = int(round(FRAME_HEIGHT * 0.75))  # Bottom quarter = finish line
print("Frame Dimensions: %sx%s" % (FRAME_WIDTH, FRAME_HEIGHT))
# -

# ## PowerAI Vision inference wrapper
# Define a helper/wrapper to call PowerAI Vision and return the inference result.

# +
# One shared session so TCP connections are reused across the many requests.
s = requests.Session()


def detect_objects(filename):
    """POST one frame image to the PowerAI Vision endpoint.

    Returns a (status_code, parsed_json) tuple; the JSON carries a 'result'
    field and, on success, a 'classified' list of detected objects.
    """
    with open(filename, 'rb') as f:
        # WARNING! verify=False is here to allow an untrusted cert!
        r = s.post(POWER_AI_VISION_API_URL,
                   files={'files': (filename, f)},
                   verify=False)
    return r.status_code, json.loads(r.text)
# -

# ## Test the API on a single frame
# Let's look at the result of a single inference operation from the PowerAI Vision Object Detection API. We see a standard HTTP return code, and a JSON response which includes the image URL, and tuples that indicate the confidence and bounding-box coordinates of the objects that we classified.

# +
rc, jsonresp = detect_objects('frames/frame_00100.jpg')

print("rc = %d" % rc)
print("jsonresp: %s" % jsonresp)
if 'classified' in jsonresp:
    print("Got back %d objects" % len(jsonresp['classified']))
print(json.dumps(jsonresp, indent=2))
# -

# ## Get object detection results for sampled frames
# Since we've stored all video frames on disk (for easy reference), we can iterate over those files
# and make queries as appropriate to PowerAI Vision's API. We'll store the results in a
# `tracking_results` dictionary, organized by file name. Since we are tracking objects from frame
# to frame, we can use sampling to decide how often to check for new objects.
#
# We're also caching the results so that you can change later code and run the notebook over
# without running the same inference over again.
# +
# Serialize requests, storing them in a "tracking_results" dict
# (frame filename -> PowerAI Vision JSON response).  Cached results on disk
# let a re-run skip frames that were already classified.
try:
    with open('frames/frame-data-newmodel.json') as existing_results:
        tracking_results = json.load(existing_results)
except Exception:
    # Any fail to read existing results means we start over
    tracking_results = {}

print("Sampling every %sth frame" % SAMPLING)
i = 0
cache_used = 0
sampled = 0
for filename in sorted(glob.glob('frames/frame_*.jpg')):
    i += 1
    if not i % SAMPLING == 0:  # Classify only every Nth frame; tracking covers the rest
        continue
    existing_result = tracking_results.get(filename)
    if existing_result and existing_result['result'] == 'success':
        cache_used += 1
    else:
        rc, results = detect_objects(filename)
        if rc != 200 or results['result'] != 'success':
            print("ERROR rc=%d for %s" % (rc, filename))
            print("ERROR result=%s" % results)
        else:
            sampled += 1
            # Save frequently to cache partial results
            tracking_results[filename] = results
            with open('frames/frame-data-newmodel.json', 'w') as fp:
                json.dump(tracking_results, fp)
    print("Processed file {num} of {total_frames} (used cache {cache_used} times)".format(
        num=i, total_frames=total_frames, cache_used=cache_used), end="\r")

# Finally, write all our results
with open('frames/frame-data-newmodel.json', 'w') as fp:
    json.dump(tracking_results, fp)
print("\nDone")
# -

# ## Define helper functions for tracking and drawing labels
# Refer to the [OpenCV docs.](https://docs.opencv.org/3.4.1/)


# +
def label_object(color, textcolor, fontface, image, car, textsize, thickness,
                 xmax, xmid, xmin, ymax, ymid, ymin):
    """Draw a bounding rectangle and the car's number (centered) on the image."""
    cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, thickness)
    pos = (xmid - textsize[0] // 2, ymid + textsize[1] // 2)
    cv2.putText(image, car, pos, fontface, 1, textcolor, thickness, cv2.LINE_AA)


def update_trackers(image, counters):
    """Advance every active tracker by one frame and draw the overlays.

    Returns (boxes, counters): the updated bounding boxes of still-tracked
    cars, and the counters dict with lane/lost totals updated.  Mutates the
    module-level `trackers` list, dropping trackers that fail or whose car
    crosses the finish line.
    """
    left_lane = counters['left_lane']
    right_lane = counters['right_lane']
    boxes = []
    color = (80, 220, 60)
    fontface = cv2.FONT_HERSHEY_SIMPLEX
    fontscale = 1
    thickness = 1
    # BUGFIX: iterate over a snapshot of the tracker list.  The previous
    # `for n, pair in enumerate(trackers)` combined with `del trackers[n]`
    # mutated the list while iterating, which skips the element following
    # every deletion and, after the indices shift, can delete the wrong
    # tracker.  Removing by identity of the (tracker, car) pair is safe.
    for pair in list(trackers):
        tracker, car = pair
        textsize, _baseline = cv2.getTextSize(
            car, fontface, fontscale, thickness)
        success, bbox = tracker.update(image)
        if not success:
            counters['lost_trackers'] += 1
            trackers.remove(pair)
            continue
        boxes.append(bbox)  # Return updated box list
        xmin = int(bbox[0])
        ymin = int(bbox[1])
        xmax = int(bbox[0] + bbox[2])
        ymax = int(bbox[1] + bbox[3])
        xmid = int(round((xmin + xmax) / 2))
        ymid = int(round((ymin + ymax) / 2))
        if ymid >= ROI_YMAX:
            label_object(WHITE, WHITE, fontface, image, car, textsize, 1,
                         xmax, xmid, xmin, ymax, ymid, ymin)
            # Count left-lane, right-lane as cars ymid crosses finish line
            # (x = 630 is the hand-picked lane divider for this camera angle)
            if xmid < 630:
                left_lane += 1
            else:
                right_lane += 1
            # Stop tracking cars when they hit finish line
            trackers.remove(pair)
        else:
            # Rectangle and number on the cars we are tracking
            label_object(color, YELLOW, fontface, image, car, textsize, 4,
                         xmax, xmid, xmin, ymax, ymid, ymin)

    # Add finish line overlay/line
    overlay = image.copy()
    # Shade region of interest (ROI). We're really just using the top line.
    cv2.rectangle(overlay, (0, ROI_YMAX), (FRAME_WIDTH, FRAME_HEIGHT),
                  DARK_BLUE, cv2.FILLED)
    cv2.addWeighted(overlay, 0.6, image, 0.4, 0, image)
    # Draw start line, if > 0
    if START_LINE > 0:
        cv2.line(image, (0, START_LINE), (FRAME_WIDTH, START_LINE),
                 GRAY, 4, cv2.LINE_AA)
    # Draw finish line with lane hash marks
    cv2.line(image, (0, ROI_YMAX), (FRAME_WIDTH, ROI_YMAX), LIGHT_CYAN, 4, cv2.LINE_AA)
    cv2.line(image, (350, ROI_YMAX - 20), (350, ROI_YMAX + 20), LIGHT_CYAN, 4, cv2.LINE_AA)
    cv2.line(image, (630, ROI_YMAX - 20), (630, ROI_YMAX + 20), LIGHT_CYAN, 4, cv2.LINE_AA)
    cv2.line(image, (950, ROI_YMAX - 20), (950, ROI_YMAX + 20), LIGHT_CYAN, 4, cv2.LINE_AA)
    # Add lane counter
    cv2.putText(image, "Lane counter:", (30, ROI_YMAX + 80),
                fontface, 1.5, LIGHT_CYAN, 4, cv2.LINE_AA)
    cv2.putText(image, str(left_lane), (480, ROI_YMAX + 80),
                fontface, 1.5, LIGHT_CYAN, 4, cv2.LINE_AA)
    cv2.putText(image, str(right_lane), (800, ROI_YMAX + 80),
                fontface, 1.5, LIGHT_CYAN, 4, cv2.LINE_AA)
    seconds = counters['frames'] / FRAME_FPS
    cv2.putText(image, "Cars/second:", (35, ROI_YMAX + 110),
                fontface, 0.5, LIGHT_CYAN, 1, cv2.LINE_AA)
    cv2.putText(image, '{0:.2f}'.format(left_lane / seconds), (480, ROI_YMAX + 110),
                fontface, 0.5, LIGHT_CYAN, 1, cv2.LINE_AA)
    cv2.putText(image, '{0:.2f}'.format(right_lane / seconds), (800, ROI_YMAX + 110),
                fontface, 0.5, LIGHT_CYAN, 1, cv2.LINE_AA)
    counters['left_lane'] = left_lane
    counters['right_lane'] = right_lane

    return boxes, counters
# -


def not_tracked(objects, boxes):
    """Return the classified objects that don't overlap any tracked box.

    An object is "already tracked" when its center lies within `box_range`
    (the average of its width and height) of some tracked box's center.
    NOTE(review): the `.get(..., "")` defaults would raise a TypeError in the
    midpoint arithmetic if a coordinate key were ever missing -- the API
    presumably always supplies all four; confirm before relying on it.
    """
    if not objects:
        return []  # No new classified objects to search for
    if not boxes:
        return objects  # No existing boxes, return all objects

    new_objects = []
    for obj in objects:
        ymin = obj.get("ymin", "")
        ymax = obj.get("ymax", "")
        ymid = int(round((ymin + ymax) / 2))
        xmin = obj.get("xmin", "")
        xmax = obj.get("xmax", "")
        xmid = int(round((xmin + xmax) / 2))
        box_range = ((xmax - xmin) + (ymax - ymin)) / 2
        for bbox in boxes:
            bxmin = int(bbox[0])
            bymin = int(bbox[1])
            bxmax = int(bbox[0] + bbox[2])
            bymax = int(bbox[1] + bbox[3])
            bxmid = int((bxmin + bxmax) / 2)
            bymid = int((bymin + bymax) / 2)
            if math.sqrt((xmid - bxmid)**2 + (ymid - bymid)**2) < box_range:
                # found existing, so break (do not add to new_objects)
                break
        else:
            new_objects.append(obj)
    return new_objects


# +
def in_range(obj):
    """True when the object's box lies between the start and finish lines."""
    ymin = obj['ymin']
    ymax = obj['ymax']
    if ymin < START_LINE or ymax > ROI_YMAX:
        # Don't add new trackers before start or after finish.
        # Start line can help avoid overlaps and tracker loss.
        # Finish line protection avoids counting the car twice.
        return False
    return True


def add_new_object(obj, image, cars):
    """Assign sequence number `cars` to a new detection and start tracking it."""
    car = str(cars)
    xmin = obj['xmin']
    xmax = obj['xmax']
    ymin = obj['ymin']
    ymax = obj['ymax']
    xmid = int(round((xmin + xmax) / 2))
    ymid = int(round((ymin + ymax) / 2))
    fontface = cv2.FONT_HERSHEY_SIMPLEX
    fontscale = 1
    thickness = 1
    textsize, _baseline = cv2.getTextSize(
        car, fontface, fontscale, thickness)

    # init tracker
    tracker = cv2.TrackerKCF_create()  # Note: Try comparing KCF with MIL
    success = tracker.init(image, (xmin, ymin, xmax - xmin, ymax - ymin))
    if success:
        trackers.append((tracker, car))
        label_object(GREEN, YELLOW, fontface, image, car, textsize, 4,
                     xmax, xmid, xmin, ymax, ymid, ymin)
# -

# ## Inference, tracking, and annotation
# Loop through the saved frames and:
# 1. Update the trackers to follow already detected objects from frame to frame.
# 1. Look for new objects if we ran inference on this frame.
#    * Check for overlap with tracked objects.
#    * If no overlap, assign a sequence number and start tracking.
# 1. Write an annotated image with tracked objects highlighted and numbered.
# +
# Running totals: `cars` is the next sequence number to assign; `trackers`
# holds the live (tracker, label) pairs used by the helper functions above.
cars = 0
trackers = []
counters = {
    'left_lane': 0,
    'right_lane': 0,
    'lost_trackers': 0,
    'frames': 0,
}

# Load the cached inference results produced by the sampling loop above.
with open('frames/frame-data-newmodel.json') as existing_results:
    tracking_results = json.load(existing_results)

for filename in sorted(glob.glob('frames/frame_*.jpg')):
    counters['frames'] += 1
    img = cv2.imread(filename)
    # First advance existing trackers; then, if this frame was classified,
    # start tracking any detection that doesn't overlap a tracked box.
    boxes, counters = update_trackers(img, counters)
    if filename in tracking_results and 'classified' in tracking_results[filename]:
        jsonresp = tracking_results[filename]
        for obj in not_tracked(jsonresp['classified'], boxes):
            if in_range(obj):
                cars += 1
                add_new_object(obj, img, cars)  # Label and start tracking

    # Draw the running total of cars in the image in the upper-left corner
    cv2.putText(img, 'Cars detected: ' + str(cars), (30, 60),
                cv2.FONT_HERSHEY_SIMPLEX, 1.5, DARK_BLUE, 4, cv2.LINE_AA)
    # Add note with count of trackers lost
    cv2.putText(img, 'Cars lost: ' + str(counters['lost_trackers']), (35, 85),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, DARK_BLUE, 1, cv2.LINE_AA)
    cv2.imwrite("output/output-" + filename.split('/')[1], img)
    print("Processed file {num} of {total_frames}".format(
        num=counters['frames'], total_frames=total_frames), end="\r")
print("\nDone")
# -

# ## Play the annotated frames in the notebook
#
# This code will play the annotated frames in a loop to demonstrate the new video.
# Running this in the notebook is usually slow. Shrinking the size helps some.
# Refer to the following section to build a real, full speed video.

# +
for filename in sorted(glob.glob(os.path.join(os.path.abspath(OUTPUT_DIR), 'output-frame_*.jpg'))):
    frame = cv2.imread(filename)
    clear_output(wait=True)
    rows, columns, _channels = frame.shape
    frame = cv2.resize(frame, (int(columns/2), int(rows/2)))  # shrink it
    _ret, jpg = cv2.imencode('.jpg', frame)
    display(Image(data=jpg))
print("\nDone")
# -

# ## Create a video from the annotated frames
#
# This command requires `ffmpeg`. It will combine the annotated
# frames to build an MP4 video which you can play at full speed
# (the notebook playback above was most likely slow).
#
# Uncomment the command to try running it from this notebook, or
#
# copy the output files to a system with `ffmpeg` and run the
# command there.

# # !ffmpeg -y -r 60 -f image2 -i output/output-frame_%05d.jpg -vcodec libx264 -crf 25 -pix_fmt yuvj420p annotated_video.mp4
data/examples/example_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import time import json import nltk import string import pickle import datetime import pickle import pandas as pd from os import listdir import re from sklearn.metrics import f1_score, recall_score, precision_score import numpy as np import pandas as pd import xml.etree.ElementTree as xml # + from moviepy.editor import * from tqdm.notebook import tqdm from collections import Counter from nltk.stem import PorterStemmer from nltk.stem import LancasterStemmer from nltk.tokenize import sent_tokenize, word_tokenize from sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer strptime = datetime.datetime.strptime # - mentions_path = 'coref/summaries_mentions_first_story.pickle' mentions = pickle.load(open(mentions_path, 'rb')) print('support', mentions['s01e07']['support'][:2], '..') print('marked', mentions['s01e07']['marked'][:2], '..') print('characters', mentions['s01e07']['characters'][:2], '..') # + path_screenplays_scenes='coref/csi-corpus/screenplay_summarization/scene_level_n_aspects' all_chars = [] ep_characters = {} eps=[] for ep in listdir(path_screenplays_scenes): #print(ep) annotated_scenes=pd.read_csv(path_screenplays_scenes+'/'+ep) eps.append(ep.split('.csv')[0]) # + print(eps) print(eps[-7]) # - df=pd.DataFrame() df['content'] = print('yo') ps = PorterStemmer() ls = LancasterStemmer() do_stem = True ps.stem('kills') def compute_similarity(content, summary_line, method): if method[0] == 'tfidf': weights = method[1] return sum(weights[w] if w in weights else 0 for w in set(content.split()) if w in summary_line.split()) if method[0] == 'tfidf_log': weights = method[1] dn = np.log(len(content.split() + summary_line.split()) + 1) if len(content.strip().split()) > 0 else 1 return sum(weights[w] if w in weights else 0 
for w in set(content.split()) if w in summary_line.split())/dn if method[0] == 'tfidf_log_2': weights = method[1] n = sum(1 if w in weights else 0 for w in set(content.split()) if w in summary_line.split()) dn = np.log(len(content.split() + summary_line.split()) - n + 1) dn = dn if dn > 0 else 1 return sum(weights[w] if w in weights else 0 for w in set(content.split()) if w in summary_line.split())/dn if method[0] == 'tfidf_log_3': weights = method[1] n = sum(1 if w in weights else 0 for w in set(content.split()) if w in summary_line.split()) dn = np.log10(len(content.split() + summary_line.split()) - n + 1) dn = dn if dn > 0 else 1 return sum(weights[w] if w in weights else 0 for w in set(content.split()) if w in summary_line.split())/dn if method[0] == 'tfidf_sqrt': weights = method[1] n = sum(1 if w in weights else 0 for w in set(content.split()) if w in summary_line.split()) dn = np.sqrt(len(content.split() + summary_line.split()) - n + 1) dn = dn if dn > 0 else 1 return sum(weights[w] if w in weights else 0 for w in set(content.split()) if w in summary_line.split())/dn elif method[0] == 'count': return len([w for w in set(content.split()) if w in summary_line.split()]) elif method[0] == 'count_2': dn = np.log(len(content.split() + summary_line.split()) + 1) dn = dn if dn > 0 else 1 return len([w for w in set(content.split()) if w in summary_line.split()]) / dn # + f1_scores=[] #eps=['s02e04'] #eps=['s03e12', 's05e13', 's01e20', 's05e10', 's01e08', 's04e14', 's05e22', 's03e19', 's04e23', 's05e21', 's02e06', 's04e10', 's05e08', 's03e05', 's04e22', 's05e06', 's01e13', 's02e01', 's05e17', 's01e07', 's04e21', 's05e12', 's02e15', 's04e12', 's03e03', 's04e05', 's02e10', 's02e09', 's05e05', 's05e03', 's04e15', 's01e23', 's03e21', 's04e06', 's03e08', 's01e19'] for ep in eps: method_used=('tfidf',tfidf_weights) annotated_scenes=pd.read_csv(path_screenplays_scenes+'/'+ep+'.csv') nb_positive=len(annotated_scenes[annotated_scenes['in_summary'] == 1]) vectorizer = 
TfidfVectorizer(min_df=2, stop_words='english') corpus = [' '.join(ps.stem(w) for w in word_tokenize(s) if w.lower() not in vectorizer.get_stop_words()) if do_stem else s for s in annotated_scenes['scene_text'].values] corpus = [s.translate(str.maketrans('', '', string.punctuation)).replace(' ', ' ').lower() for s in corpus] #print(len(corpus)) #print(len(summaries)) vectorizer.fit(corpus) corpus_tf_idf = vectorizer.transform(corpus) word_count = Counter([w for s in corpus for w in s.split()]) ', '.join(sorted(vectorizer.get_stop_words())) tfidf_weights = dict(zip(vectorizer.get_feature_names(), vectorizer.idf_)) summaries_raw = [s for s in mentions[ep]['support']] summaries = [' '.join(ps.stem(w) for w in word_tokenize(s) if w.lower() not in vectorizer.get_stop_words()) if do_stem else s for s in summaries_raw] summaries = [s.translate(str.maketrans('', '', string.punctuation)).replace(' ', ' ') for s in summaries] similarity_matrix = np.zeros((len(corpus), len(summaries))) for i, content in tqdm(enumerate(corpus), total=len(corpus)): for j, s in enumerate(summaries): #print(i) #print(j) similarity_matrix[i, j] = compute_similarity(content, s, method=method_used) unrolled_sm = [(similarity_matrix[i, j], i, j) for i in range(len(corpus)) for j in range(len(summaries))] best_matches = sorted(unrolled_sm, key=lambda x: -x[0]) score = {} #print(j, summaries_raw[j]) for (v, scene, syn) in best_matches: if scene not in score : score[scene] = [] score[scene].append(syn) for s in score: #print(max(score[s])) #print(sum(score[s])) #score[s] = sum(score[s]) score[s] = max(score[s]) reses = sorted(score.items(), key=lambda x: -x[1]) scenes=[i[0] for i in reses[0:nb_positive]] #print(reses[0]) #print(syn, summaries_raw[syn]) annotated_scenes['prediction'] = annotated_scenes['scene_id'].apply(lambda x: 1 if x in scenes else 0) #annotated_scenes['synopses'] = annotated_scenes['prediction'].apply(lambda x: summaries_raw[syn] if x==1 else 0) annotated_scenes['aspect?'] = 
annotated_scenes['aspects'].apply(lambda x: 0 if x == 'None' else 1) print(method_used[0]) annotated_scenes.to_csv(ep+'_'+method_used[0]+'_similarity.csv') #print('Episode') #print(ep) print(f1_score(annotated_scenes.in_summary, annotated_scenes.prediction)) f1_scores.append(f1_score(annotated_scenes.in_summary, annotated_scenes.prediction)) # + print(len(f1_scores)) print(np.mean(f1_scores)) print(f1_scores[15]) method_used=('tfidf',tfidf_weights) ep=eps[15] print(ep) #print(df) print(df['aspect?'][df['aspect?'] == 1].sum() ) print(df['in_summary'][df['in_summary'] == 1].sum() ) df_asp=pd.DataFrame(columns=['ep', 'asp_sum', 'asp_nosum', 'noasp_sum', 'nb_in_sum','nb_aspect']) for ep in eps: df=pd.read_csv(ep+'_'+method_used[0]+'_similarity.csv') in_summary=df.loc[df['in_summary'] == 1] asp=df.loc[df['aspect?'] == 1] asp_sum=df.loc[(df['aspect?'] == 1) & (df['in_summary'] == 1)] asp_nosum=df.loc[(df['aspect?'] == 1) & (df['in_summary'] == 0)] noasp_sum=df.loc[(df['aspect?'] == 0) & (df['in_summary'] == 1)] episode= pd.DataFrame(data= {'ep': [ep], 'asp_sum': [len(asp_sum)], 'asp_nosum': [len(asp_nosum)], 'noasp_sum':[len(noasp_sum)],'nb_in_sum':[len(in_summary)],'nb_aspect':[len(asp)]}) #print(episode) df_asp=df_asp.append(episode,ignore_index=True) df_asp.loc['Total']= df_asp.sum() df_asp['ep']['Total']='' print(df_asp) # + aspects =['Crime scene', 'Victim', 'Death cause', 'Perpetrator', 'Evidence', 'Motive'] #aspects =['Crime scene'] for aspect in aspects: df_aspect=pd.DataFrame(columns=['scene_id', 'scene_text', 'in_summary', 'aspects', 'prediction','ep','aspect_mentionned']) for ep in eps: df=pd.read_csv(ep+'_'+method_used[0]+'_similarity.csv') match=df[['scene_id', 'scene_text', 'in_summary', 'aspects', 'prediction']][df.isin([aspect]).aspects] #print(match) match['ep']=ep #match['aspect_mentionned']=match.scene_text.apply(lambda lst: 1 if x for x in lst in aspect.lower() else 0 ) #match['aspect_mentionned']=match.scene_text.apply(lambda x: 1 if 
x.contains(aspect.lower()) else 0 ) #any(aspect.lower() in x for x in lst) #match['aspect_mentionned']=match[['aspects']][match.scene_text.str.contains('Camera')]='True' match['aspect_mentionned']= np.where(match.scene_text.str.contains(aspect.lower()), 1, 0) df_aspect=df_aspect.append(match,ignore_index=True) #df_aspect.to_csv(aspect+'.csv') #print(df_aspect.ep[4]) #print(df_aspect.scene_text[4]) #print(df_aspect.in_summary[4]) print(aspect) print(df_aspect['aspect_mentionned'].value_counts()) #print(df_aspect['scene_text']) #print('Recall',recall_score(df_aspect.in_summary.astype(int), df_aspect.prediction.astype(int))) #print('Precision',precision_score(df_aspect.in_summary.astype(int), df_aspect.prediction.astype(int))) #print('-----------------------------------------------------------') # - for ep in eps: df=pd.read_csv(ep+'_'+method_used[0]+'_similarity.csv') print(df) word_count corpus_tf_idf.shape print(len(vectorizer.get_stop_words())) tfidf_weights['camera'] tfidf_weights['man'] tfidf_weights['hallway'] # + #summaries_raw = [' '.join(w[1:] if w[0] in ['@', '#', '~'] else w for w in s.split(' ')) for s in summaries_raw] # - compute_similarity('No more excus . a woman is talk to a man and he is smile TV : ryan stacey', 'kat persuad stacey and jean to come to R & R with kat and kim , leav charli and alfi to look after lili', method=('tfidf', tfidf_weights)), compute_similarity('No more excus . 
a woman is talk to a man and he is smile TV : ryan stacey', 'kat persuad stacey and jean to come to R & R with kat and kim , leav charli and alfi to look after lili', method=('count',)) tfidf_weights['stacey'] print(len(summaries)) print(len(corpus)) best_matches_count[0] for v, i, j in best_matches_count[:40]: print(i, df['content'].iloc[i]) print("======================") #print(summaries_raw[j]) #print("======================") #print(v) #print("======================") #print(list(w for w in set(summaries[j].split()) if w in corpus[i].split())) # + # similarity_matrix_tfidf = similarity_matrix # - # %%time similarity_matrix_tfidf = np.zeros((len(df), len(summaries))) for i, content in tqdm(enumerate(corpus), total=len(corpus)): for j, s in enumerate(summaries): similarity_matrix_tfidf[i, j] = compute_similarity(content, s, method=('tfidf_log_2', tfidf_weights)) np.max(similarity_matrix_tfidf) similarity_matrix_tfidf.shape unrolled_sm = [(similarity_matrix_tfidf[i, j], i, j) for i in range(len(corpus)) for j in range(len(summaries))] best_matches = sorted(unrolled_sm, key=lambda x: -x[0]) best_matches[:10] for v, i, j in best_matches[:2]: print(i, df['content'].iloc[i]) print("======================") #print(j, summaries_raw[j]) print("======================") print() # # Making the cut best_matches[:14] # + annotated_scenes # + f1_score(annotated_scenes.in_summary, annotated_scenes.prediction) # + votes_selection = {c:[] for c in ['Janine', 'Ryan', 'Stacey']} votes_durations = {c:[] for c in ['Janine', 'Ryan', 'Stacey']} for char in votes_selection: print(char.upper() + ' =====>') counter = 1 for s in reses: if char in df['content'].iloc[s[0]]: print(df['content'].iloc[s[0]]) votes_selection[char].append(df['sid'].iloc[s[0]]) votes_durations[char].append(df['end'].iloc[s[0]] - df['begin'].iloc[s[0]]) if len(votes_selection[char]) == 20: break # + top_selection = {c:[] for c in ['Janine', 'Ryan', 'Stacey']} top_durations = {c:[] for c in ['Janine', 'Ryan', 
'Stacey']} top_times = {c:[] for c in ['Janine', 'Ryan', 'Stacey']} for char in top_selection: print(char.upper() + ' =====>') counter = 1 seen = set() for (v, s, l) in best_matches: if s not in seen and char in df['content'].iloc[s] + df['chars_query'].iloc[s]: sentence = df['transcript'].iloc[s].strip() print(sentence) top_selection[char].append(df['sid'].iloc[s]) top_durations[char].append(round(df['end'].iloc[s] - df['begin'].iloc[s], 2)) top_times[char].append((df['begin'].iloc[s], df['end'].iloc[s])) seen.add(s) while sentence and sentence[-1] not in ['.', '?', '!', "'"]: s += 1 sentence = df['transcript'].iloc[s].strip() if sentence == '': break print(' ++++ ', sentence) top_selection[char].append(df['sid'].iloc[s]) top_durations[char].append(round(df['end'].iloc[s] - df['begin'].iloc[s], 2)) top_times[char].append((df['begin'].iloc[s], df['end'].iloc[s])) seen.add(s) counter +=1 counter += 1 if len(seen) >= 20: break # - for char in top_selection: print(char.upper()) for run in range(1, 5): print('Run', run, ':', ' '.join(sorted(top_selection[char][:5*run]))) for char in top_durations: print(char.upper()) for run in range(1, 5): print('Run', run, ':', sum(top_durations[char][:5*run])) for char in votes_durations: print(char.upper()) for run in range(1, 5): print('Run', run, ':', sum(votes_durations[char][:5*run])) top_times pickle.dump(top_selection, open('selected_shots_final.pickle', 'wb')) pickle.dump(top_times, open('selected_shots_boundaries_final.pickle', 'wb')) # # Generate the Videos! # ! 
pip install -q moviepy shot_boundaries = {} for character in top_selection: for shot in top_selection[character]: if shot not in shot_boundaries: shot_boundaries[shot] = mastershot_df[mastershot_df.sid == shot][['start', 'end']].values[0] set([k.split('-')[0] for k in shot_boundaries]) shot_boundaries.keys() shot185_1736 3763.13 * 25 os.listdir('../../eastenders_episodes') """%%time frames = {} # Read until video is completed for filename in tqdm(os.listdir('../../eastenders_episodes'), total=11): print('Processing', filename) vid = cv2.VideoCapture('../../eastenders_episodes/' + filename) vid_id = file2id[filename] current_frame = 0 for sid in sorted([sid for sid in shot_boundaries if sid.startswith(vid_id)]): start_frame = int(shot_boundaries[shot][0] * 25) end_frame = int(shot_boundaries[shot][1] * 25) frames[sid] = [] while(cap.isOpened()): ret, frame = cap.read() current_frame += 1 if ret == True and current_frame >= start_frame and current_frame <= end_frame: frames[sid].append(frame) if ret == True and current_frame > end_frame: print(f'frames[{sid}]:', len(frames[sid])) break vid.release()""" print('Using OpenCV is too slow') df[df.sid == '175-1772'][['begin', 'end']].values[0] # %%time frames = {} durs = {} # Read until video is completed for filename in tqdm(os.listdir('../../eastenders_episodes'), total=11): print('Processing', filename) video = VideoFileClip('../../eastenders_episodes/' + filename) vid_id = file2id[filename] current_frame = 0 for sid in sorted([sid for sid in shot_boundaries if sid.startswith(vid_id)]): try: start, end = shot_boundaries[sid] frames[sid] = video.subclip(start, end) durs[sid] = (strptime(end, '%H:%M:%S.%f') - strptime(start, '%H:%M:%S.%f')).total_seconds() except Exception as e: print('EXCEPTION', str(e)) print(sid, start, end) for char in top_selection: print(char.upper()) print('Generating videos for', char, ':') for run in range(1, 5): t = time.time() print(' Run', run, '..', end=' ') shots = 
sorted(top_selection[char][:5*run]) result = concatenate_videoclips([frames[sid] for sid in shots]) result.write_videofile(f"videos/MeMAD_{run}_{char}.mp4",fps=25, logger=None) print(f'Done! ({(time.time() - t):.2f} s)') results = {'Janine': {1:[('175_1', 5.3), ('175_2', 5.3)], 2:[('175_1', 5.3), ('175_2', 5.3), ('175_3', 5.3)], 3:[('175_1', 5.3), ('175_2', 5.3), ('175_3', 5.3), ('175_4', 5.3)], 4:[('175_1', 5.3), ('175_2', 5.3), ('175_3', 5.3), ('175_4', 5.3), ('175_5', 5.3)],}, 'Ryan': {1:[('175_1', 5.3), ('175_2', 5.3)], 2:[('175_1', 5.3), ('175_2', 5.3), ('175_3', 5.3)], 3:[('175_1', 5.3), ('175_2', 5.3), ('175_3', 5.3), ('175_4', 5.3)], 4:[('175_1', 5.3), ('175_2', 5.3), ('175_3', 5.3), ('175_4', 5.3), ('175_5', 5.3)],}, 'Stacey': {1:[('175_1', 5.3), ('175_2', 5.3)], 2:[('175_1', 5.3), ('175_2', 5.3), ('175_3', 5.3)], 3:[('175_1', 5.3), ('175_2', 5.3), ('175_3', 5.3), ('175_4', 5.3)], 4:[('175_1', 5.3), ('175_2', 5.3), ('175_3', 5.3), ('175_4', 5.3), ('175_5', 5.3)],}} results results = {} for char in top_selection: results[char] = {} for run in range(1, 5): shots = sorted(top_selection[char][:5*run]) results[char][run] = [(shot.replace('-', '_'), durs[shot]) for shot in shots] # [('100-1', 10.5) for shot in shots] # results def generate_results(results): team_name = 'MeMAD' desc = "The output" query_characters = ['Janine', 'Ryan', 'Stacey'] shots_per_run = {1: 5, 2: 10, 3:15, 4:20} for run in [1, 2, 3, 4]: with open(f'xml/{team_name}_run_{run}.xml', 'wt') as file: file.write('<!DOCTYPE videoSummarizationResults SYSTEM "https://www-nlpir.nist.gov/projects/tv2020/dtds/videoSummarizationResults.dtd">\n') file.write('<videoSummarizationResults>\n') file.write(f'\t<videoSummarizationRunResult pid="{team_name}" priority="{run}" desc="Run {run}">\n') for character in query_characters: n_shots = shots_per_run[run] duration = sum([c[1] for c in results[character][run]]) file.write(f'\t\t<videoSummarizationTopicResult target="{character}" 
numShots="{shots_per_run[run]}" summTime="{round(duration, 1)}">\n') for i, shot_id in enumerate(results[character][run]): file.write(f'\t\t\t<item seqNum="{i+1}" shotId="shot{shot_id[0]}"/>\n') file.write(f'\t\t</videoSummarizationTopicResult>\n') file.write('\t</videoSummarizationRunResult>\n') file.write('</videoSummarizationResults>\n') generate_results(results)
other_experiments/CSI/submission_generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.10 64-bit (''base'': conda)' # name: python3 # --- # # Dataset Generation # # Here we will generate 2 batches of datasets for our multi-class classification experiments. # # First we produce our baseline datasets containing most of the features present in CIC_DDoS2019, and then we produce our time-based feature datasets, each containing only the 25 time-based features as well as a label # We start by importing relavent libraries, setting a seed for reproducibility, and by printing out the versions of the libraries we are using for reproducibility. # + import os, platform, pprint, sys import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd seed: int = 14 # set up pretty printer for easier data evaluation pretty = pprint.PrettyPrinter(indent=4, width=30).pprint print( f''' python:\t{platform.python_version()} \tmatplotlib:\t{mpl.__version__} \tnumpy:\t\t{np.__version__} \tpandas:\t\t{pd.__version__} ''' ) # - # ## Preliminaries # # Next, we do some preliminary set up. We list the data files we will be using and a list of new column names for the datasets that is more readable and understandable. 
# + data_set_1: list = [ 'DrDoS_DNS.csv' , 'DrDoS_LDAP.csv' , 'DrDoS_MSSQL.csv' , 'DrDoS_NetBIOS.csv' , 'DrDoS_NTP.csv' , 'DrDoS_SNMP.csv' , 'DrDoS_SSDP.csv' , 'DrDoS_UDP.csv' , 'Syn.csv' , 'TFTP.csv' , 'UDPLag.csv' , ] data_set_2: list = [ 'LDAP.csv' ,'MSSQL.csv' , 'NetBIOS.csv' ,'Portmap.csv' , 'Syn.csv' ,'UDP.csv' , 'UDPLag.csv' , ] data_set: list = data_set_1 + data_set_2 # a list of DDoS attack types with indicies that map to the indicies of data_set data_location: list = [ 'DNS' , 'LDAP' , 'MSSQL', 'NetBIOS', 'NTP' , 'SNMP' , 'SSDP', 'UDP', 'Syn' , 'TFTP', 'UDPLag', 'LDAP' , 'MSSQL' , 'NetBIOS', 'Portmap', 'Syn' , 'UDP', 'UDPLag', ] # standardized column names for our data new_column_names: dict = { 'Unnamed: 0' :'Unnamed' , 'Flow ID' :'Flow ID' , ' Source IP' :'Source IP' , ' Source Port' :'Source Port' , ' Destination IP' :'Destination IP' , ' Destination Port' :'Destination Port' , ' Protocol' :'Protocol' , ' Total Length of Bwd Packets':'Total Length of Bwd Packets' , ' Flow Duration' :'Flow Duration' , ' Total Fwd Packets' :'Total Fwd Packets' , ' Total Backward Packets' :'Total Backward Packets' , 'Total Length of Fwd Packets' :'Total Length of Fwd Packets' , ' Timestamp' :'Timestamp' , ' Init_Win_bytes_backward' :'Init Win bytes backward' , ' Fwd Packet Length Max' :'Fwd Packet Length Max' , ' Fwd Packet Length Min' :'Fwd Packet Length Min' , ' Fwd Packet Length Mean' :'Fwd Packet Length Mean' , ' Fwd Packet Length Std' :'Fwd Packet Length Std' , 'Bwd Packet Length Max' :'Bwd Packet Length Max' , ' Bwd Packet Length Min' :'Bwd Packet Length Min' , ' Bwd Packet Length Mean' :'Bwd Packet Length Mean' , ' Bwd Packet Length Std' :'Bwd Packet Length Std' , 'Flow Bytes/s' :'Flow Bytes/s' , ' Flow Packets/s' :'Flow Packets/s' , ' Flow IAT Mean' :'Flow IAT Mean' , ' Flow IAT Std' :'Flow IAT Std' , ' Flow IAT Max' :'Flow IAT Max' , ' Flow IAT Min' :'Flow IAT Min' , 'Fwd IAT Total' :'Fwd IAT Total' , ' Fwd IAT Mean' :'Fwd IAT Mean' , ' Fwd IAT Std' :'Fwd IAT 
Std' , ' Fwd IAT Max' :'Fwd IAT Max' , ' Fwd IAT Min' :'Fwd IAT Min' , 'Bwd IAT Total' :'Bwd IAT Total' , ' Bwd IAT Mean' :'Bwd IAT Mean' , ' Bwd IAT Std' :'Bwd IAT Std' , ' Bwd IAT Max' :'Bwd IAT Max' , ' Bwd IAT Min' :'Bwd IAT Min' , 'Fwd PSH Flags' :'Fwd PSH Flags' , ' Bwd PSH Flags' :'Bwd PSH Flags' , ' Fwd URG Flags' :'Fwd URG Flags' , ' Bwd URG Flags' :'Bwd URG Flags' , ' Fwd Header Length' :'Fwd Header Length' , ' Bwd Header Length' :'Bwd Header Length' , 'Fwd Packets/s' :'Fwd Packets/s' , ' Bwd Packets/s' :'Bwd Packets/s' , ' Min Packet Length' :'Min Packet Length' , ' Max Packet Length' :'Max Packet Length' , ' Packet Length Mean' :'Packet Length Mean' , ' Packet Length Std' :'Packet Length Std' , ' Packet Length Variance' :'Packet Length Variance' , 'FIN Flag Count' :'FIN Flag Count' , ' SYN Flag Count' :'SYN Flag Count' , ' RST Flag Count' :'RST Flag Count' , ' PSH Flag Count' :'PSH Flag Count' , ' ACK Flag Count' :'ACK Flag Count' , ' URG Flag Count' :'URG Flag Count' , ' CWE Flag Count' :'CWE Flag Count' , ' ECE Flag Count' :'ECE Flag Count' , ' Down/Up Ratio' :'Down/Up Ratio' , ' Average Packet Size' :'Average Packet Size' , ' Avg Fwd Segment Size' :'Avg Fwd Segment Size' , ' Avg Bwd Segment Size' :'Avg Bwd Segment Size' , ' Fwd Header Length.1' :'Fwd Header Length.1' , 'Fwd Avg Bytes/Bulk' :'Fwd Avg Bytes/Bulk' , ' Inbound' :'Inbound' , ' Fwd Avg Packets/Bulk' :'Fwd Avg Packets/Bulk' , ' Fwd Avg Bulk Rate' :'Fwd Avg Bulk Rate' , ' Bwd Avg Bytes/Bulk' :'Bwd Avg Bytes/Bulk' , ' Bwd Avg Packets/Bulk' :'Bwd Avg Packets/Bulk' , 'Bwd Avg Bulk Rate' :'Bwd Avg Bulk Rate' , 'Subflow Fwd Packets' :'Subflow Fwd Packets' , ' Subflow Fwd Bytes' :'Subflow Fwd Bytes' , ' Subflow Bwd Packets' :'Subflow Bwd Packets' , ' Subflow Bwd Bytes' :'Subflow Bwd Bytes' , 'Init_Win_bytes_forward' :'Init Win bytes forward' , ' act_data_pkt_fwd' :'act data pkt fwd' , ' min_seg_size_forward' :'min seg size forward' , 'Active Mean' :'Active Mean' , ' Active Std' :'Active Std' , ' 
Active Max' :'Active Max' , ' Active Min' :'Active Min' , 'Idle Mean' :'Idle Mean' , ' Idle Std' :'Idle Std' , ' Idle Max' :'Idle Max' , ' Idle Min' :'Idle Min' , 'SimillarHTTP' :'SimillarHTTP' , ' Label' :'Label' , } # + def get_file_path(directory: str): ''' Closure that will return a function that returns the filepath to the directory given to the closure ''' def func(file: str) -> str: return os.path.join(directory, file) return func # locations of the data files relative to current directory data_path_1: str = './original/01-12/' data_path_2: str = './original/03-11/' # use the get_file_path closure to create a function that will return the path to a file file_path_1 = get_file_path(data_path_1) file_path_2 = get_file_path(data_path_2) # a list of all complete filepaths relative to current directory with indicies mapped to the indicies of data_set file_set: list = list(map(file_path_1, data_set_1)) file_set.extend(list(map(file_path_2, data_set_2))) print(f'We will be cleaning {len(file_set)} files:') print(f'Benign samples will be grabbed from each dataset and saved separately\n') pretty(file_set) # - # Now that we have our file paths, we set up a list of features to prune during our preprocessing phase # prune is a list of all features we know we don't want to use # Unnamed is eliminated because it is un-labeled and we cannot verify what it qualities of the data if describes # Fwd Header Length.1 is eliminated because it is a duplicate # all the other features are eliminated because they are string values and cannot be used for classification prune: list = [ 'Fwd Header Length.1', 'Unnamed', 'Source Port', 'Destination Port', 'Flow ID', 'Source IP', 'Destination IP', 'Timestamp', 'SimillarHTTP' ] # Maranhao et al. found in their study 'Tensor based framework for Distributed Denial of Service attack detection' that nine features were filled with only 0 values for every data collection in the dataset. 
Since an empty column of zeros will not contribute to the model's performance, we will remove those columns. # + # toPrune is a list of features with empty columns of 0s toPrune: list = [ 'Fwd URG Flags', 'Bwd URG Flags', 'Fwd PSH Flags', 'Fwd Avg Bytes/Bulk', 'Fwd Avg Packets/Bulk', 'Fwd Avg Bulk Rate', 'Bwd Avg Bytes/Bulk', 'Bwd Avg Packets/Bulk', 'Bwd Avg Bulk Rate' ] for i in toPrune: if i not in prune: prune.append(i) print(f'We will be pruning {len(prune)} features') for i, x in enumerate(prune): print(f'\t{i+1}:\t{x}') # - # ## Preprocessing and Data Cleaning # # Now that the preliminaries are done, we start processing the data. First we define some functions to load and clean the data, then we combine the data into dataframes based on their DDoS attack type. We keep the data manageable by sampling it down to sets of a million samples, using our seed to ensure that the results are reproducible. # + def clean_data(df: pd.DataFrame) -> pd.DataFrame: ''' Function will take a dataframe and remove the values from prune Inf values will also be removed from Flow Bytes/s and Flow Packets/s once appropriate rows and columns have been removed, we will return the dataframe with the appropriate values ''' # remove the features in the prune list for col in prune: if col in df.columns: df.drop(columns=[col], inplace=True) # drop missing values/NaN etc. 
df.dropna(inplace=True) # Search through dataframe for any Infinite or NaN values in various forms that were not picked up previously invalid_values: list = [ np.inf, np.nan, 'Infinity', 'inf', 'NaN', 'nan' ] for col in df.columns: for value in invalid_values: indexNames = df[df[col] == value].index if not indexNames.empty: print(f'deleting {len(indexNames)} rows with Infinity in column {col}') df.drop(indexNames, inplace=True) # Standardize the contents of the Label column df = df.replace( ['DrDoS_DNS'], 'DNS') df = df.replace( ['DrDoS_LDAP'], 'LDAP') df = df.replace( ['DrDoS_MSSQL'], 'MSSQL') df = df.replace( ['DrDoS_NetBIOS'], 'NetBIOS') df = df.replace( ['DrDoS_NTP'], 'NTP') df = df.replace( ['DrDoS_SNMP'], 'SNMP') df = df.replace( ['DrDoS_SSDP'], 'SSDP') df = df.replace( ['DrDoS_UDP'], 'UDP') df = df.replace( ['UDP-lag'], 'UDPLag') return df def load_data(filePath: str) -> tuple: ''' Loads the Dataset from the given filepath and caches it for quick access in the future Function will only work when filepath is a .csv file After the data is loaded, the benign samples are split and saved in a list the malicious samples are split and saved in a dictionary of lists indexed by attack type only the top million malicious samples are kept ''' # slice off the ./CSV/ from the filePath if filePath[0] == '.' 
and filePath[1] == '/': filePathClean: str = filePath[11::] pickleDump: str = f'./cache/{filePathClean}.pickle' else: pickleDump: str = f'./cache/{filePath}.pickle' print(f'Loading Dataset: {filePath}') print(f'\tTo Dataset Cache: {pickleDump}\n') # check if data already exists within cache if os.path.exists(pickleDump): df = pd.read_pickle(pickleDump) # if not, load data and clean it before caching it else: df = pd.read_csv(filePath, low_memory=True) df.to_pickle(pickleDump) df = df.rename(columns=new_column_names) # split the data into benign and malicious samples, keeping only the top 1 milliion # (+ 200 thousand to replace samples removed by cleaning) malicious samples benignSamples = df[df['Label'] == 'BENIGN'] maliciousSamples = df[df['Label'] != 'BENIGN'] if maliciousSamples.shape[0] > 1200000: maliciousSamples = maliciousSamples.sample(n=1200000, random_state=seed) # we remove the WebDDoS attacks because they are mixed in with other attacks # and the dataset only provides less than 500 WebDDoS samples # so we cannot do anything meaningful with them at the scale of our other experiments maliciousSamples = maliciousSamples[maliciousSamples['Label'] != 'WebDDoS'] print(f'\tLoaded {df.shape[0]} Samples as {benignSamples.shape[0]} Benign samples and {maliciousSamples.shape[0]} Malicious samples\n') return (benignSamples, maliciousSamples) # + # set up a dictionary to hold all the malicious samples malicious_dict: dict = {} for i in range(len(data_location)): malicious_dict[data_location[i]] = [] # load the data and save the samples in a dictionary or list for further processing for i in range(len(data_set)): benignSamples, maliciousSamples = load_data(file_set[i]) benignSamples = clean_data(benignSamples) maliciousSamples = clean_data(maliciousSamples) print() if i == 0: benign_list: list = [benignSamples] else: benign_list.append(benignSamples) malicious_dict[data_location[i]].append(maliciousSamples) # save the benign samples as a single dataframe benign_df: 
pd.DataFrame = pd.concat(benign_list, ignore_index=True)
# NOTE(review): the target of the assignment above is cut off at the start of this
# view; judging by the prints below it is presumably `benign_df: pd.DataFrame = ...`
# — verify upstream before relying on it.
print(f'Benign Samples: {benign_df.shape[0]}')
# -

# Sanity check: shape of every raw dataframe collected for each attack type.
for key in malicious_dict.keys():
    for entry in malicious_dict[key]:
        print(key, ':', entry.shape)

# +
# Build one dataframe per attack type, keeping only the rows labelled with that
# attack, and cap each type at 1M rows (seeded sample so runs are reproducible).
attack_samples: dict = {}
for key in malicious_dict.keys():
    ddos_samples: list = []
    for df in malicious_dict[key]:
        ddos_samples.append(df[df['Label'] == key])
    new_df = pd.concat(ddos_samples, ignore_index=True)
    if new_df.shape[0] > 1000000:
        new_df = new_df.sample(n=1000000, random_state=seed)
    attack_samples[key] = new_df

# Persist the per-attack samples for later single-class experiments.
print('Benign', ':', benign_df.shape)
for key in attack_samples.keys():
    print(key, ':', attack_samples[key].shape)
    print(f'\tto file: ./prepared/single/{key}.csv')
    attack_samples[key].to_csv(f'./prepared/single/{key}.csv', index=False)
# -

print('Benign', ':', benign_df.shape)
print('\tto file: ./prepared/single/BENIGN.csv')  # no placeholders -> plain string, not f-string
benign_df.to_csv('./prepared/single/BENIGN.csv', index=False)

# Deliberate execution stop: everything below re-generates the derived datasets.
assert False

# ## Baseline dataset generation
# Now that we have all of our data in 13 dataframes, we can begin to create our datasets. We will form 13 datasets for one-vs-all multi-class classification.
#
#
# The first dataset will be the Benign vs DDoS dataset. It will be a 50/50 split of the Benign vs DDoS samples. The DDoS samples will be equal parts of each DDoS attack type. Since we have around 112 thousand benign samples, we will use 112,000 benign samples and 112,000 samples of each DDoS attack type.
#
#
# Datasets 2-13 will each be one of the DDoS attack types (except WebDDoS since it has less samples) vs a basket of all the other DDoS attack types and the benign samples. Each dataset will have double the number of available samples for that attack type.
#
# Datasets 14-25 will be each of the DDoS attack types against Benign Samples. Each dataset will have double the number of available benign samples

# +
# First we make the first dataset, benign vs DDoS. It will be a 50/50 split between
# benign and DDoS samples where the DDoS samples are chosen equally from a pool of
# all the DDoS attack types.
total_benign = benign_df.shape[0]
total_each_attack_type = int(total_benign/12)

DDoS_list = []
for key in attack_samples.keys():
    DDoS_list.append(attack_samples[key].sample(n=total_each_attack_type, random_state=seed))

ddos_df = pd.concat(DDoS_list, ignore_index=True)
to_replace = list(attack_samples.keys())
# to_replace.append('UDP-lag')
# Collapse every attack label into a single "DDOS" class for binary classification.
ddos_df.replace(to_replace=to_replace, value="DDOS", inplace=True)
Benign_vs_DDoS = pd.concat([benign_df, ddos_df], ignore_index=True)
print(f'Benign vs DDoS: {Benign_vs_DDoS.shape}')
Benign_vs_DDoS.to_csv("./prepared/baseline/Benign_vs_DDoS.csv", index=False)

# +
# Here we make the Many vs Many dataset that includes benign samples.
# We use the same number of benign and ddos samples from each ddos attack type.
total_benign = benign_df.shape[0]
total_each_attack_type = total_benign

DDoS_list = []
for key in attack_samples.keys():
    DDoS_list.append(attack_samples[key].sample(n=total_each_attack_type, random_state=seed))

ddos_df = pd.concat(DDoS_list, ignore_index=True)
# to_replace = list(attack_samples.keys())
# # to_replace.append('UDP-lag')
# ddos_df.replace(to_replace=to_replace, value="DDOS", inplace=True)
Benign_Many_vs_Many = pd.concat([benign_df, ddos_df], ignore_index=True)
print(f'Benign Many vs Many: {Benign_Many_vs_Many.shape}')
Benign_Many_vs_Many.to_csv("./prepared/baseline/Benign_Many_vs_Many.csv", index=False)
# -

# here we generate attack vs basket datasets. one for each attack type
for key in attack_samples.keys():
    total_attacks = attack_samples[key].shape[0]
    # 11 other attack types + benign = 12 equal shares in the "all" basket.
    total_each_other_type = int(total_attacks / 12)
    other_type_list = []
    for attack in attack_samples.keys():
        if attack != key:
            other_type_list.append(attack_samples[attack].sample(n=total_each_other_type, random_state=seed))
    other_type_list.append(benign_df.sample(n=total_each_other_type, random_state=seed))
    other_df = pd.concat(other_type_list, ignore_index=True)
    to_replace = list(attack_samples.keys())
    to_replace.append('BENIGN')
    other_df.replace(to_replace=to_replace, value=f'NOT{key}', inplace=True)
    attack_df = attack_samples[key]
    Attack_vs_all = pd.concat([attack_df, other_df], ignore_index=True)
    print(f'{key} vs all: {Attack_vs_all.shape}')
    Attack_vs_all.to_csv(f'./prepared/baseline/{key}_vs_all.csv', index=False)

# +
# here we generate the many vs many dataset without benign samples
# Use the size of the smallest attack class so every class is balanced
# (min() replaces the previous large-sentinel search loop).
total_each_attack_type: int = min(df.shape[0] for df in attack_samples.values())

attack_list = []
for attack in attack_samples.keys():
    attack_list.append(attack_samples[attack].sample(n=total_each_attack_type, random_state=seed))

Attacks_Many_vs_Many = pd.concat(attack_list, ignore_index=True)
print(f'Attacks Many vs Many: {Attacks_Many_vs_Many.shape}')
Attacks_Many_vs_Many.to_csv('./prepared/baseline/Attacks_Many_vs_Many.csv', index=False)
# -

# here we generate attack vs benign datasets. one for each attack type
for key in attack_samples.keys():
    total_benign = benign_df.shape[0]
    # random_state added for reproducibility, consistent with every other sample() call
    attack_df = attack_samples[key].sample(n=total_benign, random_state=seed)
    Attack_vs_Benign = pd.concat([attack_df, benign_df], ignore_index=True)
    print(f'{key} vs Benign: {Attack_vs_Benign.shape}')
    Attack_vs_Benign.to_csv(f'./prepared/baseline/{key}_vs_benign.csv', index=False)

Benign_vs_DDoS.shape

# here we generate attack vs basket datasets. one for each attack type without benign samples
for key in attack_samples.keys():
    total_attacks = attack_samples[key].shape[0]
    # 11 other attack types share the basket (benign excluded here).
    total_each_other_type = int(total_attacks / 11)
    other_type_list = []
    for attack in attack_samples.keys():
        if attack != key:
            other_type_list.append(attack_samples[attack].sample(n=total_each_other_type, random_state=seed))
    other_df = pd.concat(other_type_list, ignore_index=True)
    to_replace = list(attack_samples.keys())
    # NOTE: 'BENIGN' is kept in to_replace for symmetry with the vs-all loop;
    # other_df contains no benign rows, so replacing it is a no-op.
    to_replace.append('BENIGN')
    other_df.replace(to_replace=to_replace, value='DDOS', inplace=True)
    attack_df = attack_samples[key]
    Attack_vs_DDoS = pd.concat([attack_df, other_df], ignore_index=True)
    print(f'{key} vs DDoS: {Attack_vs_DDoS.shape}')
    Attack_vs_DDoS.to_csv(f'./prepared/baseline/{key}_vs_ddos.csv', index=False)

# ## Time-Based Dataset Generation
# Since one of our research directions is investigating the use of time-based features as a methodology to detect and classify DDoS traffic like they have been used to detect and classify Tor traffic, we will now create datasets containing only the time-based features. Lashkari et al. used a set of 23 time based features given by the pic below, but in addition to those 23, there are 2 more:
# * Forward Inter Arrival Time Total (Fwd IAT Total)
# * Backward Inter Arrival Time Total (Bwd IAT Total)

# ![Feature descriptions used by Lashkari et al, 2017 in their conference paper -- Characterization of Tor Traffic using Time based Features](./assets/CIC_feature_descriptions.png "Feature descriptions used by Lashkari et al, 2017 in their conference paper -- Characterization of Tor Traffic using Time based Features")

# a list of all the time based features, as they are given in the dataframes we are dealing with.
# We also add Label to make a total of 26 features
time_based_features: list = [
    'Fwd IAT Mean',
    'Fwd IAT Std',
    'Fwd IAT Max',
    'Fwd IAT Min',
    'Bwd IAT Mean',
    'Bwd IAT Std',
    'Bwd IAT Max',
    'Bwd IAT Min',
    'Flow IAT Mean',
    'Flow IAT Std',
    'Flow IAT Max',
    'Flow IAT Min',
    'Active Mean',
    'Active Std',
    'Active Max',
    'Active Min',
    'Idle Mean',
    'Idle Std',
    'Idle Max',
    'Idle Min',
    'Flow Bytes/s',
    'Flow Packets/s',
    'Flow Duration',
    'Fwd IAT Total',
    'Bwd IAT Total',
    'Label'
]

# +
Time_Based_Benign_vs_DDoS = Benign_vs_DDoS[time_based_features]
Time_Based_Benign_vs_DDoS.to_csv("./prepared/timebased/Benign_vs_DDoS.csv", index=False)

# +
Time_Based_Benign_Many_vs_Many = Benign_Many_vs_Many[time_based_features]
Time_Based_Benign_Many_vs_Many.to_csv("./prepared/timebased/Benign_Many_vs_Many.csv", index=False)
# -

Time_Based_Benign_vs_DDoS.shape

# Time-based variant of the attack-vs-all datasets (same construction as the
# baseline loop above, then projected onto the time-based feature columns).
for key in attack_samples.keys():
    total_attacks = attack_samples[key].shape[0]
    total_each_other_type = int(total_attacks / 12)
    other_type_list = []
    for attack in attack_samples.keys():
        if attack != key:
            other_type_list.append(attack_samples[attack].sample(n=total_each_other_type, random_state=seed))
    other_type_list.append(benign_df.sample(n=total_each_other_type, random_state=seed))
    other_df = pd.concat(other_type_list, ignore_index=True)
    to_replace = list(attack_samples.keys())
    to_replace.append('BENIGN')
    # keyword args used for consistency with the other replace() calls
    other_df.replace(to_replace=to_replace, value=f'NOT{key}', inplace=True)
    attack_df = attack_samples[key]
    Attack_vs_all = pd.concat([attack_df, other_df], ignore_index=True)
    Time_Based_Attack_vs_all = Attack_vs_all[time_based_features]
    Time_Based_Attack_vs_all.to_csv(f'./prepared/timebased/{key}_vs_all.csv', index=False)

# +
Time_Based_Attacks_Many_vs_Many = Attacks_Many_vs_Many[time_based_features]
Time_Based_Attacks_Many_vs_Many.to_csv("./prepared/timebased/Attacks_Many_vs_Many.csv", index=False)
# -

# here we generate attack vs benign datasets. one for each attack type
for key in attack_samples.keys():
    total_benign = benign_df.shape[0]
    # random_state added for reproducibility, consistent with every other sample() call
    attack_df = attack_samples[key].sample(n=total_benign, random_state=seed)
    Attack_vs_Benign = pd.concat([attack_df, benign_df], ignore_index=True)
    Time_Based_Attack_vs_Benign = Attack_vs_Benign[time_based_features]
    Time_Based_Attack_vs_Benign.to_csv(f'./prepared/timebased/{key}_vs_benign.csv', index=False)

# here we generate attack vs basket datasets. one for each attack type without benign samples
for key in attack_samples.keys():
    total_attacks = attack_samples[key].shape[0]
    total_each_other_type = int(total_attacks / 11)
    other_type_list = []
    for attack in attack_samples.keys():
        if attack != key:
            other_type_list.append(attack_samples[attack].sample(n=total_each_other_type, random_state=seed))
    other_df = pd.concat(other_type_list, ignore_index=True)
    to_replace = list(attack_samples.keys())
    to_replace.append('BENIGN')  # no-op here (no benign rows); kept for symmetry
    other_df.replace(to_replace=to_replace, value='DDOS', inplace=True)
    attack_df = attack_samples[key]
    Attack_vs_DDoS = pd.concat([attack_df, other_df], ignore_index=True)
    Time_Based_Attack_vs_DDoS = Attack_vs_DDoS[time_based_features]
    Time_Based_Attack_vs_DDoS.to_csv(f'./prepared/timebased/{key}_vs_ddos.csv', index=False)
data/Dataset_generation.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # Machine Learning. # ## Aprendizaje Estadístico. # # https://sites.google.com/a/ciencias.unam.mx/statistical_learning/clase # # El aprendizaje estadístico (Statistical Learning (SL) en inglés) es un vasto conjunto de herramientas cuyo objetivo principal es obtener el mayor conocimiento posible de un conjunto de datos. # # * Dependiendo del tipo de pregunta que deseamos realizar acerca de la naturaleza de los datos será la técnica que se utilizará. # * De manera general, las técnicas de aprendizaje en SL se pueden clasificar en dos grandes rubros: Supervisado y No Supervisado # * Uno de los principales objetivos del SL es el de predicción. # ## Statistical Learning y Machine Learning. # # * ML es una rama de la Inteligencia Artificial. # * SL surgió como una rama de la Estadística. # * Hay mucho en común entre las dos disciplinas, tanto el objetivo # principal (predicción), como la clasificacion de problemas # (supervisado vs. no supervisado). # * ML : Se enfoca a problemas de gran escala y/o tiempo reducido y está enfocado a soluciones con Exactitud. # * SL enfatiza en los modelos, su interpretación y la medición de su eficacia y está enfocado a soluciones con un balance entre Exactitud y Precisión. # * La division entre ambas disciplinas cada ves es más tenue. # ## El ciclo de *Machine Learning*. # <img src="img/01/proceso_de_machine_learning.png" width=80%> # ### Selección del modelo. # # Los modelos en *Machine Learning* son algoritmos y técnicas que permiten "emular" un fenómeno a partir de ciertos patrones definidos por atributos (*features*). # # * Un atributo (*feature*) es una propiedad mesurable o una característica de un fenómeno observado. Por lo general, un atributo es la columna de una tabla. 
Los atributos al final de cuentas son variables, las cuales pueden ser la causa o el efecto de un fenómeno.
#
# * El *dataset* de *features* descrito por el modelo se conoce como *feature space*. A un punto dado dentro del *feature space* se le conoce como *k*.
#
#
# * https://youtu.be/s4pVapaE9AY
# * https://youtu.be/EfjGEs83RtY
# * https://youtu.be/j1bpJTYdQGE

# ### Entrenamiento del modelo.
#
# El proceso de entrenamiento de un modelo consiste en ajustar un modelo a un conjunto de datos.

# ### Evaluación.
#
# La evaluación sucede una vez que el modelo fue entrenado para ajustarse a un dataset inicial y entonces el modelo se aplica a otros datasets para validar la generalización.
#

# ## Tipos de algoritmos de *ML*.
#
# * **Aprendizaje supervisado.** Es un mecanismo que infiere las relaciones inherentes entre los datos observados y una variable dependiente (*label*), lo que podría permitir una predicción.
#
# * **Aprendizaje no supervisado.** Son algoritmos diseñados para encontrar las estructuras en *datasets* que no tienen definidas variables dependientes (*unlabeled*) y en el que se utilizan modelos probabilísticos para describir un fenómeno.
#
# * **Metodología de refuerzo del Aprendizaje.** Son algoritmos que permiten adaptar los modelos conforme el entorno cambia.

# ## Principales algoritmos de *ML*.
#
# https://en.wikipedia.org/wiki/Outline_of_machine_learning#Machine_learning_algorithms

# ### Algoritmos supervisados.

# #### Métodos de regresión.
#
# * Regresión lineal.
# * Regresión logística.
# * Regresión polinomial.
# * Regresión por pasos.
# * Regresión Ridge.
# * Regresión Lasso.
# * Regresión ElasticNet.

# #### Vecinos cercanos a K (*K Nearest Neighbors*).

# #### Clasificación por Naive Bayes.

# #### Support Vector Machines.

# #### Árboles de Decisión.

# ### Algoritmos no supervisados.

# #### K Means Clustering. Agrupamiento de promedios de k.

# #### Self-organized Maps.

# ### Redes neuronales.
# <p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Licencia Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Esta obra está bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p> # <p style="text-align: center">&copy; <NAME>. 2020.</p>
12_machine_learning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + 
# First draft of the hangman game, kept commented out for reference.
# def jogar():
#     print(40 * "*")
#     print("Bem vindo ao jogo da FORCA")
#     print(40 * "*")

#     palavra_secreta = 'banana'
#     enforcou = False
#     acertou = False

#     while not enforcou and not acertou:
#         chute = input("Qual letra? ")
#         print('jogando...')

# # if __name__ == "main":
# jogar()
# -

palavra = 'banana'

# str.find returns the index of the first occurrence, or -1 when absent.
palavra.find('b') #vai me retornar a posição da letra, no caso 0 (zero) - primeira

palavra.find('n') #retorna a posição da letra encontrada. mas apenas da primeira

palavra.find('z') #retorna -1 porque não encontrou a letra procurada

# O find também aceita um segundo parâmetro, que define a partir de qual posição gostaríamos de começar, por exemplo:
#
# >>> palavra = "aluracursos"
# >>> palavra.find("a",1) #procurando "a" a partir da segunda posição
# 4

# Iterating a string yields one character at a time.
for letra in palavra:
    print(letra)

# +
# Second draft: scans the secret word for each guessed letter.
# NOTE(review): enforcou/acertou are never updated, so this loop only ends
# via KeyboardInterrupt — intentional here; later notebook parts refine it.
def jogar():
    print(40 * "*")
    print("Bem vindo ao jogo da FORCA")
    print(40 * "*")

    palavra_secreta = 'banana'
    enforcou = False
    acertou = False

    while not enforcou and not acertou:
        chute = input("Qual letra? ")

        for letra in palavra_secreta:
            if letra == chute:
                print(f'Temos a letra {chute}')
#             else:
#                 print(f"Não tem a letra {chute}")

        print('jogando...')

# if __name__ == "main":
jogar()
# -

# ### O Python diferencia letra maiúscula de minúscula, portanto a != A
#
# #### Vamos criar um index para o python devolver a posição em que está a letra.

# +
# Third draft: also reports the position of each match via a manual index.
def jogar():
    print(40 * "*")
    print("Bem vindo ao jogo da FORCA")
    print(40 * "*")

    palavra_secreta = 'banana'
    enforcou = False
    acertou = False

    while not enforcou and not acertou:
        chute = input("Qual letra? ")

        index = 0
        for letra in palavra_secreta:
            if letra == chute:
                print(f'Temos a letra {chute} na posição {index}')
            index = index + 1

        print('jogando...')

# if __name__ == "main":
jogar()
# -

# ### Alguns métodos usados com strings

nome = 'rafael'

# str methods return NEW strings; the original is never mutated.
nome.capitalize() #Retorna com PRIMEIRA LETRA MAIÚSCULA

nome

# +
# nome.capitalize(inplace = True) #VAI DAR ERROR
# TypeError: str.capitalize() takes no keyword arguments

#Pra guardar com a letra maiúscula vou precisar criar outra variável
nome2 = nome.capitalize()
# -

nome2

# +
nome.endswith('el') #faço uma pergunta se a palavra termina com tal(is) letra(s)

#STARTSWITH
# -

nome2.lower() #transforma todos os caracteres em letras minúsculas

nome2.upper()

fruta = ' abacaxi '

fruta

# strip() removes leading/trailing whitespace (returns a new string).
fruta.strip()

fruta

nova_fruta = fruta.strip()

nova_fruta

# +
# Final draft: guesses are stripped and compared case-insensitively.
def jogar():
    print(40 * "*")
    print("Bem vindo ao jogo da FORCA")
    print(40 * "*")

    palavra_secreta = 'banana'
    enforcou = False
    acertou = False

    while not enforcou and not acertou:
        chute = input("Qual letra? ")
        chute = chute.strip()

        index = 0
        for letra in palavra_secreta:
            if chute.upper() == letra.upper():
                print(f'Temos a letra {chute} na posição {index}')
            index = index + 1

        print('jogando...')

# if __name__ == "main":
jogar()
# -
PY_02_Intro_parte_2/PY_02_forca_pt2_strings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lab 1a # ## Basics Practice: Notebooks, Comments, print(), type(), Addition, Errors and Art # # ### Student will be able to # - Use Python 3 in Jupyter notebooks # - Write working code using `print()` and `#` comments # - Write working code using `type()` and variables # - Combine Strings using string addition (+) # - Add numbers in code (+) # - Troubleshoot errors # - Create character art # # &gt;**Note:** The **[ ]** indicates student has a task to complete. # # &gt;**Reminder:** To run code and save changes: student should upload or clone a copy of notebooks. # # #### Notebook use # - [ ] Insert a **code cell** below # - [ ] Enter the following Python code, including the comment: # ```Python # # [ ] print 'Hello!' and remember to save notebook! # print('Hello!') # ``` # Then run the code - the output should be: # `Hello!` # #### Run the cell below # - [ ] Use **Ctrl + Enter** # - [ ] Use **Shift + Enter** print('watch for the cat') # #### Student's Notebook editing # - [ ] Edit **this** notebook Markdown cell replacing the word "Student's" above with your name # - [ ] Run the cell to display the formatted text # - [ ] Run any 'markdown' cells that are in edit mode, so they are easier to read # #### [ ] Convert \*this\* cell from markdown to a code cell, then run it # print('Run as a code cell') # # ## # Comments # Create a code comment that identifies this notebook, containing your name and the date. 
# #### Use print() to # - [ ] print [**your_name**] # - [ ] print **is using Python!** # + # [ ] print your name # [ ] print "is using Python" # - # Output above should be: # `Your Name # is using Python!` # #### Use variables in print() # - [ ] Create a variable **your_name** and assign it a string containing your name # - [ ] Print **your_name** # + # [ ] create a variable your_name and assign it a string containing your name #[ ] print your_name # - # #### Create more string variables # - **[ ]** Create variables as directed below # - **[ ]** Print the variables # + # [ ] create variables and assign values for: favorite_song, shoe_size, lucky_number # [ ] print the value of each variable favorite_song, shoe_size, and lucky_number # - # #### Use string addition # - **[ ]** Print the above string variables (favorite_song, shoe_size, lucky_number) combined with a description by using **string addition** # &gt;For example, favorite_song displayed as: # `favorite song is happy birthday` # + # [ ] print favorite_song with description # [ ] print shoe_size with description # [ ] print lucky_number with description # - # ##### More string addition # - **[ ]** Make a single string (sentence) in a variable called favorite_lucky_shoe using **string addition** with favorite_song, shoe_size, lucky_number variables and other strings as needed # - **[ ]** Print the value of the favorite_lucky_shoe variable string # &gt; Sample output: # `For singing happy birthday 8.5 times, you will be fined $25` # + # assign favorite_lucky_shoe using # - # ### print() art # #### Use `print()` and the asterisk **\*** to create the following shapes: # - [ ] Diagonal line # - [ ] Rectangle # - [ ] Smiley face # + # [ ] print a diagonal using "*" # [ ] rectangle using "*" # [ ] smiley using "*" # - # ### Using `type()` # #### Calculate the *type* using `type()` # + # [ ] display the type of 'your name' (use single quotes) # + # [ ] display the type of "save your notebook!" 
(use double quotes) # + # [ ] display the type of "25" (use quotes) # + # [ ] display the type of "save your notebook " + 'your name' # + # [ ] display the type of 25 (no quotes) # + # [ ] display the type of 25 + 10 # + # [ ] display the type of 1.55 # + # [ ] display the type of 1.55 + 25 # - # #### Find the type of variables # - **[ ]** Run the cell below to make the variables available to be used in other code # - **[ ]** Display the data type as directed in the cells that follow # + # assignments ***RUN THIS CELL*** before starting the section # + # [ ] display the current type of the variable student_name # + # [ ] display the type of student_age # + # [ ] display the type of student_grade # + # [ ] display the type of student_age + student_grade # + # [ ] display the current type of student_id # + # assign new value to student_id # [ ] display the current of student_id # - # #### Number integer addition # # - **[ ]** Create variables (x, y, z) with integer values # + # [ ] create integer variables (x, y, z) and assign them 1-3 digit integers (no decimals - no quotes) # - # - **[ ]** Insert a **code cell** below # - **[ ]** Create an integer variable named **xyz_sum** equal to the sum of x, y, and z # - **[ ]** Print the value of **xyz_sum** # + # - # ### Errors # - **[ ]** Troubleshoot and fix the errors below # + # [ ] fix the error print("Hello World!"") # + # [ ] fix the error print(strings have quotes and variables have names) # + # [ ] fix the error print( "I have $" + 5) # + # [ ] fix the error print('always save the notebook") # - # ## ASCII art # # - **[ ]** Display first name or initials as ASCII Art # - **[ ]** Challenge: insert an additional code cell to make an ASCII picture # # Check out the video if you are unsure of ASCII Art (it was the extra credit assignment) # # [![view video](https://iajupyterprodblobs.blob.core.windows.net/imagecontainer/common/play_video.png)]( 
http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d7a3d1b4-8d8d-4e9e-a984-a6920bcd7ca1/Unit1_Section1.5-ASCII_Art.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d7a3d1b4-8d8d-4e9e-a984-a6920bcd7ca1/Unit1_Section1.5-ASCII_Art.vtt","srclang":"en","kind":"subtitles","label":"english"}]) # # # + # [ ] ASCII ART # + # [ ] ASCII ART # - # [Terms of use](http://go.microsoft.com/fwlink/?LinkID=206977)   [Privacy &amp; cookies](https://go.microsoft.com/fwlink/?LinkId=521839)   © 2017 Microsoft
ModuleTwoLabActivity (3).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import interact, interactive, fixed


def rbfimq(x, y, c=1):
    """Inverse multiquadric RBF kernel: 1 / sqrt((x - y)^2 + c^2)."""
    z = 1 / np.sqrt((x-y)**2 + c**2)
    return z


def drawheatmap(max_xy, step, kfunc, **kwargs):
    """Plot kfunc(x, y, **kwargs) as a heatmap over [-max_xy, max_xy]^2.

    max_xy : half-width of the square grid
    step   : grid spacing
    kfunc  : kernel taking two scalars (plus **kwargs) and returning a value in [0, 1]
    """
    # Fix: the original computed this identical arange twice; once is enough.
    X = np.arange(-max_xy, max_xy+1, step)
    chunks = X.shape[0]
    data = np.zeros((chunks, chunks))
    for i, x in enumerate(X):
        for j, y in enumerate(X):
            data[i, j] = kfunc(x, y, **kwargs)
    plt.gca().invert_yaxis()
    plt.pcolormesh(X, X, data, cmap='Blues', vmin=0, vmax=1)
    plt.colorbar()


drawheatmap(5, 0.5, rbfimq)


@interact(c=(0.01, 5))
def inter_rbf(c):
    """Interactive widget: redraw the IMQ kernel heatmap for slider value c."""
    drawheatmap(5, 0.5, rbfimq, c=c)
visualize-rbf-imq.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/spour/DeepExplain/blob/master/TFomics_interpretability_example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={"base_uri": "https://localhost:8080/"} id="IV3iMx56OMoI" outputId="0b7677f5-dccb-43d1-d9c3-19ec61941e4a"
# Report the GPU visible to the Colab runtime.
# gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
print(gpu_info)

# + colab={"base_uri": "https://localhost:8080/"} id="bch39A2vQadU" outputId="9e054033-2fab-447b-d547-8a09c94ede87"
# !pip install logomaker
# !pip install https://github.com/p-koo/tfomics/tarball/master

# + colab={"base_uri": "https://localhost:8080/"} id="7w27KShoQdY9" outputId="335e3d45-8f9b-4794-afd4-68ce9059a894"
# !wget https://www.dropbox.com/s/5iww0ootxkr6e21/synthetic_code_dataset.h5

# + id="v-LccPSzQiVt"
import os, h5py
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

import tensorflow as tf
from tensorflow import keras

# + [markdown] id="GF7U1QPYbHAJ"
# #### Load data

# + id="0zl17eRXQ5WZ" colab={"base_uri": "https://localhost:8080/"} outputId="1faf6aeb-dce5-4f2a-bb03-250c7e6d7514"
data_path = '.'
filepath = os.path.join(data_path, 'synthetic_code_dataset.h5')
with h5py.File(filepath, 'r') as dataset:
    x_train = np.array(dataset['X_train']).astype(np.float32)
    y_train = np.array(dataset['Y_train']).astype(np.float32)
    x_valid = np.array(dataset['X_valid']).astype(np.float32)
    y_valid = np.array(dataset['Y_valid']).astype(np.int32)
    x_test = np.array(dataset['X_test']).astype(np.float32)
    y_test = np.array(dataset['Y_test']).astype(np.int32)
    model_test = np.array(dataset['model_test']).astype(np.float32)

# Transpose from (N, A, L) to (N, L, A) channel-last layout — TODO confirm
# against the HDF5 file's actual axis order.
model_test = model_test.transpose([0,2,1])
x_train = x_train.transpose([0,2,1])
x_valid = x_valid.transpose([0,2,1])
x_test = x_test.transpose([0,2,1])
N, L, A = x_train.shape
print(model_test.shape)

# + [markdown] id="oBHDFfwXbIjo"
# #### Create model

# + id="O7x9BQcORGbG"
keras.backend.clear_session()

# l2 regularization
l2 = keras.regularizers.l2(1e-6)

# input layer
inputs = keras.layers.Input(shape=(L,A))

# layer 1 - convolution
nn = keras.layers.Conv1D(filters=32, kernel_size=19, strides=1, activation=None, use_bias=False, padding='same',
                         kernel_regularizer=l2,
                         )(inputs)
nn = keras.layers.BatchNormalization()(nn)
nn = keras.layers.Activation('exponential')(nn)
nn = keras.layers.MaxPool1D(pool_size=4)(nn)
nn = keras.layers.Dropout(0.1)(nn)

# layer 2 - convolution
nn = keras.layers.Conv1D(filters=128, kernel_size=7, strides=1, activation=None, use_bias=False, padding='same',
                         kernel_regularizer=l2,
                         )(nn)
nn = keras.layers.BatchNormalization()(nn)
nn = keras.layers.Activation('relu')(nn)
nn = keras.layers.MaxPool1D(pool_size=25)(nn)
nn = keras.layers.Dropout(0.1)(nn)

# layer 3 - Fully-connected
nn = keras.layers.Flatten()(nn)
nn = keras.layers.Dense(512, activation=None, use_bias=False, kernel_regularizer=l2,
                        )(nn)
nn = keras.layers.BatchNormalization()(nn)
nn = keras.layers.Activation('relu')(nn)
nn = keras.layers.Dropout(0.5)(nn)

# Output layer
logits = keras.layers.Dense(1, activation='linear', use_bias=True, kernel_initializer='glorot_normal',
                            bias_initializer='zeros')(nn)
outputs = keras.layers.Activation('sigmoid')(logits)

# create keras model
model = keras.Model(inputs=inputs, outputs=outputs)

# set up optimizer and metrics
optimizer = keras.optimizers.Adam(learning_rate=0.001)
loss = keras.losses.BinaryCrossentropy(from_logits=False, label_smoothing=0.0)

# compile model
auroc = keras.metrics.AUC(curve='ROC', name='auroc')
aupr = keras.metrics.AUC(curve='PR', name='aupr')
model.compile(optimizer=optimizer, loss=loss, metrics=[auroc, aupr])

# + [markdown] id="mTDb92s4bKpN"
# #### Train model

# + colab={"base_uri": "https://localhost:8080/"} id="y6nomjibRaQQ" outputId="684a3c99-c87f-4472-8ca6-cbf8d35c6899"
# early stopping callback
es_callback = keras.callbacks.EarlyStopping(monitor='val_aupr', #'val_aupr',#
                                            patience=10,
                                            verbose=1,
                                            mode='max',
                                            restore_best_weights=False)
# reduce learning rate callback
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_aupr',
                                              factor=0.2,
                                              patience=3,
                                              min_lr=1e-7,
                                              mode='max',
                                              verbose=1)

# train model
history = model.fit(x_train, y_train,
                    epochs=100,
                    batch_size=128,
                    shuffle=True,
                    validation_data=(x_valid, y_valid),
                    callbacks=[es_callback, reduce_lr])

# + [markdown] id="c0H9hDG5bPJQ"
# #### Evaluate model on test set

# + colab={"base_uri": "https://localhost:8080/"} id="38b8P9TPRalV" outputId="d440bd2d-d470-476e-8bcd-20f2063fc345"
# evaluate model on test set
results = model.evaluate(x_test, y_test, batch_size=512, verbose=0)
print(results)

# + [markdown] id="vpxh3xAEbSnW"
# # Interpretability analysis
#
# #### calculate attribution maps

# + colab={"base_uri": "https://localhost:8080/"} id="b4vT-hNo0kAW" outputId="2a78b1a3-57af-49f5-bb52-8bb6de034003"
# NOTE(review): pos_index and num_analyze are defined in a LATER cell — this
# cell only runs after out-of-order execution; confirm intended cell order.
X_model = model_test[pos_index[:num_analyze]]
print(X_model.shape)

# + colab={"base_uri": "https://localhost:8080/"} id="EcFbFcU05E0F" outputId="6becbcb3-b853-4cfb-fc35-7362f2effdbd"
# Fetch the simulated-genomics fixtures from the DeepLIFT model-storage repo.
! [[ ! -f sequences.simdata.gz ]] && wget https://raw.githubusercontent.com/AvantiShri/model_storage/db919b12f750e5844402153233249bb3d24e9e9a/deeplift/genomics/sequences.simdata.gz
! [[ ! -f keras2_conv1d_record_5_model_PQzyq_modelJson.json ]] && wget https://raw.githubusercontent.com/AvantiShri/model_storage/b6e1d69/deeplift/genomics/keras2_conv1d_record_5_model_PQzyq_modelJson.json
! [[ ! -f keras2_conv1d_record_5_model_PQzyq_modelWeights.h5 ]] && wget https://raw.githubusercontent.com/AvantiShri/model_storage/b6e1d69/deeplift/genomics/keras2_conv1d_record_5_model_PQzyq_modelWeights.h5
! [[ ! -f test.txt.gz ]] && wget https://raw.githubusercontent.com/AvantiShri/model_storage/9aadb769735c60eb90f7d3d896632ac749a1bdd2/deeplift/genomics/test.txt.gz
# !pip install simdna
import simdna.synthetic as synthetic
import gzip

data_filename = "sequences.simdata.gz"

#read in the data in the testing set
test_ids_fh = gzip.open("test.txt.gz","rb")
ids_to_load = [x.decode("utf-8").rstrip("\n") for x in test_ids_fh]
data = synthetic.read_simdata_file(data_filename, ids_to_load=ids_to_load)

def one_hot_encode_along_channel_axis(sequence):
    # One-hot encode a DNA string into an (len(sequence), 4) int8 array,
    # channel order A, C, G, T; 'N' positions stay all-zero.
    to_return = np.zeros((len(sequence),4), dtype=np.int8)
    seq_to_one_hot_fill_in_array(zeros_array=to_return,
                                 sequence=sequence, one_hot_axis=1)
    return to_return

def seq_to_one_hot_fill_in_array(zeros_array, sequence, one_hot_axis):
    # Fill a pre-allocated zeros array in place; one_hot_axis selects whether
    # the 4 channels run along axis 0 or axis 1.
    assert one_hot_axis==0 or one_hot_axis==1
    if (one_hot_axis==0):
        assert zeros_array.shape[1] == len(sequence)
    elif (one_hot_axis==1):
        assert zeros_array.shape[0] == len(sequence)
    #will mutate zeros_array
    for (i,char) in enumerate(sequence):
        if (char=="A" or char=="a"):
            char_idx = 0
        elif (char=="C" or char=="c"):
            char_idx = 1
        elif (char=="G" or char=="g"):
            char_idx = 2
        elif (char=="T" or char=="t"):
            char_idx = 3
        elif (char=="N" or char=="n"):
            continue #leave that pos as all 0's
        else:
            raise RuntimeError("Unsupported character: "+str(char))
        if (one_hot_axis==0):
            zeros_array[char_idx,i] = 1
        elif (one_hot_axis==1):
            zeros_array[i,char_idx] = 1

onehot_data = np.array([one_hot_encode_along_channel_axis(seq) for seq in data.sequences])

# NOTE(review): labels here are 800 random 0/1 values (100 zeros, shuffled),
# i.e. deliberately uninformative — presumably a control experiment; confirm.
nums = np.ones(800)
nums[:100] = 0
np.random.shuffle(nums)

history = model.fit(onehot_data, nums,
                    epochs=100,
                    batch_size=128,
                    shuffle=True,
                    validation_split = 0.2,
                    callbacks=[es_callback, reduce_lr])

# + id="fr0aFRa86ED2"
# Truncate y_test to match the 800 sequences used above.
y_test=y_test[0:800]

# + id="jsLbAtfYRh9C"
from tfomics import explain

# number of test sequences to analyze (set this to 500 because expintgrad takes long)
num_analyze = 500

# get positive label sequences and sequence model
pos_index = np.where(y_test[:,0] == 1)[0]
X = onehot_data[pos_index[:num_analyze]]
# Uniform 0.25 background used as the "ground truth" model here.
X_model = np.full((800, 200, 4), 0.25)
# X_model = model_test[pos_index[:num_analyze]]

# instantiate explainer class
explainer = explain.Explainer(model, class_index=0)

# calculate attribution maps
mutagenesis_scores = explainer.mutagenesis(X, class_index=None)
saliency_scores = explainer.saliency_maps(X)
smoothgrad_scores = explainer.smoothgrad(X, num_samples=50, mean=0.0, stddev=0.1)
intgrad_scores = explainer.integrated_grad(X, baseline_type='zeros')
expintgrad_scores = explainer.expected_integrated_grad(X, num_baseline=50, baseline_type='random')

# reduce attribution maps to 1D scores
mut_scores = explain.l2_norm(mutagenesis_scores)
sal_scores = explain.grad_times_input(X, saliency_scores)
sg_scores = explain.grad_times_input(X, smoothgrad_scores)
int_scores = explain.grad_times_input(X, intgrad_scores)
expint_scores = explain.grad_times_input(X, expintgrad_scores)

# + colab={"base_uri": "https://localhost:8080/"} id="d2H6pkk31MzA" outputId="37c77b05-e991-4e99-94e2-b32c992138bb"
# NOTE(review): `evaluate` and `threshold` are imported/defined in the NEXT
# cell — another out-of-order execution dependency; confirm intended order.
print(mut_scores.shape)
print(X_model.shape)
print(x_test[:500, :, :].shape)
mutagenesis_roc, mutagenesis_pr = evaluate.interpretability_performance(mut_scores, x_test[:500, :, :], threshold)
mut_scores.shape

# + id="pnW_H1HcRh9C"
from tfomics import evaluate

# compare distribution of attribution scores at positions with and without motifs
#I think Xmodel is negative background, e.g. 0.25 for probability of each base in each position
# interpretability_performance(scores, x_model, threshold=0.01)
#     Compare attribution scores to ground truth (e.g. x_model).
#     scores --> (N,L)
#     x_model --> (N,L,A)
threshold = 0.1

mutagenesis_roc, mutagenesis_pr = evaluate.interpretability_performance(mut_scores, X_model, threshold)
saliency_roc, saliency_pr = evaluate.interpretability_performance(sal_scores, X_model, threshold)
smoothgrad_roc, smoothgrad_pr = evaluate.interpretability_performance(sg_scores, X_model, threshold)
intgrad_roc, intgrad_pr = evaluate.interpretability_performance(int_scores, X_model, threshold)
expintgrad_roc, expintgrad_pr = evaluate.interpretability_performance(expint_scores, X_model, threshold)

# + [markdown] id="vflrW2bbaocK"
# #### Print interpretability AUROC

# + id="-m6HumHYRiB9" colab={"base_uri": "https://localhost:8080/"} outputId="6f5fe7b0-2a7d-4b28-a0e8-392c0729e83e"
print("%s: %.3f+/-%.3f"%('mutagenesis', np.mean(mutagenesis_roc), np.std(mutagenesis_roc)))
print("%s: %.3f+/-%.3f"%('saliency', np.mean(saliency_roc), np.std(saliency_roc)))
print("%s: %.3f+/-%.3f"%('smoothgrad', np.mean(smoothgrad_roc), np.std(smoothgrad_roc)))
print("%s: %.3f+/-%.3f"%('intgrad', np.mean(intgrad_roc), np.std(intgrad_roc)))
print("%s: %.3f+/-%.3f"%('expintgrad', np.mean(expintgrad_roc), np.std(expintgrad_roc)))

# + [markdown] id="pqCJ2ClJatNN"
# #### Print interpretability AUPR

# + id="bVl9ybasRiEs" colab={"base_uri": "https://localhost:8080/"} outputId="bd3c372c-e254-48d3-d754-6d5270833ae5"
print("%s: %.3f+/-%.3f"%('mutagenesis', np.mean(mutagenesis_pr), np.std(mutagenesis_pr)))
print("%s: %.3f+/-%.3f"%('saliency', np.mean(saliency_pr), np.std(saliency_pr)))
print("%s: %.3f+/-%.3f"%('smoothgrad', np.mean(smoothgrad_pr), np.std(smoothgrad_pr)))
print("%s: %.3f+/-%.3f"%('intgrad', np.mean(intgrad_pr), np.std(intgrad_pr)))
print("%s: %.3f+/-%.3f"%('expintgrad', np.mean(expintgrad_pr), np.std(expintgrad_pr)))

# + [markdown] id="nvbieUaFfPwp"
# #### Plot interpretability performance box plot

# + id="5_vWc40dZpM5" colab={"base_uri": "https://localhost:8080/", "height": 339} outputId="67222a0b-e97e-4d53-bd56-0fedcddda4e4"
scores = [saliency_roc, mutagenesis_roc, intgrad_roc, smoothgrad_roc, expintgrad_roc]
score_names = ['saliency_scores', 'mut_scores', 'intgrad_scores', 'smoothgrad_scores', 'exp_intgrad_scores']
names = ['Saliency', 'Mutagenesis', 'Integrated-Grad', 'SmoothGrad', 'Expected IntGrad']

fig = plt.figure(figsize=(14,4))
ax = plt.subplot(1,2,1)
ax.boxplot(scores);
plt.ylabel('AUROC', fontsize=12)
plt.yticks([0.6, 0.7, 0.8, 0.9, 1.0], fontsize=12)
plt.xticks(range(1,6), names, fontsize=12, rotation=45)
ax.set_ybound([.55,1.05])
#ax.tick_params(labelbottom=False)

scores = [saliency_pr, mutagenesis_pr, intgrad_pr, smoothgrad_pr, expintgrad_pr]
ax = plt.subplot(1,2,2)
ax.boxplot(scores);
plt.ylabel('AUPR', fontsize=12)
plt.yticks([0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], fontsize=12)
plt.xticks(range(1,6), names, fontsize=12, rotation=45)
ax.set_ybound([.35,1.05])
#ax.tick_params(labelbottom=False)

# + id="Hx20Wc6r1cC7" colab={"base_uri": "https://localhost:8080/", "height": 697} outputId="1a2ea13d-3fde-4e1f-839e-7e310746b19c"
from tfomics import impress

score_names = ['saliency_scores', 'mut_scores', 'intgrad_scores', 'smoothgrad_scores', 'exp_intgrad_scores']
names = ['Saliency', 'Mutagenesis', 'Integrated-Grad', 'SmoothGrad', 'Expected IntGrad']

scores = [saliency_roc, mutagenesis_roc, intgrad_roc, smoothgrad_roc, expintgrad_roc]
fig = plt.figure(figsize=(7,4))
ax = impress.box_violin_plot(scores, ylabel='AUROC', xlabel=names)
ax.set_ybound([.55,1.0])
#ax.tick_params(labelbottom=False)

scores = [saliency_pr, mutagenesis_pr, intgrad_pr, smoothgrad_pr, expintgrad_pr]
fig = plt.figure(figsize=(7,4))
ax = impress.box_violin_plot(scores, ylabel='AUPR', xlabel=names)
ax.set_ybound([.35,1.0])
#ax.tick_params(labelbottom=False)

# + [markdown] id="4taVqjHhau-w"
# #### Plot a
comparison of the attribution maps # # + id="nLcHI8mvRnEo" colab={"base_uri": "https://localhost:8080/", "height": 411} outputId="9a5c3ac8-8fd1-45a7-e6b5-aaa3026be31c" from tfomics import impress index = 3 # sequence index x = np.expand_dims(X[index], axis=0) # convert attribution maps to pandas dataframe for logomaker scores = np.expand_dims(saliency_scores[index], axis=0) saliency_df = impress.grad_times_input_to_df(x, scores) scores = np.expand_dims(smoothgrad_scores[index], axis=0) smoothgrad_df = impress.grad_times_input_to_df(x, scores) scores = np.expand_dims(intgrad_scores[index], axis=0) intgrad_df = impress.grad_times_input_to_df(x, scores) scores = np.expand_dims(expintgrad_scores[index], axis=0) expintgrad_df = impress.grad_times_input_to_df(x, scores) scores = np.expand_dims(mutagenesis_scores[index], axis=0) mutagenesis_df = impress.l2_norm_to_df(x, scores) # ground truth sequence model model_df = impress.prob_to_info_df(X_model[index]) # plot comparison fig = plt.figure(figsize=(20,7)) ax = plt.subplot(6,1,1) impress.plot_attribution_map(saliency_df, ax, figsize=(20,1)) plt.ylabel('Saliency') ax = plt.subplot(6,1,2) impress.plot_attribution_map(smoothgrad_df, ax, figsize=(20,1)) plt.ylabel('SmoothGrad') ax = plt.subplot(6,1,3) impress.plot_attribution_map(intgrad_df, ax, figsize=(20,1)) plt.ylabel('IntGrad') ax = plt.subplot(6,1,4) impress.plot_attribution_map(expintgrad_df, ax, figsize=(20,1)) plt.ylabel('ExpIntGrad') ax = plt.subplot(6,1,5) impress.plot_attribution_map(mutagenesis_df, ax, figsize=(20,1)) plt.ylabel('Mutagenesis'); ax = plt.subplot(6,1,6) impress.plot_attribution_map(model_df, ax, figsize=(20,1)) plt.ylabel('Model'); # + [markdown] id="fi6ic9Qe3hVm" # # Calculate some statistics about signal and noise of attribution scores # + id="TliDwk37_zbH" from tfomics import evaluate # compare distribution of attribution scores at positions with and without motifs threshold = 0.1 top_k = 10 mut_signal, mut_noise_max, mut_noise_mean, 
mut_noise_topk = evaluate.signal_noise_stats(mut_scores, X_model, top_k, threshold) sal_signal, sal_noise_max, sal_noise_mean, sal_noise_topk = evaluate.signal_noise_stats(sal_scores, X_model, top_k, threshold) sg_signal, sg_noise_max, sg_noise_mean, sg_noise_topk = evaluate.signal_noise_stats(sg_scores, X_model, top_k, threshold) int_signal, int_noise_max, int_noise_mean, int_noise_topk = evaluate.signal_noise_stats(int_scores, X_model, top_k, threshold) expint_signal, expint_noise_max, expint_noise_mean, expint_noise_topk = evaluate.signal_noise_stats(expint_scores, X_model, top_k, threshold) # + [markdown] id="H_07I9MZCCx8" # # Plot signal to noise ratio of attribution scores # # Signal is determined by position with the motif and the noise is set to the max attribution score where motifs are not embedded # # + id="htnKMJw8AMe7" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="c98389f1-99e9-41d3-a089-f968406a1e3f" from tfomics import impress scores = [sal_signal/sal_noise_mean, mut_signal/mut_noise_mean, int_signal/int_noise_mean, sg_signal/sg_noise_mean, expint_signal/expint_noise_mean] names = ['Saliency', 'Mutagenesis', 'Integrated-Grad', 'SmoothGrad', 'Expected IntGrad'] fig = plt.figure(figsize=(6,4)) ax = impress.box_violin_plot(scores, ylabel='SNR', xlabel=names) ax.set_ybound([0, 40]) # + id="hIudrMA3S8HZ" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="082d2706-eed4-4c28-9c92-98ac22b9a91b" scores = [sal_signal/sal_noise_topk, mut_signal/mut_noise_topk, int_signal/int_noise_topk, sg_signal/sg_noise_topk, expint_signal/expint_noise_topk] names = ['Saliency', 'Mutagenesis', 'Integrated-Grad', 'SmoothGrad', 'Expected IntGrad'] fig = plt.figure(figsize=(6,4)) ax = impress.box_violin_plot(scores, ylabel='SNR', xlabel=names) ax.set_ybound([0, 10]) # + [markdown] id="KYsdoR1OB6Bs" # # Plot average signal and top-k noise of attribution maps # # Signal and noise are determined by positions with motifs and without 
motifs # + id="QQpNa6q5_zeO" colab={"base_uri": "https://localhost:8080/", "height": 337} outputId="5fd41ec5-3929-491c-9178-d1b264d48e40" fig = plt.figure(figsize=(5,5)) norm = np.max([mut_signal, mut_noise_topk]) plt.scatter(mut_noise_topk/norm, mut_signal/norm, s=50, alpha=0.4) norm = np.max([sal_signal, sal_noise_topk]) plt.scatter(sal_noise_topk/norm, sal_signal/norm, s=50, alpha=0.4) norm = np.max([sg_signal, sg_noise_topk]) plt.scatter(sg_noise_topk/norm, sg_signal/norm, s=50, alpha=0.4) norm = np.max([int_signal, int_noise_topk]) plt.scatter(int_noise_topk/norm, int_signal/norm, s=50, alpha=0.4) norm = np.max([expint_signal, expint_noise_topk]) plt.scatter(expint_noise_topk/norm, expint_signal/norm, s=50, alpha=0.4) plt.plot([0,1], [0,1], '--k') # + id="VMgGo97bNuv_"
TFomics_interpretability_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Visualization in Depth With Bokeh
# For details on bokeh, see http://bokeh.pydata.org/en/latest/docs/user_guide.html#userguide

# ## Setup

# NOTE(review): this notebook targets Python 2 and an old bokeh release --
# `vplot` (unused below) and the 'resize' tool were removed in later bokeh
# versions, and `legend=` was replaced by `legend_label=`; confirm the pinned
# bokeh version before running.
from bokeh.plotting import figure, output_file, show, output_notebook, vplot
import random
import numpy as np
import pandas as pd

output_notebook()  # Use to see output in the Jupyter notebook

import bokeh
bokeh.__version__

# ## Objectives
# - Describe how to create interactive visualizations using bokeh
#   - Running example and visualization goals
#   - How to approach a new system
#   - Hover
#   - Widgets
# - Study bokeh as a system
#   - Function specification - what it provides programmers
#   - Design (which has client and server parts)

# ## How to Learn a New System
# Steps
# - Find an example close to what you want
# - Create an environment that runs the example
# - Abstract the key concepts of how it works
# - Transform the example into what you want

# ## Running Example - Biological Data

from IPython.display import Image
Image(filename='biological_data.png')

df_bio = pd.read_csv("biological_data.csv")
df_bio.head()

# Desired visualization
# - Scatterplot of rate vs. yield
# - Hover shows the evolutionary "line"
# - Widgets can specify color (and legend) for values of line

# ## Step 1: Find something close

# simple scatterplot of the two phenotype columns
plot = figure(plot_width=400, plot_height=400)
plot.circle(df_bio['rate'], df_bio['yield'])
plot.xaxis.axis_label = 'rate'
plot.yaxis.axis_label = 'yield'
show(plot)

# ### Step 1a: Distinguish "evolutionary lines" by color
# Let's distinguish the lines with colors. First, how many lines are there?

# What are the possible colors
df_bio['line'].unique()

# Generate a plot with a different color for each line
colors = {'HA': 'red', 'HR': 'green', 'UA': 'blue', 'WT': 'purple'}
plot = figure(plot_width=700, plot_height=800)
plot.title.text = 'Phenotypes for evolutionary lines.'
# one glyph per evolutionary line so each gets its own color and legend entry
for line in list(colors.keys()):
    df = df_bio[df_bio.line == line]
    color = colors[line]
    plot.circle(df['rate'], df['yield'], color=color, legend=line)
plot.legend.location = "top_right"
show(plot)

# What colors are possible to use? Check out bokeh.palettes
import bokeh.palettes as palettes
# NOTE(review): Python 2 print statement; use print(...) for Py3 portability.
print palettes.__doc__
#palettes.magma(4)

# **Exercise**: Handle colors for the plot for an arbitrary number of evolutionary lines. (Hint: construct the colors dictionary using the values of 'line' and a palette.)

# +
# Generate the colors dictionary
# Fill this in....

# +
# Plot with the generated palette
# Fill this in ...
# -

# ### Bokeh tools
# Tools can be specified and positioned when the Figure is created. The interaction workflow is (a) select a tool (identified by vertical blue line), (b) perform gesture for tool.

# tools can be named in a comma-separated string...
TOOLS = 'box_zoom,box_select,resize,reset'
plot = figure(plot_width=200, plot_height=200, title=None, tools=TOOLS)
plot.scatter(range(10), range(10))
show(plot)

# ...or passed as a list of Tool model instances
from bokeh.models import HoverTool, BoxSelectTool
TOOLS = [HoverTool(), BoxSelectTool()]
plot = figure(plot_width=200, plot_height=200, title=None, tools=TOOLS)
show(plot)

# ### Synthesizing Bokeh Concepts (Classes)
# **Figure**
# - Created using the figure()
# - Controls the size of the plot
# - Allows other elements to be added
# - Has properties for title, x-axis label, y-axis label
#
# **Glyph**
# - Mark that's added to the plot - circle, line, polygon
# - Created using Figure methods plot.circle(df['rate'], df['yield'], color=color, legend=line)
#
# **Tool**
# - Provides user interactions with the graph using gestures
# - Created using a separate constructor (

# ### Adding a Hover Tool
# Based on our knowledge of Bokeh concepts, is a Tool associated with Figure or Glyph?

# Which classes will be involved in hovering:
# - Plot & Tool only
# - Glyph only
# - Tool and Glyph

# Start with some examples. First, simple hovering.

# +
from bokeh.plotting import figure, output_file, show
from bokeh.models import HoverTool, BoxSelectTool

output_file("toolbar.html")

TOOLS = [BoxSelectTool(), HoverTool()]

p = figure(plot_width=400, plot_height=400, title=None, tools=TOOLS)
p.circle([1, 2, 3, 4, 5], [2, 5, 8, 2, 7], size=10)

show(p)
# -

# Now add ad-hoc data

# +
from bokeh.plotting import figure, output_file, show, ColumnDataSource
from bokeh.models import HoverTool

output_file("toolbar.html")

# tooltip fields: $index/$x/$y are built-ins; @desc pulls from the data source
hover = HoverTool(
    tooltips=[
        ("index", "$index"),
        ("(x,y)", "(@x, @y)"),
        ("desc", "@desc"),
    ]
)

p = figure(plot_width=400, plot_height=400, tools=[hover], title="Mouse over the dots")

source = ColumnDataSource(
    data={
        'x': [1, 2, 3, 4, 5],
        'y': [2, 5, 8, 2, 7],
        'desc': ['A', 'b', 'C', 'd', 'E'],
    }
)

p.circle('x', 'y', size=20, source=source)

show(p)
# -

# **Exercise**: Plot the biological data with colors and a hover that shows the evolutionary line.

# ## Bokeh Widgets
# See widget.py and my_app.py

# ## Bokeh Server

Image(filename='BokehArchitecture.png')
PreFall2018/Visualization-in-Depth/Visualization-in-depth.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt

# <figure><img src="../../../images/maxwell.png" width=700 />
#
# $k_s = 100 N/m$
#
# $b = 6 Ns/m$
#
# $l_{s_0} = 0.2 m$
#
# At $t = 0$, the length $l = 0.3 m$ and it is at rest. At $t = 2 s$ the length increases to $l = 0.35 m$.

# <figure><img src="../../../images/Voight.png" width=700 />

# ## Task for now
#
# Implement the Kelvin model.
#
# <figure><img src="../../../images/Kelvin.png" width=700 />
#
# ## Task for next Lecture
#
# Set the parameters of the model so that the response of the model is similar to the one shown in the figure below. In this study the initial length of the fibre is 10 mm. Then the length decreases to 7 mm with different velocities [1].
#
#
# <figure><img src="./Loock2008.png" width=700 />
# Figure adapted from [1]
#
#
# [1] <NAME>.; <NAME>.; <NAME>. Viscoelastic properties of passive skeletal muscle in compression: Stress-relaxation behaviour and constitutive modelling. Journal of Biomechanics, v. 41, n. 7, p. 1555–1566, 2008.
courses/modsim2018/renatowatanabe/Lecture8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Assess the error associated with using annual-mean tendencies to calculate WMT
# This is motivated by the advent of OM4_highres and the call for diagnostics. Is it sufficient to have the tendencies output as an annual mean, or do they need to be monthly or higher?

# Import packages
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
from xhistogram.xarray import histogram
from xgcm import Grid

# Don't display filter warnings
import warnings
warnings.filterwarnings("ignore")

# Set figure font size
plt.rcParams.update({'font.size':14})

# +
# Load data on native grid
rootdir = '/archive/gam/MOM6-examples/ice_ocean_SIS2/Baltic_OM4_025/1yr/'
gridname = 'native'
prefix = '19000101.ocean_'
# Diagnostics were saved into different files
suffixs = ['1900','heat','salt']
ds = xr.Dataset()
for suffix in suffixs:
    filename = prefix+gridname+'_'+suffix+'*.nc'
    dsnow = xr.open_mfdataset(rootdir+filename)
    ds = xr.merge([ds,dsnow])
gridname = '19000101.ocean_static.nc'
grid = xr.open_dataset(rootdir+gridname).squeeze()

# Specify the diffusive tendency terms
processes=['boundary forcing','vertical diffusion','neutral diffusion', 'frazil ice','internal heat']
terms = {}
terms['heat'] = {'boundary forcing':'boundary_forcing_heat_tendency',
                 'vertical diffusion':'opottempdiff',
                 'neutral diffusion':'opottemppmdiff',
                 'frazil ice':'frazil_heat_tendency',
                 'internal heat':'internal_heat_heat_tendency'}
colors = {}
colors['heat'] = {'boundary forcing':'tab:blue',
                  'vertical diffusion':'tab:orange',
                  'neutral diffusion':'tab:green',
                  'frazil ice':'tab:red',
                  'internal heat':'tab:purple'}

# Specify the tracer range and bin widths (\delta\lambda) for the calculation
delta_l = 0.2
lmin = -2
lmax = 10
bins = np.arange(lmin,lmax,delta_l)

# Specify constants for the reference density and the specific heat capacity
rho0 = 1035.
Cp = 3992.
# -

# Transformation from the monthly tendencies: bin each tendency term by the
# (monthly) temperature field, then normalize by the bin width.
G_monthly = xr.Dataset()
for process in processes:
    term = terms['heat'][process]
    nanmask = np.isnan(ds[term])
    # weights: rho0 * (tendency / (Cp*rho0)) * cell area  [the rho0 cancels;
    # kept in this form for symmetry with the WMT definition]
    G_monthly[process] = histogram(ds['temp'].where(~nanmask).squeeze(),
                                   bins=[bins], dim=['xh','yh','zl'],
                                   weights=( rho0*(ds[term]/(Cp*rho0) )*grid['areacello'] ).where(~nanmask).squeeze()
                                   )/np.diff(bins)

# Transformation from the annual-mean tendencies: time-average first, then bin
# by the annual-mean temperature.
G_yearly = xr.Dataset()
for process in processes:
    term = terms['heat'][process]
    nanmask = np.isnan(ds[term].mean('time'))
    G_yearly[process] = histogram(ds['temp'].mean('time').where(~nanmask).squeeze(),
                                  bins=[bins], dim=['xh','yh','zl'],
                                  weights=( rho0*(ds[term].mean('time')/(Cp*rho0) )*grid['areacello'] ).where(~nanmask).squeeze()
                                  )/np.diff(bins)

# Plot the time-mean transformation (solid: monthly; dashed: annual-mean)
fig, ax = plt.subplots(figsize=(12,8))
total_monthly = xr.zeros_like(G_monthly[processes[0]].mean('time'))
total_yearly = xr.zeros_like(G_yearly[processes[0]])
for process in processes:
    ax.plot(G_monthly['temp_bin'],G_monthly[process].mean('time'),label=process,color=colors['heat'][process])
    ax.plot(G_yearly['temp_bin'],G_yearly[process],linestyle='--',color=colors['heat'][process])
    total_monthly += G_monthly[process].mean('time')
    total_yearly += G_yearly[process]
# BUG FIX: the totals were plotted against G['temp_bin'], but `G` is never
# defined in this notebook (NameError); use the coordinate of each dataset.
ax.plot(G_monthly['temp_bin'],total_monthly,color='k',label='TOTAL')
ax.plot(G_yearly['temp_bin'],total_yearly,linestyle='--',color='k')
ax.legend(loc='lower left')
ax.set_xlabel('Temperature [$^\circ C$]')
ax.set_ylabel('Transformation [$kg\,s^{-1}$]');
notebooks/calc_wmt_timeaveraging_error.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # TensorFlow-Slim # [TensorFlow-Slim](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim) is a high-level API for building TensorFlow models. TF-Slim makes defining models in TensorFlow easier, cutting down on the number of lines required to define models and reducing overall clutter. In particular, TF-Slim shines in image domain problems, and weights pre-trained on the [ImageNet dataset](http://www.image-net.org/) for many famous CNN architectures are provided for [download](https://github.com/tensorflow/models/tree/master/slim#pre-trained-models). # # *Note: Unlike previous notebooks, not every cell here is necessarily meant to run. Some are just for illustration.* # ## VGG-16 # To show these benefits, this tutorial will focus on [VGG-16](https://arxiv.org/abs/1409.1556). This style of architecture came in 2nd during the 2014 ImageNet Large Scale Visual Recognition Challenge and is famous for its simplicity and depth. The model looks like this: # # ![vgg16](Figures/vgg16.png) # # The architecture is pretty straight-forward: simply stack multiple 3x3 convolutional filters one after another, interleave with 2x2 maxpools, double the number of convolutional filters after each maxpool, flatten, and finish with fully connected layers. A couple ideas behind this model: # # - Instead of using larger filters, VGG notes that the receptive field of two stacked layers of 3x3 filters is 5x5, and with 3 layers, 7x7. Using 3x3's allows VGG to insert additional non-linearities and requires fewer weight parameters to learn. # # - Doubling the width of the network every time the features are spatially downsampled (maxpooled) gives the model more representational capacity while achieving spatial compression. 
# ### TensorFlow Core
# In code, setting up the computation graph for prediction with just TensorFlow Core API is kind of a lot:

# +
import tensorflow as tf

# Set up the data loading:
images, labels = ...

# Define the model
# Each conv block: 3x3 conv -> bias add -> ReLU; channel widths double after
# every 2x2 maxpool, following the VGG-16 architecture.
with tf.name_scope('conv1_1') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope)

with tf.name_scope('conv1_2') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope)

pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

with tf.name_scope('conv2_1') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(bias, name=scope)

with tf.name_scope('conv2_2') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(conv2, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(bias, name=scope)

pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')

with tf.name_scope('conv3_1') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv3 = tf.nn.relu(bias, name=scope)

with tf.name_scope('conv3_2') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv3 = tf.nn.relu(bias, name=scope)

with tf.name_scope('conv3_3') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv3 = tf.nn.relu(bias, name=scope)

pool3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')

with tf.name_scope('conv4_1') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(pool3, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv4 = tf.nn.relu(bias, name=scope)

with tf.name_scope('conv4_2') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv4 = tf.nn.relu(bias, name=scope)

with tf.name_scope('conv4_3') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv4 = tf.nn.relu(bias, name=scope)

pool4 = tf.nn.max_pool(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')

with tf.name_scope('conv5_1') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(pool4, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv5 = tf.nn.relu(bias, name=scope)

with tf.name_scope('conv5_2') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(conv5, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv5 = tf.nn.relu(bias, name=scope)

with tf.name_scope('conv5_3') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32, stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(conv5, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv5 = tf.nn.relu(bias, name=scope)

pool5 = tf.nn.max_pool(conv5, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool5')

with tf.name_scope('fc_6') as scope:
    # flatten the final 7x7x512 feature map for the fully connected layers
    flat = tf.reshape(pool5, [-1, 7*7*512])
    weights = tf.Variable(tf.truncated_normal([7*7*512, 4096], dtype=tf.float32, stddev=1e-1), name='weights')
    mat = tf.matmul(flat, weights)
    biases = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(mat, biases)
    fc6 = tf.nn.relu(bias, name=scope)
    fc6_drop = tf.nn.dropout(fc6, keep_prob=0.5, name='dropout')

with tf.name_scope('fc_7') as scope:
    weights = tf.Variable(tf.truncated_normal([4096, 4096], dtype=tf.float32, stddev=1e-1), name='weights')
    # BUG FIX: feed the dropout output forward (fc6_drop was computed but never
    # used), matching the TF-Slim version below which applies dropout6.
    mat = tf.matmul(fc6_drop, weights)
    biases = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(mat, biases)
    fc7 = tf.nn.relu(bias, name=scope)
    fc7_drop = tf.nn.dropout(fc7, keep_prob=0.5, name='dropout')

with tf.name_scope('fc_8') as scope:
    weights = tf.Variable(tf.truncated_normal([4096, 1000], dtype=tf.float32, stddev=1e-1), name='weights')
    # BUG FIX: same wiring issue -- use fc7_drop, not fc7.
    mat = tf.matmul(fc7_drop, weights)
    biases = tf.Variable(tf.constant(0.0, shape=[1000], dtype=tf.float32), trainable=True, name='biases')
    bias = tf.nn.bias_add(mat, biases)
    predictions = bias
# -

# Understanding every line of this model isn't important. The main point to notice is how much space this takes up. Several of the above lines (conv2d, bias_add, relu, maxpool) can obviously be combined to cut down on the size a bit, and you could also try to compress the code with some clever `for` looping, but all at the cost of sacrificing readability. With this much code, there is high potential for bugs or typos (for example, the fully connected layers originally bypassed their dropout ops -- fixed above), and modifying or refactoring the code becomes a huge pain.
#
# By the way, although VGG-16's paper was titled "Very Deep Convolutional Networks for Large-Scale Image Recognition", it isn't even considered a particularly deep network by today's standards. [Residual Networks](https://arxiv.org/abs/1512.03385) (2015) started beating state-of-the-art results with 50, 101, and 152 layers in their first incarnation, before really going off the deep end and getting up to 1001 layers and beyond.
# I'll spare you from me typing out the uncompressed TensorFlow Core code for that.

# ### TF-Slim
# Enter TF-Slim. The same VGG-16 model can be expressed as follows:

# +
import tensorflow as tf
slim = tf.contrib.slim

# Set up the data loading:
images, labels = ...

# Define the model:
# arg_scope sets shared defaults (activation, initializer, regularizer) for
# every conv2d/fully_connected layer declared inside the block.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    activation_fn=tf.nn.relu,
                    weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                    weights_regularizer=slim.l2_regularizer(0.0005)):
    net = slim.repeat(images, 2, slim.conv2d, 64, [3, 3], scope='conv1')
    net = slim.max_pool2d(net, [2, 2], scope='pool1')
    net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
    net = slim.max_pool2d(net, [2, 2], scope='pool2')
    net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
    net = slim.max_pool2d(net, [2, 2], scope='pool3')
    net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
    net = slim.max_pool2d(net, [2, 2], scope='pool4')
    net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
    net = slim.max_pool2d(net, [2, 2], scope='pool5')
    net = slim.fully_connected(net, 4096, scope='fc6')
    net = slim.dropout(net, 0.5, scope='dropout6')
    net = slim.fully_connected(net, 4096, scope='fc7')
    net = slim.dropout(net, 0.5, scope='dropout7')
    net = slim.fully_connected(net, 1000, activation_fn=None, scope='fc8')
predictions = net
# -

# Much cleaner. For the TF-Slim version, it's much more obvious what the network is doing, writing it is faster, and typos and bugs are much less likely.
#
# Things to notice:
#
# - Weight and bias variables for every layer are automatically generated and tracked. Also, the "in_channel" parameter for determining weight dimension is automatically inferred from the input. This allows you to focus on what layers you want to add to the model, without worrying as much about boilerplate code.
#
# - The repeat() function allows you to add the same layer multiple times. In terms of variable scoping, repeat() will add "_#" to the scope to distinguish the layers, so we'll still have layers of scope "`conv1_1`, `conv1_2`, `conv2_1`, etc...".
#
# - The non-linear activation function (here: ReLU) is wrapped directly into the layer. In more advanced architectures with batch normalization, that's included as well.
#
# - With slim.argscope(), we're able to specify defaults for common parameter arguments, such as the type of activation function or weights_initializer. Of course, these defaults can still be overridden in any individual layer, as demonstrated in the final fully connected layer (fc8).
#
# If you're reusing one of the famous architectures (like VGG-16), TF-Slim already has them defined, so it becomes even easier:

# +
import tensorflow as tf
slim = tf.contrib.slim
vgg = tf.contrib.slim.nets.vgg

# Set up the data loading:
images, labels = ...

# Define the model:
# BUG FIX: the slim model function is named vgg_16 (not vgg16), consistent
# with the vgg.vgg_16(...) call in the pre-trained-weights example below.
predictions = vgg.vgg_16(images)
# -

# ## Pre-Trained Weights
# TF-Slim provides weights pre-trained on the ImageNet dataset available for [download](https://github.com/tensorflow/models/tree/master/slim#pre-trained-models). First a quick tutorial on saving and restoring models:

# ### Saving and Restoring
# One of the nice features of modern machine learning frameworks is the ability to save model parameters in a clean way. While this may not have been a big deal for the MNIST logistic regression model because training only took a few seconds, it's easy to see why you wouldn't want to have to re-train a model from scratch every time you wanted to do inference or make a small change if training takes days or weeks.
#
# TensorFlow provides this functionality with its [Saver()](https://www.tensorflow.org/programmers_guide/variables#saving_and_restoring) class.
# While I just said that saving the weights for the MNIST logistic regression model isn't necessary because of how it is easy to train, let's do it anyway for illustrative purposes:

# +
import tensorflow as tf
from tqdm import trange
from tensorflow.examples.tutorials.mnist import input_data

# Import data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Create the model (logistic regression: y = xW + b)
x = tf.placeholder(tf.float32, [None, 784], name='x')
W = tf.Variable(tf.zeros([784, 10]), name='W')
b = tf.Variable(tf.zeros([10]), name='b')
y = tf.nn.bias_add(tf.matmul(x, W), b, name='y')

# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10], name='y_')
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Variable Initializer
init_op = tf.global_variables_initializer()

# Create a Saver object for saving weights
saver = tf.train.Saver()

# Create a Session object, initialize all variables
sess = tf.Session()
sess.run(init_op)

# Train
for _ in trange(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# Save model: writes the variable values to a checkpoint
save_path = saver.save(sess, "./log_reg_model.ckpt")
print("Model saved in file: %s" % save_path)

# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Test accuracy: {0}'.format(sess.run(accuracy,
                                           feed_dict={x: mnist.test.images,
                                                      y_: mnist.test.labels})))
sess.close()
# -

# Note, the differences from what we worked with yesterday:
#
# - The ops and variables of the graph (`x`, `W`, `b`, `y`, `y_`) now carry explicit 'name' properties. There are many reasons to do this, but here, it will help us identify which variables are which when restoring.
# - We create a Saver() object, and after training we call saver.save() to write the variables of the model to a checkpoint file.
This will create a series of files containing our saved model. # # Otherwise, the code is more or less the same. # # To restore the model: # + import tensorflow as tf from tqdm import trange from tensorflow.examples.tutorials.mnist import input_data # Import data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) # Create a Session object, initialize all variables sess = tf.Session() # Restore weights saver = tf.train.import_meta_graph('./log_reg_model.ckpt.meta') saver.restore(sess, tf.train.latest_checkpoint('./')) print("Model restored.") graph = tf.get_default_graph() x = graph.get_tensor_by_name("x:0") y = graph.get_tensor_by_name("y:0") y_ = graph.get_tensor_by_name("y_:0") # Test trained model correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print('Test accuracy: {0}'.format(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))) sess.close() # - # Importantly, notice that we didn't have to retrain the model. Instead, the graph and all variable values were loaded directly from our checkpoint files. In this example, this probably takes just as long, but for more complex models, the utility of saving/restoring is immense. # ### TF-Slim Model Zoo # One of the biggest and most surprising unintended benefits of the ImageNet competition was deep networks' transfer learning properties: CNNs trained on ImageNet classification could be re-used as general purpose feature extractors for other tasks, such as object detection. Training on ImageNet is very intensive and expensive in both time and computation, and requires a good deal of set-up. As such, the availability of weights already pre-trained on ImageNet has significantly accelerated and democratized deep learning research. 
# # Pre-trained models of several famous architectures are listed in the TF Slim portion of the [TensorFlow repository](https://github.com/tensorflow/models/tree/master/slim#pre-trained-models). Also included are the papers that proposed them and their respective performances on ImageNet. Side note: remember though that accuracy is not the only consideration when picking a network; memory and speed are important to keep in mind as well. # # Each entry has a link that allows you to download the checkpoint file of the pre-trained network. Alternatively, you can download the weights as part of your program. A tutorial can be found [here](https://github.com/tensorflow/models/blob/master/slim/slim_walkthrough.ipynb), but the general idea: # + from datasets import dataset_utils import tensorflow as tf url = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz" checkpoints_dir = './checkpoints' if not tf.gfile.Exists(checkpoints_dir): tf.gfile.MakeDirs(checkpoints_dir) dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir) # + import os import tensorflow as tf from nets import vgg slim = tf.contrib.slim # Load images images = ... # Pre-process processed_images = ... # Create the model, use the default arg scope to configure the batch norm parameters. with slim.arg_scope(vgg.vgg_arg_scope()): logits, _ = vgg.vgg_16(processed_images, num_classes=1000, is_training=False) probabilities = tf.nn.softmax(logits) # Load checkpoint values init_fn = slim.assign_from_checkpoint_fn( os.path.join(checkpoints_dir, 'vgg_16.ckpt'), slim.get_model_variables('vgg_16'))
02A_TensorFlow-Slim.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="FhGuhbZ6M5tl" # ##### Copyright 2018 The TensorFlow Authors. # + cellView="form" colab_type="code" id="AwOEIRJC6Une" colab={} #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + cellView="form" colab_type="code" id="KyPEtTqk6VdG" colab={} #@title MIT License # # Copyright (c) 2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # + [markdown] colab_type="text" id="EIdT9iu_Z4Rb" # # 회귀: 자동차 연비 예측하기 # + [markdown] colab_type="text" id="bBIlTPscrIT9" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ko/r1/tutorials/keras/basic_regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ko/r1/tutorials/keras/basic_regression.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub) 소스 보기</a> # </td> # </table> # + [markdown] id="YYwLLNVaaJU9" colab_type="text" # Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 # 불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다. # 이 번역에 개선할 부분이 있다면 # [tensorflow/docs](https://github.com/tensorflow/docs) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다. # 문서 번역이나 리뷰에 참여하려면 # [<EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로 # 메일을 보내주시기 바랍니다. # + [markdown] colab_type="text" id="AHp3M9ZmrIxj" # *회귀*(regression)는 가격이나 확률 같이 연속된 출력 값을 예측하는 것이 목적입니다. 이와는 달리 *분류*(classification)는 여러개의 클래스 중 하나의 클래스를 선택하는 것이 목적입니다(예를 들어, 사진에 사과 또는 오렌지가 포함되어 있을 때 어떤 과일인지 인식하는 것). # # 이 노트북은 [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) 데이터셋을 사용하여 1970년대 후반과 1980년대 초반의 자동차 연비를 예측하는 모델을 만듭니다. 이 기간에 출시된 자동차 정보를 모델에 제공하겠습니다. 이 정보에는 실린더 수, 배기량, 마력(horsepower), 공차 중량 같은 속성이 포함됩니다. # # 이 예제는 `tf.keras` API를 사용합니다. 자세한 내용은 [케라스 가이드](https://www.tensorflow.org/r1/guide/keras)를 참고하세요. 
# + colab_type="code" id="moB4tpEHxKB3" colab={}
# 산점도 행렬을 그리기 위해 seaborn 패키지를 설치합니다
# !pip install seaborn

# + colab_type="code" id="1rRo8oNqZ-Rj" colab={}
# Fixed: `unicode_literals` was listed twice in this __future__ import.
from __future__ import absolute_import, division, print_function, unicode_literals

import pathlib

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

print(tf.__version__)

# + [markdown] colab_type="text" id="F_72b0LCNbjx"
# ## Auto MPG 데이터셋
#
# 이 데이터셋은 [UCI 머신 러닝 저장소](https://archive.ics.uci.edu/ml/)에서 다운로드할 수 있습니다.

# + [markdown] colab_type="text" id="gFh9ne3FZ-On"
# ### 데이터 구하기
# 먼저 데이터셋을 다운로드합니다.

# + colab_type="code" id="p9kxxgzvzlyz" colab={}
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
dataset_path

# + [markdown] colab_type="text" id="nslsRLh7Zss4"
# 판다스를 사용하여 데이터를 읽습니다.

# + colab_type="code" id="CiX2FI4gZtTt" colab={}
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
                'Acceleration', 'Model Year', 'Origin']
# "?" marks missing values in the raw file; "\t" begins a trailing comment
# (the car name) on each data row.
raw_dataset = pd.read_csv(dataset_path, names=column_names,
                          na_values="?", comment='\t',
                          sep=" ", skipinitialspace=True)

dataset = raw_dataset.copy()
dataset.tail()

# + [markdown] colab_type="text" id="3MWuJTKEDM-f"
# ### 데이터 정제하기
#
# 이 데이터셋은 일부 데이터가 누락되어 있습니다.

# + colab_type="code" id="JEJHhN65a2VV" colab={}
dataset.isna().sum()

# + [markdown] colab_type="text" id="9UPN0KBHa_WI"
# 문제를 간단하게 만들기 위해서 누락된 행을 삭제하겠습니다.
# + colab_type="code" id="4ZUDosChC1UN" colab={} dataset = dataset.dropna() # + [markdown] colab_type="text" id="8XKitwaH4v8h" # `"Origin"` 열은 수치형이 아니고 범주형이므로 원-핫 인코딩(one-hot encoding)으로 변환하겠습니다: # + colab_type="code" id="gWNTD2QjBWFJ" colab={} origin = dataset.pop('Origin') # + colab_type="code" id="ulXz4J7PAUzk" colab={} dataset['USA'] = (origin == 1)*1.0 dataset['Europe'] = (origin == 2)*1.0 dataset['Japan'] = (origin == 3)*1.0 dataset.tail() # + [markdown] colab_type="text" id="Cuym4yvk76vU" # ### 데이터셋을 훈련 세트와 테스트 세트로 분할하기 # # 이제 데이터를 훈련 세트와 테스트 세트로 분할합니다. # # 테스트 세트는 모델을 최종적으로 평가할 때 사용합니다. # + colab_type="code" id="qn-IGhUE7_1H" colab={} train_dataset = dataset.sample(frac=0.8,random_state=0) test_dataset = dataset.drop(train_dataset.index) # + [markdown] colab_type="text" id="J4ubs136WLNp" # ### 데이터 조사하기 # # 훈련 세트에서 몇 개의 열을 선택해 산점도 행렬을 만들어 살펴 보겠습니다. # + colab_type="code" id="oRKO_x8gWKv-" colab={} sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde") # + [markdown] colab_type="text" id="gavKO_6DWRMP" # 전반적인 통계도 확인해 보죠: # + colab_type="code" id="yi2FzC3T21jR" colab={} train_stats = train_dataset.describe() train_stats.pop("MPG") train_stats = train_stats.transpose() train_stats # + [markdown] colab_type="text" id="Db7Auq1yXUvh" # ### 특성과 레이블 분리하기 # # 특성에서 타깃 값 또는 "레이블"을 분리합니다. 이 레이블을 예측하기 위해 모델을 훈련시킬 것입니다. # + colab_type="code" id="t2sluJdCW7jN" colab={} train_labels = train_dataset.pop('MPG') test_labels = test_dataset.pop('MPG') # + [markdown] colab_type="text" id="mRklxK5s388r" # ### 데이터 정규화 # # 위 `train_stats` 통계를 다시 살펴보고 각 특성의 범위가 얼마나 다른지 확인해 보죠. # + [markdown] colab_type="text" id="-ywmerQ6dSox" # 특성의 스케일과 범위가 다르면 정규화(normalization)하는 것이 권장됩니다. 특성을 정규화하지 않아도 모델이 *수렴할 수 있지만*, 훈련시키기 어렵고 입력 단위에 의존적인 모델이 만들어집니다. # # 노트: 의도적으로 훈련 세트만 사용하여 통계치를 생성했습니다. 이 통계는 테스트 세트를 정규화할 때에도 사용됩니다. 이는 테스트 세트를 모델이 훈련에 사용했던 것과 동일한 분포로 투영하기 위해서입니다. 
# + colab_type="code" id="JlC5ooJrgjQF" colab={}
def norm(x):
    """Z-score normalize a feature frame using TRAINING-set statistics."""
    return (x - train_stats['mean']) / train_stats['std']

normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)

# + [markdown] colab_type="text" id="BuiClDk45eS4"
# 정규화된 데이터를 사용하여 모델을 훈련합니다.
#
# 주의: 여기에서 입력 데이터를 정규화하기 위해 사용한 통계치(평균과 표준편차)는 원-핫 인코딩과 마찬가지로 모델에 주입되는 모든 데이터에 적용되어야 합니다. 여기에는 테스트 세트는 물론 모델이 실전에 투입되어 얻은 라이브 데이터도 포함됩니다.

# + [markdown] colab_type="text" id="SmjdzxKzEu1-"
# ## 모델

# + [markdown] colab_type="text" id="6SWtkIjhrZwa"
# ### 모델 만들기
#
# 모델을 구성해 보죠. 여기에서는 두 개의 완전 연결(densely connected) 은닉층으로 `Sequential` 모델을 만들겠습니다. 출력 층은 하나의 연속적인 값을 반환합니다. 나중에 두 번째 모델을 만들기 쉽도록 `build_model` 함수로 모델 구성 단계를 감싸겠습니다.

# + colab_type="code" id="c26juK7ZG8j-" colab={}
def build_model():
    """Build and compile a two-hidden-layer regression model.

    Returns a compiled `keras.Sequential` with a single linear output unit,
    MSE loss, and MAE/MSE metrics.
    """
    model = keras.Sequential([
        # Derive the input width from the actual feature set instead of
        # hard-coding 9, so the model stays correct if columns change.
        layers.Dense(64, activation=tf.nn.relu,
                     input_shape=[len(train_dataset.keys())]),
        layers.Dense(64, activation=tf.nn.relu),
        layers.Dense(1)  # single continuous MPG prediction
    ])

    optimizer = tf.keras.optimizers.RMSprop(0.001)

    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=['mean_absolute_error', 'mean_squared_error'])
    return model

# + colab_type="code" id="cGbPb-PHGbhs" colab={}
model = build_model()

# + [markdown] colab_type="text" id="Sj49Og4YGULr"
# ### 모델 확인
#
# `.summary` 메서드를 사용해 모델에 대한 간단한 정보를 출력합니다.

# + colab_type="code" id="ReAD0n6MsFK-" colab={}
model.summary()

# + [markdown] colab_type="text" id="Vt6W50qGsJAL"
# 모델을 한번 실행해 보죠. 훈련 세트에서 `10` 샘플을 하나의 배치로 만들어 `model.predict` 메서드를 호출해 보겠습니다.

# + colab_type="code" id="-d-gBaVtGTSC" colab={}
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result

# + [markdown] colab_type="text" id="QlM8KrSOsaYo"
# 제대로 작동하는 것 같네요. 결괏값의 크기와 타입이 기대했던 대로입니다.

# + [markdown] colab_type="text" id="0-qWCsh6DlyH"
# ### 모델 훈련
#
# 이 모델을 1,000번의 에포크(epoch) 동안 훈련합니다. 훈련 정확도와 검증 정확도는 `history` 객체에 기록됩니다.
# + colab_type="code" id="sD7qHCmNIOY0" colab={}
# 에포크가 끝날 때마다 점(.)을 출력해 훈련 진행 과정을 표시합니다
class PrintDot(keras.callbacks.Callback):
    """Prints one '.' per epoch (newline every 100) as a progress indicator."""

    # `logs=None` matches the keras.callbacks.Callback base signature,
    # where the metrics dict is an optional argument.
    def on_epoch_end(self, epoch, logs=None):
        if epoch % 100 == 0:
            print('')
        print('.', end='')

EPOCHS = 1000

# verbose=0 suppresses the default per-epoch log; PrintDot replaces it.
history = model.fit(
    normed_train_data, train_labels,
    epochs=EPOCHS, validation_split=0.2, verbose=0,
    callbacks=[PrintDot()])

# + [markdown] colab_type="text" id="tQm3pc0FYPQB"
# `history` 객체에 저장된 통계치를 사용해 모델의 훈련 과정을 시각화해 보죠.

# + colab_type="code" id="4Xj91b-dymEy" colab={}
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()

# + colab_type="code" id="B6XriGbVPh2t" colab={}
import matplotlib.pyplot as plt  # already imported above; kept so this cell runs standalone

def plot_history(history):
    """Plot train/validation MAE and MSE curves from a Keras History object."""
    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch

    plt.figure(figsize=(8, 12))

    plt.subplot(2, 1, 1)
    plt.xlabel('Epoch')
    plt.ylabel('Mean Abs Error [MPG]')
    plt.plot(hist['epoch'], hist['mean_absolute_error'],
             label='Train Error')
    plt.plot(hist['epoch'], hist['val_mean_absolute_error'],
             label='Val Error')
    plt.ylim([0, 5])
    plt.legend()

    plt.subplot(2, 1, 2)
    plt.xlabel('Epoch')
    plt.ylabel('Mean Square Error [$MPG^2$]')
    plt.plot(hist['epoch'], hist['mean_squared_error'],
             label='Train Error')
    plt.plot(hist['epoch'], hist['val_mean_squared_error'],
             label='Val Error')
    plt.ylim([0, 20])
    plt.legend()
    plt.show()

plot_history(history)

# + [markdown] colab_type="text" id="AqsuANc11FYv"
# 이 그래프를 보면 수 백번 에포크를 진행한 이후에는 모델이 거의 향상되지 않는 것 같습니다. `model.fit` 메서드를 수정하여 검증 점수가 향상되지 않으면 자동으로 훈련을 멈추도록 만들어 보죠. 에포크마다 훈련 상태를 점검하기 위해 *EarlyStopping 콜백(callback)*을 사용하겠습니다. 지정된 에포크 횟수 동안 성능 향상이 없으면 자동으로 훈련이 멈춥니다.
#
# 이 콜백에 대해 더 자세한 내용은 [여기](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping)를 참고하세요.
# + colab_type="code" id="fdMZuhUgzMZ4" colab={} model = build_model() # patience 매개변수는 성능 향상을 체크할 에포크 횟수입니다 early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) history = model.fit(normed_train_data, train_labels, epochs=EPOCHS, validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()]) plot_history(history) # + [markdown] colab_type="text" id="3St8-DmrX8P4" # 이 그래프를 보면 검증 세트의 평균 오차가 약 +/- 2 MPG입니다. 좋은 결과인가요? 이에 대한 평가는 여러분에게 맡기겠습니다. # # 모델을 훈련할 때 사용하지 않았던 **테스트 세트**에서 모델의 성능을 확인해 보죠. 이를 통해 모델이 실전에 투입되었을 때 모델의 성능을 짐작할 수 있습니다: # + colab_type="code" id="jl_yNr5n1kms" colab={} loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0) print("테스트 세트의 평균 절대 오차: {:5.2f} MPG".format(mae)) # + [markdown] colab_type="text" id="ft603OzXuEZC" # ## 예측 # # 마지막으로 테스트 세트에 있는 샘플을 사용해 MPG 값을 예측해 보겠습니다: # + colab_type="code" id="Xe7RXH3N3CWU" colab={} test_predictions = model.predict(normed_test_data).flatten() plt.scatter(test_labels, test_predictions) plt.xlabel('True Values [MPG]') plt.ylabel('Predictions [MPG]') plt.axis('equal') plt.axis('square') plt.xlim([0,plt.xlim()[1]]) plt.ylim([0,plt.ylim()[1]]) _ = plt.plot([-100, 100], [-100, 100]) # + [markdown] id="mU1jBsRLaCeY" colab_type="text" # 모델이 꽤 잘 예측한 것 같습니다. 오차의 분포를 살펴 보죠. # + colab_type="code" id="f-OHX4DiXd8x" colab={} error = test_predictions - test_labels plt.hist(error, bins = 25) plt.xlabel("Prediction Error [MPG]") _ = plt.ylabel("Count") # + [markdown] id="3PkzkjFkaCed" colab_type="text" # 가우시안 분포가 아니지만 아마도 훈련 샘플의 수가 매우 작기 때문일 것입니다. # + [markdown] colab_type="text" id="vgGQuV-yqYZH" # ## 결론 # # 이 노트북은 회귀 문제를 위한 기법을 소개합니다. # # * 평균 제곱 오차(MSE)는 회귀 문제에서 자주 사용하는 손실 함수입니다(분류 문제에서 사용하는 손실 함수와 다릅니다). # * 비슷하게 회귀에서 사용되는 평가 지표도 분류와 다릅니다. 많이 사용하는 회귀 지표는 평균 절댓값 오차(MAE)입니다. # * 수치 입력 데이터의 특성이 여러 가지 범위를 가질 때 동일한 범위가 되도록 각 특성의 스케일을 독립적으로 조정해야 합니다. # * 훈련 데이터가 많지 않다면 과대적합을 피하기 위해 은닉층의 개수가 적은 소규모 네트워크를 선택하는 방법이 좋습니다. # * 조기 종료(Early stopping)은 과대적합을 방지하기 위한 좋은 방법입니다.
site/ko/r1/tutorials/keras/basic_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="zbPJ4u2Xb_Mf" colab_type="code" outputId="8ac17970-7f5f-4abd-963c-475c4562bdc4" executionInfo={"status": "ok", "timestamp": 1586435771260, "user_tz": -480, "elapsed": 11855, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 478} import pandas as pd import numpy as np import datetime import tqdm import matplotlib.pyplot as plt import seaborn as sns sns.set() # %matplotlib inline data0 = pd.read_csv("data/1019_SP.csv").dropna() data1 = pd.read_csv("data/1019_CL.csv").dropna() data2 = pd.read_csv("data/1019_OVX.csv").dropna() data3 = pd.read_csv("data/1019_GC.csv").dropna() data4 = pd.read_csv("data/1019_GVZ.csv").dropna() Date = data0.get("Date") Date = pd.to_datetime(Date) Close = data0.get("Close") Close_cl = data1.get("Close") Close_ovx = data2.get("Close") Close_gc = data3.get("Close") Close_gvz = data4.get("Close") dict = {"Date": Date, "close": Close, "close_cl": Close_cl, "close_ovx": Close_ovx, "close_gc": Close_gc, "close_gvz": Close_gvz, } df = pd.DataFrame(dict) df.set_index('Date', inplace=True) df = df.dropna() print("總共:%d個交易時間點" % (len(Date))) print("去除空白值後剩下:%d個交易時間點" % (len(df))) df.tail(10) # + id="XdqaOc7xcK5o" colab_type="code" colab={} dataset = pd.DataFrame({ 'Close':df['close'], 'Close_CL':df['close_cl'], 'Close_OVX':df['close_ovx'], #'Close_GC':df['close_gc'], #'Close_GVZ':df['close_gvz'], 'return': df['close'], }) feature_names = list(dataset.columns[:-1]) # + id="_CZ88I-cdWky" colab_type="code" outputId="85ec4675-889a-4c87-d83d-c5bb756c12b7" executionInfo={"status": "ok", "timestamp": 1586435772883, "user_tz": -480, "elapsed": 1538, "user": {"displayName": 
"\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 51} print("before dropping NaN", dataset.shape) dataset = dataset.dropna() print("after dropping NaN", dataset.shape) # + id="Mm-x7N8ZdY_p" colab_type="code" outputId="0ae062e2-82b8-4d96-fcc8-c73819ea2017" executionInfo={"status": "ok", "timestamp": 1586435772885, "user_tz": -480, "elapsed": 1165, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 297} from sklearn.preprocessing import MinMaxScaler ss = MinMaxScaler() dataset_scaled = ss.fit_transform(dataset) dataset_scaled = pd.DataFrame(dataset_scaled, columns=dataset.columns, index=dataset.index) #dataset_scaled['return'] = dataset['return'] dataset_scaled.describe() # + id="MYoVGmRXd-bM" colab_type="code" outputId="beaf585b-6f24-4268-a4d6-a59c21987529" executionInfo={"status": "ok", "timestamp": 1586435775869, "user_tz": -480, "elapsed": 3091, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 168, "referenced_widgets": ["f2006b4e492b4c8f9399ce1bbad5e59d", "<KEY>", "c2ac3381098145beaae51d1472a4c137", "9446eef784e04f7ba146b54e6a9720ba", "ff2a0c0d132e4b428258d1937fedf70e", "b90aa2c9e0cf49cea6c2ae8245289a91", "<KEY>", "d63855faef6b48afbb08ea0665083369"]} from keras.utils import np_utils #每張圖包含幾天的資料 time_period = 20 #預設幾日後的漲跌 day = 10 cnn_x = [] cnn_y = [] indexs = [] dataset_scaled_x= dataset_scaled[feature_names] for i in tqdm.tqdm_notebook(range(0, len(dataset_scaled)-time_period-1-day)): cnn_x.append(dataset_scaled_x.iloc[i:i+time_period].values) r = dataset_scaled['return'].iloc[i+time_period-1+day]/dataset_scaled['return'].iloc[i+time_period-1] if r > 1: r = 0 else: r = 1 rr = 
np_utils.to_categorical(r, num_classes=2) cnn_y.append(rr) indexs.append(dataset_scaled.index[i+time_period-1]) cnn_x = np.array(cnn_x) cnn_y = np.array(cnn_y) indexes = np.array(indexs) # + id="TCBNdYS0E-xR" colab_type="code" colab={} #2010~2018年為訓練和測試,2019年則為驗證 import datetime cnn_x_t = cnn_x[indexes < datetime.datetime(2019,1,1)] cnn_y_t = cnn_y[indexes < datetime.datetime(2019,1,1)] cnn_x_validation = cnn_x[indexes > datetime.datetime(2019,1,1)] cnn_y_validation = cnn_y[indexes > datetime.datetime(2019,1,1)] # + id="fiW7hZeFN5zf" colab_type="code" colab={} from sklearn.model_selection import train_test_split cnn_x_train, cnn_x_test, cnn_y_train, cnn_y_test = train_test_split(cnn_x_t, cnn_y_t, test_size=0.2, random_state =7, stratify=cnn_y_t, shuffle = True) # + id="EVihjDsseSFd" colab_type="code" colab={} cnn_x_train = np.stack((cnn_x_train,)*3,axis=-1) cnn_x_test = np.stack((cnn_x_test,)*3,axis=-1) cnn_x_validation = np.stack((cnn_x_validation,)*3,axis=-1) # + id="aE8Wu-yVeWXA" colab_type="code" outputId="a1ed4556-b6a2-445b-ee50-8d9a2c7a9fc2" executionInfo={"status": "ok", "timestamp": 1586435786630, "user_tz": -480, "elapsed": 4564, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 862} fig = plt.figure(figsize = (15,15)) columns = rows = 5 for i in range(1,columns*rows+1): index = np.random.randint(len(cnn_x_train)) img = cnn_x_train[index] fig.add_subplot(rows,columns,i) plt.axis("off") plt.title(str(index)+' class = '+str(np.argmax(cnn_y_train[index]))) plt.subplots_adjust(wspace=0.2,hspace=0.2) plt.imshow(img) plt.show() # + id="zc4KPIFveZON" colab_type="code" outputId="0158f1bb-f498-4182-9b11-3c0b4c68d602" executionInfo={"status": "ok", "timestamp": 1586435793068, "user_tz": -480, "elapsed": 9842, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} 
colab={"base_uri": "https://localhost:8080/", "height": 442} import tensorflow.keras import tensorflow.keras.layers as layers from tensorflow.keras.models import Sequential from tensorflow.keras.initializers import he_normal input_shape = cnn_x_train[0].shape model = Sequential() model.add(layers.Conv2D(filters=32, kernel_size=(3,3), activation='relu', padding="same", input_shape=input_shape)) model.add(layers.Conv2D(filters=64, kernel_size=(3,3), activation='relu', padding="same")) model.add(layers.MaxPooling2D(pool_size=(2,2))) model.add(layers.Dropout(0.25)) model.add(layers.Flatten()) model.add(layers.Dense(units=128, activation='relu')) model.add(layers.Dropout(0.5)) model.add(layers.Dense(units=2, activation='softmax')) model.compile(loss='categorical_crossentropy',optimizer="Adam", metrics=['accuracy']) print(model.summary()) # + id="hrpvc1b0epEK" colab_type="code" outputId="7fb27fa5-5278-415c-8d8d-2b315035a8f1" executionInfo={"status": "ok", "timestamp": 1586435818836, "user_tz": -480, "elapsed": 35098, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 1000} from tensorflow.keras.callbacks import EarlyStopping earlystop = EarlyStopping(monitor='val_loss', patience=10, verbose=1) history = model.fit(cnn_x_train, cnn_y_train, batch_size=100, epochs=250, verbose=1, validation_data=(cnn_x_test,cnn_y_test), callbacks=[earlystop], ) # + id="GTYzyv5fevXq" colab_type="code" outputId="22d168ee-5bf3-4de5-d83c-9e1629758835" executionInfo={"status": "ok", "timestamp": 1586435818837, "user_tz": -480, "elapsed": 34743, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 119} o_loss,o_accuracy = model.evaluate(cnn_x_train,cnn_y_train) print("對訓練資料的:\nLoss: %.2f, Accuracy: %.2f" % (o_loss, o_accuracy)) 
n_loss,n_accuracy = model.evaluate(cnn_x_test,cnn_y_test) print("對測試資料的:\nLoss: %.2f, Accuracy: %.2f" % (n_loss, n_accuracy)) # + id="ZLJ7p_IDfQnI" colab_type="code" outputId="c91d01a9-8483-4a8b-86e4-3a2162459edb" executionInfo={"status": "ok", "timestamp": 1586435819955, "user_tz": -480, "elapsed": 35679, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 695} plt.style.use("ggplot") plt.figure(figsize=(10, 5)) plt.plot(np.log(history.history['loss']), color = 'green', label='Loss') plt.plot(np.log(history.history['val_loss']), color = 'blue', label='Val_Loss') plt.legend() plt.title('Training loss based on CNN') plt.ylabel('Value') plt.xlabel('Number of epochs') plt.show() plt.style.use("ggplot") plt.figure(figsize=(10, 5)) plt.plot(history.history['accuracy'], color = 'green', label='accuracy') plt.plot(history.history['val_accuracy'], color = 'blue', label='Val_accuracy') plt.legend() plt.title('Accuracy based on CNN') plt.ylabel('Value') plt.xlabel('Number of epochs') plt.show() # + id="qBKUFhrfuOpD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 88} outputId="41a368e5-d8b7-4ec0-d4d8-dc93bbd8e656" executionInfo={"status": "ok", "timestamp": 1586435820599, "user_tz": -480, "elapsed": 36150, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} pre = model.predict_classes(cnn_x_test) pre = pd.Series(pre) # + id="dczFL93_fQqA" colab_type="code" outputId="2ee4ee41-bb0e-4c7a-f1ac-534b21e19087" executionInfo={"status": "ok", "timestamp": 1586435820599, "user_tz": -480, "elapsed": 35987, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 438} from sklearn.metrics import confusion_matrix 
#將onehot編碼轉回數組cnn_x_validation cnn_y_test = np.argmax(cnn_y_test, axis=1) xcnn_y_test = pd.Series(cnn_y_test) LABELS = ["Rise","Fall"] conf_matrix = confusion_matrix(xcnn_y_test, pre) t_size = len(cnn_y_test) t_size1 = (cnn_y_test<1).sum() t_guess = ((cnn_y_test<1).sum())/(len(cnn_y_test)) TP = conf_matrix[0][0] FN = conf_matrix[0][1] FP = conf_matrix[1][0] TN = conf_matrix[1][1] accuracy = (TP+TN)/(TP+FN+FP+TN) precision = TP/(TP+FP) recall = TP/(TP+FN) F1 = 2*((precision*recall)/(precision+recall)) print("TP: %.2f, FN: %.2f, FP: %.2f, TN: %.2f" % (TP,FN,FP,TN)) print("對訓練集資料的:Loss: %.2f, Accuracy: %.2f" % (o_loss, o_accuracy)) print("對測試集資料的:Loss: %.2f, Accuracy: %.2f" % (n_loss, n_accuracy)) print("\n測試資料總共:%.2f 筆, 其中漲的筆數為: %.2f, 全部猜漲猜對的機率是: %.2f" % (t_size, t_size1, t_guess)) print("該測試集資料的:\nAccuracy: %.2f, Precision: %.2f, Recall: %.2f\nF1 score: %.2f" % (accuracy, precision,recall,F1)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d",center=0.7,cmap = 'GnBu'); plt.title("Confusion matrix") plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() # + id="hn6b7KjEpsy7" colab_type="code" outputId="7d6a9218-1e01-4329-91a9-3807a9128afb" executionInfo={"status": "ok", "timestamp": 1586435820600, "user_tz": -480, "elapsed": 35832, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 285} pre = model.predict_classes(cnn_x_validation) pre = pd.Series(pre) pre.hist() # + id="DXqRgCaafQsE" colab_type="code" outputId="ef790695-6c47-4aae-c871-49ab320be15b" executionInfo={"status": "ok", "timestamp": 1586435820601, "user_tz": -480, "elapsed": 35694, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 438} from sklearn.metrics import confusion_matrix 
#將onehot編碼轉回數組cnn_x_validation cnn_y_validation = np.argmax(cnn_y_validation, axis=1) xcnn_y_validation = pd.Series(cnn_y_validation) LABELS = ["Rise","Fall"] conf_matrix = confusion_matrix(xcnn_y_validation, pre) t_size = len(cnn_y_validation) t_size1 = (cnn_y_validation<1).sum() t_guess = ((cnn_y_validation<1).sum())/(len(cnn_y_validation)) TP = conf_matrix[0][0] FN = conf_matrix[0][1] FP = conf_matrix[1][0] TN = conf_matrix[1][1] accuracy = (TP+TN)/(TP+FN+FP+TN) precision = TP/(TP+FP) recall = TP/(TP+FN) F1 = 2*((precision*recall)/(precision+recall)) print("TP: %.2f, FN: %.2f, FP: %.2f, TN: %.2f" % (TP,FN,FP,TN)) print("對訓練集資料的:Loss: %.2f, Accuracy: %.2f" % (o_loss, o_accuracy)) print("對測試集資料的:Loss: %.2f, Accuracy: %.2f" % (n_loss, n_accuracy)) print("\n測試資料總共:%.2f 筆, 其中漲的筆數為: %.2f, 全部猜漲猜對的機率是: %.2f" % (t_size, t_size1, t_guess)) print("該測試集資料的:\nAccuracy: %.2f, Precision: %.2f, Recall: %.2f\nF1 score: %.2f" % (accuracy, precision,recall,F1)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d",center=0.7,cmap = 'GnBu'); plt.title("Confusion matrix") plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() # + id="FTn-teHjfQug" colab_type="code" colab={}
experiments/class3/[1_Final]CNN5_v1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="ktbQAC-Xfvku" colab_type="text" # ## Data Directory # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" id="FQ0GwLD7fkO3" colab_type="code" colab={} import os print(os.listdir("../input")) print(os.listdir("../input/dataset/dataset")) # + [markdown] id="rtcL6uLDfkO_" colab_type="text" # ## Install Dependencies # + id="BtFHREU2fkPA" colab_type="code" colab={} import torch print(torch.__version__) print(torch.cuda.device_count()) print(torch.cuda.is_available()) # + [markdown] id="hPjrVskrfkPD" colab_type="text" # ## Import libraries # + id="KBoTqWBlfkPE" colab_type="code" colab={} import os import cv2 import torch import torch.nn as nn import torch.optim as optim import torchvision.transforms as transforms import numpy as np import pandas as pd from torch.utils import data from torch.utils.data import DataLoader from torch.optim.lr_scheduler import MultiStepLR # + [markdown] id="3y6zGj92fkPG" colab_type="text" # ## Hyper-parameters # + id="wDOKqTl6fkPH" colab_type="code" colab={} dataroot = "../input/dataset/dataset/" ckptroot = "./" lr = 1e-4 weight_decay = 1e-5 batch_size = 32 num_workers = 8 test_size = 0.8 shuffle = True epochs = 80 start_epoch = 0 resume = False # + [markdown] id="U7E4M2ynfkPJ" colab_type="text" # ## Helper functions # + id="swFFOrkRfkPK" colab_type="code" colab={} def toDevice(datas, device): """Enable cuda.""" imgs, angles = datas return imgs.float().to(device), angles.float().to(device) def augment(dataroot, imgName, angle): """Data augmentation.""" name = dataroot + 'IMG/' + imgName.split('\\')[-1] current_image = cv2.imread(name) if current_image is None: print(name) current_image = current_image[65:-25, :, :] if np.random.rand() < 0.5: current_image = 
cv2.flip(current_image, 1) angle = angle * -1.0 return current_image, angle # + [markdown] id="yOo86RC1fkPM" colab_type="text" # ## Load data # + id="j0n38m6vfkPN" colab_type="code" colab={} import scipy from scipy import signal def load_data(data_dir, test_size): """Load training data and train validation split""" # reads CSV file into a single dataframe variable data_df = pd.read_csv(os.path.join(data_dir, 'driving_log.csv'), names=['center', 'left', 'right', 'steering', 'throttle', 'reverse', 'speed']) # smooth data signal with `savgol_filter` data_df["steering"] = signal.savgol_filter(data_df["steering"].values.tolist(), 51, 11) # Divide the data into training set and validation set train_len = int(test_size * data_df.shape[0]) valid_len = data_df.shape[0] - train_len trainset, valset = data.random_split( data_df.values.tolist(), lengths=[train_len, valid_len]) return trainset, valset trainset, valset = load_data(dataroot, test_size) # + [markdown] id="BzY3zIFdfkPb" colab_type="text" # ## Create dataset # + id="-DpSjiP-fkPf" colab_type="code" colab={} class TripletDataset(data.Dataset): def __init__(self, dataroot, samples, transform=None): self.samples = samples self.dataroot = dataroot self.transform = transform def __getitem__(self, index): batch_samples = self.samples[index] steering_angle = float(batch_samples[3]) center_img, steering_angle_center = augment(self.dataroot, batch_samples[0], steering_angle) left_img, steering_angle_left = augment(self.dataroot, batch_samples[1], steering_angle + 0.4) right_img, steering_angle_right = augment(self.dataroot, batch_samples[2], steering_angle - 0.4) center_img = self.transform(center_img) left_img = self.transform(left_img) right_img = self.transform(right_img) return (center_img, steering_angle_center), (left_img, steering_angle_left), (right_img, steering_angle_right) def __len__(self): return len(self.samples) # + [markdown] id="FUfByxjNfkPj" colab_type="text" # ## Get data loader # + id="GV_h604FfkPk" 
colab_type="code" outputId="ebdd8196-2093-44ca-a56c-5465f4b816d6" colab={} print("==> Preparing dataset ...") def data_loader(dataroot, trainset, valset, batch_size, shuffle, num_workers): """Self-Driving vehicles simulator dataset Loader. Args: trainset: training set valset: validation set batch_size: training set input batch size shuffle: whether shuffle during training process num_workers: number of workers in DataLoader Returns: trainloader (torch.utils.data.DataLoader): DataLoader for training set testloader (torch.utils.data.DataLoader): DataLoader for validation set """ transformations = transforms.Compose( [transforms.Lambda(lambda x: (x / 127.5) - 1.0)]) # Load training data and validation data training_set = TripletDataset(dataroot, trainset, transformations) trainloader = DataLoader(training_set, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers) validation_set = TripletDataset(dataroot, valset, transformations) valloader = DataLoader(validation_set, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers) return trainloader, valloader trainloader, validationloader = data_loader(dataroot, trainset, valset, batch_size, shuffle, num_workers) # + [markdown] id="dDdWHeMifkPn" colab_type="text" # ## Define model # + id="1K-KmFHWfkPo" colab_type="code" colab={} class NetworkNvidia(nn.Module): """NVIDIA model used in the paper.""" def __init__(self): """Initialize NVIDIA model. NVIDIA model used Image normalization to avoid saturation and make gradients work better. 
Convolution: 5x5, filter: 24, strides: 2x2, activation: ELU Convolution: 5x5, filter: 36, strides: 2x2, activation: ELU Convolution: 5x5, filter: 48, strides: 2x2, activation: ELU Convolution: 3x3, filter: 64, strides: 1x1, activation: ELU Convolution: 3x3, filter: 64, strides: 1x1, activation: ELU Drop out (0.5) Fully connected: neurons: 100, activation: ELU Fully connected: neurons: 50, activation: ELU Fully connected: neurons: 10, activation: ELU Fully connected: neurons: 1 (output) the convolution layers are meant to handle feature engineering the fully connected layer for predicting the steering angle. """ super(NetworkNvidia, self).__init__() self.conv_layers = nn.Sequential( nn.Conv2d(3, 24, 5, stride=2), nn.ELU(), nn.Conv2d(24, 36, 5, stride=2), nn.ELU(), nn.Conv2d(36, 48, 5, stride=2), nn.ELU(), nn.Conv2d(48, 64, 3), nn.ELU(), nn.Conv2d(64, 64, 3), nn.Dropout(0.5) ) self.linear_layers = nn.Sequential( nn.Linear(in_features=64 * 2 * 33, out_features=100), nn.ELU(), nn.Linear(in_features=100, out_features=50), nn.ELU(), nn.Linear(in_features=50, out_features=10), nn.Linear(in_features=10, out_features=1) ) def forward(self, input): """Forward pass.""" input = input.view(input.size(0), 3, 70, 320) output = self.conv_layers(input) # print(output.shape) output = output.view(output.size(0), -1) output = self.linear_layers(output) return output # Define model print("==> Initialize model ...") model = NetworkNvidia() print("==> Initialize model done ...") # + [markdown] id="HxGAzAzDfkPu" colab_type="text" # ## Define optimizer and criterion # + id="_iH3OhLBfkPu" colab_type="code" colab={} # Define optimizer and criterion optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay) criterion = nn.MSELoss() # + [markdown] id="Pw1poQoYfkPy" colab_type="text" # ## Learning rate scheduler # + id="P1MCyJhSfkPz" colab_type="code" colab={} # learning rate scheduler scheduler = MultiStepLR(optimizer, milestones=[30, 50], gamma=0.1) # transfer to gpu device 
= torch.device('cuda' if torch.cuda.is_available() else 'cpu') print(device) # + [markdown] id="OM520SyQfkP1" colab_type="text" # ## Resume training # + id="luVRBv3CfkP2" colab_type="code" colab={} if resume: print("==> Loading checkpoint ...") checkpoint = torch.load("../input/pretrainedmodels/both-nvidia-model-61.h5", map_location=lambda storage, loc: storage) start_epoch = checkpoint['epoch'] model.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) scheduler.load_state_dict(checkpoint['scheduler']) # + [markdown] id="3f1RloYefkP3" colab_type="text" # ## Train # + id="gC9EK0w0fkP5" colab_type="code" colab={} class Trainer(object): """Trainer.""" def __init__(self, ckptroot, model, device, epochs, criterion, optimizer, scheduler, start_epoch, trainloader, validationloader): """Self-Driving car Trainer. Args: model: device: epochs: criterion: optimizer: start_epoch: trainloader: validationloader: """ super(Trainer, self).__init__() self.model = model self.device = device self.epochs = epochs self.ckptroot = ckptroot self.criterion = criterion self.optimizer = optimizer self.scheduler = scheduler self.start_epoch = start_epoch self.trainloader = trainloader self.validationloader = validationloader def train(self): """Training process.""" self.model.to(self.device) for epoch in range(self.start_epoch, self.epochs + self.start_epoch): self.scheduler.step() # Training train_loss = 0.0 self.model.train() for local_batch, (centers, lefts, rights) in enumerate(self.trainloader): # Transfer to GPU centers, lefts, rights = toDevice(centers, self.device), toDevice( lefts, self.device), toDevice(rights, self.device) # Model computations self.optimizer.zero_grad() datas = [centers, lefts, rights] for data in datas: imgs, angles = data # print("training image: ", imgs.shape) outputs = self.model(imgs) loss = self.criterion(outputs, angles.unsqueeze(1)) loss.backward() self.optimizer.step() train_loss += loss.data.item() if local_batch % 
100 == 0: print("Training Epoch: {} | Loss: {}".format(epoch, train_loss / (local_batch + 1))) # Validation self.model.eval() valid_loss = 0 with torch.set_grad_enabled(False): for local_batch, (centers, lefts, rights) in enumerate(self.validationloader): # Transfer to GPU centers, lefts, rights = toDevice(centers, self.device), toDevice( lefts, self.device), toDevice(rights, self.device) # Model computations self.optimizer.zero_grad() datas = [centers, lefts, rights] for data in datas: imgs, angles = data outputs = self.model(imgs) loss = self.criterion(outputs, angles.unsqueeze(1)) valid_loss += loss.data.item() if local_batch % 100 == 0: print("Validation Loss: {}".format(valid_loss / (local_batch + 1))) print() # Save model if epoch % 5 == 0 or epoch == self.epochs + self.start_epoch - 1: state = { 'epoch': epoch + 1, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'scheduler': self.scheduler.state_dict(), } self.save_checkpoint(state) def save_checkpoint(self, state): """Save checkpoint.""" print("==> Save checkpoint ...") if not os.path.exists(self.ckptroot): os.makedirs(self.ckptroot) torch.save(state, self.ckptroot + 'both-nvidia-model-{}.h5'.format(state['epoch'])) # + id="Ycy2rSL3fkP7" colab_type="code" outputId="3f4bf9c9-bfca-4e2e-8810-1c47af381175" colab={} print("==> Start training ...") trainer = Trainer(ckptroot, model, device, epochs, criterion, optimizer, scheduler, start_epoch, trainloader, validationloader) trainer.train()
src/train.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # DEC-11 JSQL

# ### Import Libraries

import json
# BUG FIX: the original did `import urllib` and then called
# `urllib.request.urlopen(...)`, which raises AttributeError in a fresh
# interpreter — the `request` submodule must be imported explicitly.
import urllib.request

# ### Getting Inputs

# +
# testcase 1
x = """
{
   "table name":"my_table",
   "headers":{
      "1":{
         "column name":"id",
         "data type":"integer"
      },
      "2":{
         "column name":"name",
         "data type":"varchar(30)"
      }
   },
   "records":{
      "1":[
         1,
         "Josh"
      ],
      "2":[
         2,
         "Mike"
      ],
      "3":[
         3,
         "Tom"
      ]
   }
}
"""
# -

# ### Parse Json

# ### Function to get SQL statements

def jsql(x):
    """Translate a JSON table description into SQL statements.

    Parameters
    ----------
    x : str | bytes
        JSON document with a "table name", "headers" (one object per column
        carrying "column name" and "data type") and "records" (one list of
        values per row).

    Returns
    -------
    str
        One CREATE TABLE statement followed by one INSERT statement per
        record, each terminated by ';' and a newline.
    """
    y = json.loads(x)
    table = y["table name"]

    # "col1 type1,col2 type2" — deliberately no space after the comma, to
    # match the original output format exactly.
    columns = ",".join(
        "{} {}".format(h["column name"], h["data type"])
        for h in y["headers"].values()
    )
    statements = ["CREATE TABLE {} ({});".format(table, columns)]

    for record in y["records"].values():
        # str([1, 'Josh']) -> "[1, 'Josh]'"-style repr; stripping the
        # brackets yields a SQL-compatible value list (strings come out
        # single-quoted, matching the original behavior).
        values = str(record)[1:-1]
        statements.append("INSERT INTO {} values ({});".format(table, values))

    # join + trailing newline reproduces the original's per-statement '\n'
    # without the quadratic string concatenation.
    return "\n".join(statements) + "\n"

# ### ANSWER FOR TESTCASE

answer = jsql(x)
print(answer)

# ### ANSWER FOR SAMPLE INPUT

# Guarded so that importing this module does not hit the network; when the
# cells run in a notebook kernel (or the file runs as a script) __name__ is
# "__main__" and this executes exactly as before.
if __name__ == "__main__":
    link = "https://raw.githubusercontent.com/SVCE-ACM/A-December-of-Algorithms-2020/main/src/assets/dec%2011%20sample%20input.json"
    # Context manager closes the HTTP response; the original leaked it.
    with urllib.request.urlopen(link) as f:
        myfile = f.read()
    answers = jsql(myfile)  # json.loads accepts bytes directly
    print(answers)
December-11/ipynb_rohithmsr_jsql.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Map S-sulphenylated peptide fragments on 3D Structure # The goal of this study is to systematically map the positions of S-sulphenylation of proteins onto 3D protein structures in the Protein Data Bank. # Data source: # # <NAME>, <NAME>, <NAME>, <NAME>. Diverse Redoxome Reactivity Profiles # of Carbon Nucleophiles. J Am Chem Soc. 2017 Apr 19;139(15):5588-5595. [doi: # 10.1021/jacs.7b01791](https://doi.org/10.1021/jacs.7b01791) # # Excerpt from abstract: # Analysis of sulfenic acid-reactive C-nucleophile fragments screened against a colon cancer cell proteome. Covalent ligands were identified for >1280 S-sulfenylated cysteines present in "druggable" proteins and orphan targets, revealing disparate reactivity profiles and target preferences. import requests from io import BytesIO import xlrd import pandas as pd import numpy as np import py3Dmol from pyspark.sql import SparkSession from pyspark.sql.functions import explode, split, substring_index,regexp_extract from mmtfPyspark.datasets import pdbToUniProt from ipywidgets import interact, IntSlider, widgets spark = SparkSession.builder.appName("S-Sulphenylation").getOrCreate() # + code_folding=[] # dataset = {'BTD': 'https://pubs.acs.org/doi/suppl/10.1021/jacs.7b01791/suppl_file/ja7b01791_si_002.xlsx', # 'DYn-2':'https://pubs.acs.org/doi/suppl/10.1021/jacs.7b01791/suppl_file/ja7b01791_si_003.xlsx', # 'PRD': 'https://pubs.acs.org/doi/suppl/10.1021/jacs.7b01791/suppl_file/ja7b01791_si_004.xlsx', # 'PYD': 'https://pubs.acs.org/doi/suppl/10.1021/jacs.7b01791/suppl_file/ja7b01791_si_005.xlsx', # 'TD': 'https://pubs.acs.org/doi/suppl/10.1021/jacs.7b01791/suppl_file/ja7b01791_si_006.xlsx'} # - dataset = {'BTD': ('https://pubs.acs.org/doi/suppl/10.1021/jacs.7b01791/suppl_file/ja7b01791_si_002.xlsx', 
'418@C'), 'DYn-2':('https://pubs.acs.org/doi/suppl/10.1021/jacs.7b01791/suppl_file/ja7b01791_si_003.xlsx', '333@C'), 'PRD': ('https://pubs.acs.org/doi/suppl/10.1021/jacs.7b01791/suppl_file/ja7b01791_si_004.xlsx', '333@C'), 'PYD': ('https://pubs.acs.org/doi/suppl/10.1021/jacs.7b01791/suppl_file/ja7b01791_si_005.xlsx', '333@C'), 'TD': ('https://pubs.acs.org/doi/suppl/10.1021/jacs.7b01791/suppl_file/ja7b01791_si_006.xlsx', '333@C')} w = widgets.ToggleButtons(options=dataset.keys(), description='Dataset:', disabled=False) # ## Select dataset display(w) dataset[w.value] url = dataset[w.value][0] mod_string = dataset[w.value][1] print(url, mod_string) # ### Read dataset from supplementary data excel file req = requests.get(url) # get redirected content df = pd.read_excel(BytesIO(req.content), sheet_name='Protein View') print("Dataset:",w.value) df.head() # ### Standardize representation of protein modification # Here we use the following notation for modified residues (amino acid, delta mass), here (C,333). df = df.assign(ptms=np.full((df.shape[0], 1), w.value)) df['modSites'] = df['Modified Sites'].map(lambda s: str(s).replace(mod_string, '')) df.head() # convert columns to string df[['Gene Family', 'Gene Name', 'Chromosome','Description', 'Modified Sites']] = df[['Gene Family', 'Gene Name', 'Chromosome','Description', 'Modified Sites']].astype(str) ds = spark.createDataFrame(df) ds = ds.withColumn('Description', split(ds.Description, "\|")) ds = ds.withColumn("unpName", ds.Description.getItem(1)) ds = ds.withColumn("unpId", ds.Description.getItem(3)) ds = ds.withColumn("name", ds.Description.getItem(4)) ds = ds.withColumn('modSites', split(ds.modSites, ",")) ds = ds.withColumn('modSites', explode(ds.modSites)) ds = ds.drop('Modified Sites') ds.limit(5).toPandas() # ## Get PDB to UniProt Residue Mappings # Download PDB to UniProt mappings and filter out residues that were not observed in the 3D structure. 
up = pdbToUniProt.get_cached_residue_mappings().filter("pdbResNum IS NOT NULL") st = up.join(ds, (up.uniprotId == ds.unpId) & (up.uniprotNum == ds.modSites)) # + code_folding=[0] jupyter={"source_hidden": true} def view_modifications(df, cutoff_distance, *args): def view3d(show_labels=True,show_bio_assembly=False, show_surface=False, i=0): pdb_id, chain_id = df.iloc[i]['structureChainId'].split('.') res_num = df.iloc[i]['pdbResNum'] labels = df.iloc[i]['ptms'] # print header print ("PDB Id: " + pdb_id + " chain Id: " + chain_id) # print any specified additional columns from the dataframe for a in args: print(a + ": " + df.iloc[i][a]) mod_res = {'chain': chain_id, 'resi': res_num} # select neigboring residues by distance surroundings = {'chain': chain_id, 'resi': res_num, 'byres': True, 'expand': cutoff_distance} viewer = py3Dmol.view(query='pdb:' + pdb_id, options={'doAssembly': show_bio_assembly}) # polymer style viewer.setStyle({'cartoon': {'color': 'spectrum', 'width': 0.6, 'opacity':0.8}}) # non-polymer style viewer.setStyle({'hetflag': True}, {'stick':{'radius': 0.3, 'singleBond': False}}) # style for modifications viewer.addStyle(surroundings,{'stick':{'colorscheme':'orangeCarbon', 'radius': 0.15}}) viewer.addStyle(mod_res, {'stick':{'colorscheme':'redCarbon', 'radius': 0.4}}) viewer.addStyle(mod_res, {'sphere':{'colorscheme':'gray', 'opacity': 0.7}}) # set residue labels if show_labels: for residue, label in zip(res_num, labels): viewer.addLabel(residue + ": " + label, \ {'fontColor':'black', 'fontSize': 8, 'backgroundColor': 'lightgray'}, \ {'chain': chain_id, 'resi': residue}) viewer.zoomTo(surroundings) if show_surface: viewer.addSurface(py3Dmol.SES,{'opacity':0.8,'color':'lightblue'}) return viewer.show() s_widget = IntSlider(min=0, max=len(df)-1, description='Structure', continuous_update=False) return interact(view3d, show_labels=True, show_bio_assembly=False, show_surface=False, i=s_widget) # - sp = st.toPandas() sp.head() view_modifications(sp, 6, 
'uniprotId', 'unpName', 'name','Accession','modSites'); spark.stop()
notebooks/S-SulphenylationTo3DStructure.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # DNN practice
#
# The provided code covers the imports, data loading and modeling parts.
#
# The Titanic dataset is used.
#
# Build a DNN model based on what has been covered so far.
#
# - Goal: exceed 85% accuracy

# ### Import library & data

# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# -

# Titanic dataset bundled with seaborn.
df = sns.load_dataset('titanic')

# ## EDA
#
# (See the KNN section of the course material.)

df.head()

df.isna().sum()

df.describe()

df.info()

# Inspect the unique values of every column (iterate columns directly
# instead of indexing by position).
for col in df.columns:
    print(col)
    print(df[col].unique())
    print()

# 'deck' is mostly missing and 'embark_town' duplicates 'embarked'.
df = df.drop(['deck', 'embark_town'], axis=1)
print(df.columns.values)

# Drop rows with a missing age.
df = df.dropna(subset=['age'], how='any', axis=0)
print(len(df))

# Impute missing embarkation ports with the most frequent value.
# Assign back instead of the chained `df['embarked'].fillna(..., inplace=True)`:
# chained inplace mutation is deprecated under pandas Copy-on-Write and may
# silently stop updating `df`.
most_freq = df['embarked'].value_counts(dropna=True).idxmax()
print(most_freq)
df['embarked'] = df['embarked'].fillna(most_freq)

df.isna().sum()

# Keep the target plus the raw feature columns used for modeling.
df = df[['survived', 'pclass', 'sex', 'age', 'sibsp', 'parch', 'embarked']]

# One-hot encode the categorical columns, then drop the originals.
onehot_sex = pd.get_dummies(df['sex'])
df = pd.concat([df, onehot_sex], axis=1)
onehot_embarked = pd.get_dummies(df['embarked'], prefix='town')
df = pd.concat([df, onehot_embarked], axis=1)
df = df.drop(['sex', 'embarked'], axis=1)
df.head()

# ## Modeling

X = df[['pclass', 'age', 'sibsp', 'parch', 'female', 'male', 'town_C', 'town_Q', 'town_S']]
y = df['survived']

# Standardize features; fit_transform is the idiomatic one-call form of
# fit(X).transform(X).
X = StandardScaler().fit_transform(X)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=20)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Fully connected binary classifier: 9 input features -> 1 sigmoid output.
model = Sequential()
model.add(Dense(256, input_shape=(9,), activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Binary classification with a sigmoid output -> use binary cross-entropy
# instead of MSE: cross-entropy is the proper likelihood loss for a
# Bernoulli target and gives much stronger gradients through the sigmoid,
# which is what the "exceed 85% accuracy" goal needs. Interface to the
# rest of the notebook (model, history) is unchanged.
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.summary()

# Train for 30 epochs, evaluating the held-out split after every epoch.
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=30)

# Plot all tracked curves (loss/accuracy, train/validation); y-axis clamped
# to [0, 1] so the accuracy curves are readable.
pd.DataFrame(history.history).plot(figsize=(12, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
AI_Class/021/Dnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="3Eb5p_JDOQZN" colab_type="code" colab={} # # For More RAM # def function(l): # l.append([0]*500000000) # return l # l=[] # while True: # l=function(l) # + id="0ToqUXI6PMHl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="aaa2a60d-6de6-403c-de25-5c62fafd969a" executionInfo={"status": "ok", "timestamp": 1576022100092, "user_tz": 300, "elapsed": 382, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17001243474945681698"}} pwd # + id="2ydVplOiOXKO" colab_type="code" outputId="c7e4202d-173d-439b-a0e2-ac18047a5fee" executionInfo={"status": "ok", "timestamp": 1576022123783, "user_tz": 300, "elapsed": 19721, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17001243474945681698"}} colab={"base_uri": "https://localhost:8080/", "height": 128} # Mount your google drive where you've saved your assignment folder import torch from google.colab import drive drive.mount('/content/gdrive') # + id="rTp2J1llOnB-" colab_type="code" outputId="3d877554-7b98-4046-9cbe-f79b147f44e3" executionInfo={"status": "ok", "timestamp": 1576022141951, "user_tz": 300, "elapsed": 367, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17001243474945681698"}} colab={"base_uri": "https://localhost:8080/", "height": 35} # cd '/content/gdrive/My Drive/Project/transformers' # + id="aTk63FrokNAt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 546} outputId="f367b123-7ea6-4392-8540-caab385b3272" executionInfo={"status": "ok", "timestamp": 1576022160483, "user_tz": 300, "elapsed": 17505, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17001243474945681698"}} pip install pytorch-pretrained-bert # + id="nIgwsPvTO7aT" colab_type="code" outputId="77f771c1-d54d-4957-925a-0cbecf7f9708" 
executionInfo={"status": "ok", "timestamp": 1576022224306, "user_tz": 300, "elapsed": 31871, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17001243474945681698"}} colab={"base_uri": "https://localhost:8080/", "height": 1000} # !pip install -r requirements.txt # !python setup.py build # !python setup.py install # + id="_NK__RKEPLKK" colab_type="code" outputId="2bde1b36-dfc3-4f5a-af1b-900bb6dc8d18" executionInfo={"status": "ok", "timestamp": 1576022831838, "user_tz": 300, "elapsed": 269991, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17001243474945681698"}} colab={"base_uri": "https://localhost:8080/", "height": 1000} # !python examples/run_lm_finetuning.py --num_train_epochs=100 --output_dir=bert_base_qa_oxygen --overwrite_output_dir --model_type=bert --model_name_or_path=bert_base_cased_qa --do_train --train_data_file=data/Oxygen.txt --mlm # + id="6eEoDOh8UYWc" colab_type="code" outputId="5c0bef8b-bb6b-448a-beba-d258d1900997" executionInfo={"status": "ok", "timestamp": 1576010820692, "user_tz": 300, "elapsed": 3222, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17001243474945681698"}} colab={"base_uri": "https://localhost:8080/", "height": 419} # + id="TOayuqqlSP7A" colab_type="code" outputId="f6d856b7-a546-4387-c95e-85fa1d3feb2f" executionInfo={"status": "ok", "timestamp": 1576007195316, "user_tz": 300, "elapsed": 10593, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17001243474945681698"}} colab={"base_uri": "https://localhost:8080/", "height": 54} from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME import os model = BertModel.from_pretrained('bert-base-uncased') model.eval() output_dir = "bert_base_uncased" output_model_file = os.path.join(output_dir, WEIGHTS_NAME) output_config_file = os.path.join(output_dir, CONFIG_NAME) model_to_save = model.module if hasattr(model, 'module') else model # If we save using the 
predefined names, we can load using `from_pretrained` output_model_file = os.path.join(output_dir, WEIGHTS_NAME) output_config_file = os.path.join(output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') tokenizer.save_vocabulary(output_dir) # + id="VI40tIuqVev1" colab_type="code" colab={} # + id="Om7BDE3tWtUX" colab_type="code" outputId="59f5175b-5ebb-4551-94b3-8880b8ee7b08" executionInfo={"status": "ok", "timestamp": 1576008780611, "user_tz": 300, "elapsed": 1148, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17001243474945681698"}} colab={"base_uri": "https://localhost:8080/", "height": 181} text = "As well as granting to protect the area of Rouen from Viking invasion, Rollo had to swear not to invade further Frankish lands himself,\ accept baptism and conversion to the Roman Catholic faith of Christianity becoming Christian and swear fealty to King Charles III." model.eval() # Add the special tokens. marked_text = "[CLS] " + text + " [SEP]" # Split the sentence into tokens. tokenized_text = tokenizer.tokenize(marked_text) # Map the token strings to their vocabulary indeces. indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text) # Display the words with their indeces. # for tup in zip(tokenized_text, indexed_tokens): # print('{:<12} {:>6,}'.format(tup[0], tup[1])) # Mark each of the 22 tokens as belonging to sentence "1". segments_ids = [1] * len(tokenized_text) # print (segments_ids) tokens_tensor = torch.tensor([indexed_tokens]) segments_tensors = torch.tensor([segments_ids]) with torch.no_grad(): encoded_layers, _ = model(tokens_tensor, segments_tensors) # for i, token_str in enumerate(tokenized_text): # print (i, token_str) # create a new dimension in the tensor. token_embeddings = torch.stack(encoded_layers, dim=0) token_embeddings.size() # Remove dimension 1, the "batches". 
token_embeddings = torch.squeeze(token_embeddings, dim=1) token_embeddings.size() token_embeddings = token_embeddings.permute(1,0,2) token_embeddings.size() token_vecs_sum = [] # `token_embeddings` is a [22 x 12 x 768] tensor. # For each token in the sentence... for token in token_embeddings: # `token` is a [12 x 768] tensor # Sum the vectors from the last four layers. sum_vec = torch.sum(token[-4:], dim=0) # Use `sum_vec` to represent `token`. token_vecs_sum.append(sum_vec) # print ('Shape is: %d x %d' % (len(token_vecs_sum), len(token_vecs_sum[0]))) # print('First 5 vector values for each instance of "bank".') print('') print("Christian ", str(token_vecs_sum[40][:5])) print("christianity ", str(token_vecs_sum[38][:5])) print("Invade ", str(token_vecs_sum[6][:5])) print("Invasion ", str(token_vecs_sum[13][:5])) print("Protect ", str(token_vecs_sum[22][:5])) from scipy.spatial.distance import cosine # Calculate the cosine similarity between the word bank # in "bank robber" vs "bank vault" (same meaning). 
# Cosine similarities for the *previous* example sentence. Indices 40/38,
# 6/13 and 6/22 refer to token positions in that earlier tokenization --
# TODO(review): confirm against the token enumeration printed above.
same_bank = 1 - cosine(token_vecs_sum[40], token_vecs_sum[38])
same_invade = 1 - cosine(token_vecs_sum[6], token_vecs_sum[13])
diff_bank = 1 - cosine(token_vecs_sum[6], token_vecs_sum[22])
print('Vector similarity for *similar* meanings: %.2f' % same_bank)
print('Vector similarity for *similar* meanings: %.2f' % same_invade)
print('Vector similarity for *different* meanings: %.2f' % diff_bank)

# + Load a BERT model and tokenizer from a local checkpoint directory.
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME
import os

output_dir = "bert_base_uncased"
model = BertModel.from_pretrained(output_dir)
tokenizer = BertTokenizer.from_pretrained(output_dir, True)

# + Embed a sentence and compare selected token vectors.
text = "Man and woman are not same as king and queen."

model.eval()  # inference mode: disables dropout

# Wrap the sentence in BERT's classifier/separator special tokens.
marked_text = "[CLS] " + text + " [SEP]"

# Split into word-piece tokens and map them to vocabulary ids.
tokenized_text = tokenizer.tokenize(marked_text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)

# Single-sentence input: every token belongs to segment 1.
segments_ids = [1] * len(tokenized_text)

tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])

with torch.no_grad():
    encoded_layers, _ = model(tokens_tensor, segments_tensors)

# Print each token with its position so the hard-coded indices below
# can be checked against the actual tokenization.
for i, token_str in enumerate(tokenized_text):
    print(i, token_str)

# Stack the hidden layers into one tensor, drop the batch dimension, and
# reorder to [tokens x layers x hidden_size].
token_embeddings = torch.stack(encoded_layers, dim=0)
token_embeddings.size()
token_embeddings = torch.squeeze(token_embeddings, dim=1)
token_embeddings.size()
token_embeddings = token_embeddings.permute(1, 0, 2)
token_embeddings.size()

# Represent each token by the sum of its last four hidden layers.
token_vecs_sum = []
for token in token_embeddings:
    sum_vec = torch.sum(token[-4:], dim=0)
    token_vecs_sum.append(sum_vec)

print('')

from scipy.spatial.distance import cosine

# Token positions (see the enumeration printed above -- presumably
# 1 = "man", 3 = "woman", 8 = "king", 10 = "queen"; verify on first run).
# BUG FIX: the original variables were named man/woman/king/queen although
# each value compares a *pair* of tokens, and the printed similar/different
# labels did not match the pairs. Names and labels now say what is compared.
man_king = 1 - cosine(token_vecs_sum[1], token_vecs_sum[8])
woman_queen = 1 - cosine(token_vecs_sum[3], token_vecs_sum[10])
man_woman = 1 - cosine(token_vecs_sum[1], token_vecs_sum[3])
king_queen = 1 - cosine(token_vecs_sum[8], token_vecs_sum[10])

print('Vector similarity man/king:    %.2f' % man_king)
print('Vector similarity woman/queen: %.2f' % woman_queen)
print('Vector similarity man/woman:   %.2f' % man_woman)
print('Vector similarity king/queen:  %.2f' % king_queen)

# + Inspect the model architecture.
model

# + Load a QA-fine-tuned cased checkpoint for comparison.
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME
import os

output_dir = "bert_base_cased_qa"
model = BertModel.from_pretrained(output_dir)
model

# + Load another fine-tuned checkpoint (oxygen-domain QA model).
output_dir = "bert_base_qa_oxygen"
model = BertModel.from_pretrained(output_dir)
model

# +
Google collab/NLP Projec Parallel Dot CS Dot Stony Brook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:pyosim_aws]
#     language: python
#     name: conda-env-pyosim_aws-py
# ---

# <!--BOOK_INFORMATION-->
# <img style="float: right; width: 100px" src="https://raw.github.com/pyomeca/design/master/logo/logo_cropped.svg?sanitize=true">
#
# <font size="+2">Pyosim in the cloud</font>
#
# <font size="+1">with [pyomeca](https://github.com/pyomeca/pyom</font>a)
#
# <NAME> (<EMAIL> | [GitHub](https://github.com/romainmartinez))
#
# <!--NAVIGATION-->
# | [Contents](Index.ipynb) | [Verification](01.00-verification.ipynb) >

# # Logistics

# + Build the MSD causal diagram.
# BUG FIX: `Digraph` was used here but only imported in the *second* cell
# below, so a clean top-to-bottom run of the notebook raised NameError.
from graphviz import Digraph

dot = Digraph()
dot.attr('node', shape='plaintext')

# One node per evidence source feeding the musculoskeletal model.
dot.node('bio', 'biological\nCôté et al. (2012)')
dot.node('kin', 'kinematics\nMartinez et al. (2019)')
dot.node('emg', 'EMG\nBouffard et al. (2019)')
dot.node('msk', 'musculoskeletal')

dot.node('msd', 'MSDs')

# Unobserved confounders are drawn in a lighter color.
dot.attr('node', color='lightgrey')
dot.node('u', 'unobserveds')

dot.edge('bio', 'kin')
dot.edge('kin', 'emg')
dot.edge('emg', 'msk')
dot.edge('kin', 'msk')
dot.edge('msk', 'msd')
dot.edge('u', 'msd')

dot.render('msd', view=True)

# + Minimal graphviz example ("round table") kept as a sanity check.
dot = Digraph(comment='The Round Table')

dot.node('A', '<NAME>')
dot.node('B', 'Sir Bedevere the Wise')
dot.node('L', 'Sir Lancelot the Brave')

dot.edges(['AB', 'AL'])
dot.edge('B', 'L', constraint='false')
dot.render('round-table.gv', view=True)
# -

# <!--NAVIGATION-->
# | [Contents](Index.ipynb) | [Verification](01.00-verification.ipynb) >
notebooks/00.00-logistics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scrape the UWaterloo 2021/22 Mechanical Engineering course calendar and
# save the course number / name / description of every course to CSV.

import requests as rs
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq  # kept: alternative fetch path

url1 = 'https://ucalendar.uwaterloo.ca/2122/COURSE/course-ME.html'

r = rs.get(url1)
page_soup = soup(r.content)

# Each course is rendered as one "divTable" block on the calendar page.
containers = page_soup.findAll("div", {"class": "divTable"})
print(len(containers))

# Same output as the original `soup.prettify(containers[1])`, but calling the
# method on the Tag instance instead of through the class alias.
print(containers[1].prettify())

# Spot-check the fields of the first course before scraping them all.
course_nn = containers[0].findAll("strong")
course_number = course_nn[0].text
course_name = course_nn[1].text
print(course_number)
print(course_name)

course_desc = containers[0].findAll("div", {"class": "divTableCell colspan-2"})
print(len(course_desc))
print(course_desc[1].text)

# +
import pandas as pd
import numpy as np
# -

# BUG FIX: the original grew the DataFrame with `df = df.append(...)` inside
# the loop, which is O(n^2) and was removed in pandas 2.0. Collect plain
# dicts and build the frame once instead.
records = []
for container in containers:
    course_nn = container.findAll("strong")
    course_desc = container.findAll("div", {"class": "divTableCell colspan-2"})
    records.append({
        "Course Number": course_nn[0].text,
        "Course Name": course_nn[1].text,
        "Course Description": course_desc[1].text,
    })
df = pd.DataFrame(records, columns=["Course Number", "Course Name", "Course Description"])

df

df.to_csv('UWaterloo_Mechanical Eng courses.csv', index=False)
Web-Scraping Scripts and Data/Accredited Canadian English Undergrad MechEng Programs/UWaterloo/WS_Undergrad_Courses_UWatwerloo_Mechanical Engineering.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# # Numpy and Scipy

# + slideshow={"slide_type": "slide"}
import numpy as np

# + [markdown] slideshow={"slide_type": "fragment"}
# For readers coming from Matlab© there is:
#
# https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html

# + [markdown] slideshow={"slide_type": "slide"}
# ### Creating arrays

# + [markdown] slideshow={"slide_type": "slide"}
# A list or tuple becomes an array via `np.array()`:

# + slideshow={"slide_type": "fragment"}
a = np.array([1, 2, 3], float)
print(a)

# + slideshow={"slide_type": "fragment"}
b = np.array([[1, 2, 3], [4, 5, 6]], float)
print(b)

# + [markdown] slideshow={"slide_type": "slide"}
# A sequence of evenly spaced values comes from
# `np.arange(`_start_, _stop_, _step_`)`:

# + slideshow={"slide_type": "fragment"}
c = np.arange(-0.5, 0.5, 0.05)
print(c)

# + [markdown] slideshow={"slide_type": "slide"}
# The same can be achieved with
# `np.linspace(`_start_, _stop_, _number of values_`)`:

# + slideshow={"slide_type": "fragment"}
d = np.linspace(-0.5, 0.5, 25)
print(d)

# + [markdown] slideshow={"slide_type": "slide"}
# Numpy also ships constructors for arrays of zeros (`np.zeros()`) and ones
# (`np.ones()`); both take a tuple of dimensions:

# + slideshow={"slide_type": "fragment"}
e = np.zeros((3, 3))
print(e)

# + slideshow={"slide_type": "fragment"}
f = np.ones((3, 3, 3))
print(f)

# + [markdown] slideshow={"slide_type": "fragment"}
# __Note__: `np.zeros_like()` and `np.ones_like()` take an existing array
# and create a new one with the same dimensions.

# + [markdown] slideshow={"slide_type": "slide"}
# ### Querying array metadata

# + [markdown] slideshow={"slide_type": "slide"}
# The number of elements is exposed by the `arreglo.size` property:

# + slideshow={"slide_type": "fragment"}
print("El tamaño de d es {}".format(d.size))
print("El tamaño de e es {}".format(e.size))
print("El tamaño de f es {}".format(np.size(f)))

# + [markdown] slideshow={"slide_type": "slide"}
# The shape is exposed by the `arreglo.shape` property:

# + slideshow={"slide_type": "fragment"}
print("La forma de d es {}".format(d.shape))
print("La forma de e es {}".format(e.shape))
print("La forma de f es {}".format(np.shape(f)))

# + [markdown] slideshow={"slide_type": "slide"}
# And the number of dimensions by `arreglo.ndim`:

# + slideshow={"slide_type": "fragment"}
print("d tiene {} dimensiones".format(d.ndim))
print("e tiene {} dimensiones".format(e.ndim))
print("f tiene {} dimensiones".format(np.ndim(f)))

# + [markdown] slideshow={"slide_type": "slide"}
# Elements are read and written with __[]__, just like lists or tuples.

# + slideshow={"slide_type": "fragment"}
print(a[1:])

# + slideshow={"slide_type": "fragment"}
a[0] = 5
print(a)

# + [markdown] slideshow={"slide_type": "slide"}
# Commas may be used inside the brackets for multi-dimensional indexing.

# + slideshow={"slide_type": "fragment"}
print(b)

# + slideshow={"slide_type": "fragment"}
print(b[:, 1:2])

# + [markdown] slideshow={"slide_type": "slide"}
# ### Simple operations:

# + slideshow={"slide_type": "slide"}
print(b.T)

# + slideshow={"slide_type": "slide"}
print(b + b)

# + slideshow={"slide_type": "slide"}
print(b * b)

# + slideshow={"slide_type": "slide"}
print(b ** 2)

# + slideshow={"slide_type": "slide"}
print(b.dot(np.array([1, 2, 3])))
Sesion 3/A-Numpy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/chiranjeev9292/The-Spark-Foundation-Tasks-GRIPJUNE/blob/main/Task_2_Clustering.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# #**Author : <NAME>**

# ## Task-2 Prediction using Unsupervised ML

# ## [GRIP @ The Spark Foundation](https://www.thesparksfoundationsingapore.org/)

# ### **Objective- In this task I tried to predict the optimum number of clusters and represent it visually using K-means Clustering from the given 'Iris' dataset.**

# Dataset URL: "https://bit.ly/3kXTdox"

# ### Import Libraries

# + Imports (MinMaxScaler / datasets kept even though unused here).
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# -

# ### Loading the Data

# + Colab-only: interactively upload Iris.csv into the runtime.
from google.colab import files
uploaded = files.upload()
# -

iris_df = pd.read_csv('Iris.csv')
print("The Data is Imported")

iris_df

# ### Exploratory Data Analysis

# Column dtypes and non-null counts.
iris_df.info()

# 150 rows, 6 columns (Id, four measurements, Species).
iris_df.shape

# Check for missing values (the dataset has none).
iris_df.isnull().sum()

iris_df.dtypes

# Summary statistics of the numeric columns.
iris_df.describe()

# Correlation between the numeric columns only. FIX: plain `.corr()` raises
# on the string `Species` column in pandas >= 2.0; restricting to numeric
# dtypes matches the old (silent-drop) behaviour on every pandas version.
iris_df.select_dtypes('number').corr()

# ### Let's find the optimum number of K-means clusters and determine K

# BUG FIX: the original used iloc[:, [0, 1, 2, 3]], which made the `Id`
# column (column 0, just a running row index) a clustering feature and
# dropped PetalWidthCm. Cluster on the four flower measurements instead.
x = iris_df.iloc[:, [1, 2, 3, 4]].values

# Within-cluster sum of squares (WCSS) for k = 1..9.
wcss = []
for i in range(1, 10):
    kmeans = KMeans(n_clusters=i, init='k-means++', n_init=10, max_iter=300, random_state=0)
    kmeans.fit(x)
    wcss.append(kmeans.inertia_)

# Plot WCSS against k so "the elbow" can be read off visually.
plt.figure(figsize=(8, 5))
plt.plot(range(1, 10), wcss)
plt.title('The elbow Method')
plt.xlabel('Numbers of Cluster')
plt.ylabel('Within Cluster sum of Square')
plt.show()  # BUG FIX: was `plt.show` without parentheses, i.e. never called

# You can clearly see why it is called 'The elbow method' from the above
# graph: the optimum number of clusters is where the elbow occurs, i.e.
# where WCSS stops decreasing significantly with every iteration.

# **From the above graph the elbow occurs at K=3, so we choose 3 clusters.**

# Fit the final classifier with the chosen k.
kmeans = KMeans(n_clusters=3, init='k-means++', n_init=10, max_iter=300, random_state=0)
y_kmeans = kmeans.fit_predict(x)

# ### Visualization of Clusters

# Scatter the first two feature columns per cluster. With the corrected
# feature matrix these are sepal length and sepal width (the original plot
# was labelled sepal/petal length but actually drew Id vs sepal length).
plt.figure(figsize=(10, 5))
plt.scatter(x[y_kmeans == 0, 0], x[y_kmeans == 0, 1], s=100, c='red', label='Iris-setosa')
plt.scatter(x[y_kmeans == 1, 0], x[y_kmeans == 1, 1], s=100, c='blue', label='Iris-versicolour')
plt.scatter(x[y_kmeans == 2, 0], x[y_kmeans == 2, 1], s=100, c='green', label='Iris-virginica')

# Plot the cluster centroids.
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
            s=100, c='yellow', label='Centroids')
plt.legend()
plt.xlabel('Sepal Length in cm')
plt.ylabel('Sepal Width in cm')
plt.title('K-Means Clustering')

# From the figure: red, green and blue are the clusters, with their
# respective centroids marked in yellow.
#
# NOTE(review): the cluster-index-to-species mapping above is arbitrary
# (KMeans labels are unordered) -- the legend names are indicative only.

# **Thus, we can clearly conclude from the visual that the optimum number of clusters is 3.**

# ### **Thank You**
Task_2_Clustering.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Predict adipocyte masks and per-image area statistics for the Julius
# histology cohort with a pre-trained dilated U-Net.

# +
import matplotlib.pyplot as plt
import numpy as np
import tifffile as tiff
import keras.backend as K
from keras.metrics import binary_crossentropy
from math import sqrt
from skimage.transform import resize
import logging
import sys
import tensorflow as tf

from src.models.unet_dilation_deep_original import UNet
from src.utils.runtime import gpu_selection
from src.utils.data import random_transforms
from src.utils.model import dice_coef, jaccard_coef
import cv2
import glob
import random
from PIL import Image
from matplotlib.image import imsave
import mahotas as mh
from scipy import ndimage
from skimage.measure import regionprops
import seaborn as sns
import tqdm
from src.utils.model import dice_coef, jaccard_coef, tru_pos, fls_pos, tru_neg, fls_neg
import os
import os.path
import time

sns.set_style("whitegrid", {'axes.grid': False})
import keras
# -

# Our predictions will be in pixels; the microns-per-pixel conversion factor
# for these images is 0.615346875.

julius_imgs = glob.glob('/well/lindgren/craig/Julius_histology/*/*')

julius_imgs[0:10]

print('Number of histology images from 220 individuals: {}'.format(len(julius_imgs)))

# + Build the model and the TensorFlow session.
model = UNet('unet')
model.config['data_path'] = '.'
model.load_data()

gpu_selection(visible_devices="3")

# BUG FIX: the original created two configs/sessions back to back; only the
# second (full-memory-fraction) one was effective, so keep just that one.
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 1
session = tf.Session(config=config)

model.compile()

# Load the weights that achieved the best validation loss.
model.net.load_weights('/well/lindgren/craig/isbi-2012/checkpoints/unet_1024_dilation/weights_loss_val.weights')
# -


def norm_img(img, mean, std):
    """Standardise a greyscale image and add a leading batch dimension.

    Parameters:
        img: 2-D array-like image.
        mean, std: normalisation statistics (typically of `img` itself).

    Returns:
        float32 array of shape (1, H, W); the epsilon guards against a
        zero standard deviation on constant images.
    """
    sample = np.array(img, np.float32)
    sample = (sample - mean) / (std + 1e-10)
    return np.array([sample])


def predict_julius_areas(prd_batch):
    """Turn a U-Net probability map into a cell mask and area statistics.

    Parameters:
        prd_batch: model output batch; only prd_batch[0] is used.

    Returns:
        (blobs, median_area, mean_area, std_area, no_of_cells) where blobs
        is the binary cell mask and areas are in pixels (convert with the
        microns-per-pixel factor noted above).
    """
    # Threshold the probability map (background where p > 0.30), clean it up
    # with a 3x3 erosion, then fill holes inside cells.
    blobs = np.where(prd_batch[0] > 0.30, 0, 1)
    blobs = np.array(cv2.erode((blobs * 1.0).astype(np.float32), np.ones((3, 3))), dtype='int8')
    blobs = ndimage.morphology.binary_fill_holes(blobs, structure=np.ones((5, 5))).astype(int)

    # Label connected components once (the original labelled twice and
    # computed regionprops twice) and measure each object. Reshape to 2-D
    # from the mask's own shape instead of relying on a global `img`.
    labelled, no_objects = ndimage.label(blobs)
    resh_labelled = labelled.reshape((blobs.shape[0], blobs.shape[1]))
    props = regionprops(resh_labelled)
    size = {i: props[i].area for i in range(0, no_objects)}

    # Keep plausible cell sizes only (200..100000 px). FIX: the original
    # counted cells with a strict `> 200` but collected areas with `>= 200`;
    # both now use the same inclusive bounds.
    areas = np.array([v for v in size.values() if 200 <= v <= 100000])
    no_of_cells = len(areas)
    return (blobs, np.median(areas), np.mean(areas), np.std(areas), no_of_cells)


# + Batch prediction over the whole cohort.
from skimage import img_as_uint

start = time.time()
csv_path = 'Julius_adipocyte_preds.csv'
# BUG FIX: the original wrote the header *instead of* the first image's data
# row (and re-wrote it on every append run). Write the header only when the
# CSV is new/empty, and a data row for every processed image.
write_header = not os.path.isfile(csv_path) or os.path.getsize(csv_path) == 0
with open(csv_path, 'a') as out_file:
    if write_header:
        out_file.write('image,median_area,mean_area,std_dev_area,no_cells\n')
    for jul_img in tqdm.tqdm(julius_imgs):
        mask_path = 'predicted_masks/julius/' + str(jul_img.split('/')[6]) + 'mask.png'
        if os.path.isfile(mask_path):
            # Already processed on a previous run.
            continue
        width = 1024
        height = 768
        # Greyscale, resize, then crop a 1024x1024 window anchored at the
        # bottom-right of the (padded) frame.
        img = Image.open(jul_img).convert('L')
        img = img.resize((1024, 768))
        img = np.array(img.crop((width - 1024, height - 1024, width, height)))
        # Consistently use the shared helper (adds the 1e-10 epsilon the
        # original inline expression omitted).
        normalised_img = norm_img(img, np.mean(img), np.std(img))
        prd_batch = model.net.predict(normalised_img, batch_size=1)
        blobs, median_area, mean_area, std_area, no_cells = predict_julius_areas(prd_batch)
        out_file.write(str(jul_img) + ',' + str(median_area) + ',' + str(mean_area) + ','
                       + str(std_area) + ',' + str(no_cells) + '\n')
        imsave(mask_path, blobs)
end = time.time()
print('Total time taken {}'.format(end - start))
# -

# + Single-image sanity check.
width = 1024
height = 768
img = Image.open(julius_imgs[100]).convert('L')
img = img.resize((1024, 768))
img = np.array(img.crop((width - 1024, height - 1024, width, height)))
img_mean, img_stdev = np.mean(img), np.std(img)
normalised_img = norm_img(img, img_mean, img_stdev)
prd_batch = model.net.predict(normalised_img, batch_size=1)
blobs, median_area, mean_area, std_area, no_cells = predict_julius_areas(prd_batch)
# -

prd_batch = model.net.predict(normalised_img, batch_size=1)

plt.imshow(img, cmap='jet')

plt.imshow(prd_batch[0])

# Rebuild the mask inline for display. NOTE(review): this demo uses a 3x3
# hole-filling structure whereas the helper uses 5x5 -- presumably just a
# visual check; confirm which is intended.
blobs = np.where(prd_batch[0] > 0.30, 0, 1)
blobs = np.array(cv2.erode((blobs * 1.0).astype(np.float32), np.ones((3, 3))), dtype='int8')
blobs = ndimage.morphology.binary_fill_holes(blobs, structure=np.ones((3, 3))).astype(int)

plt.figure(figsize=(8, 8))
plt.imshow(blobs, cmap='gray')
notebooks/U_net_Julius_predictions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.5.0-rc4 # language: julia # name: julia-0.5 # --- include("../src/UNSflow.jl") using UNSflow # + cref = 1. bref = 10. sref = 10. patch1 = patch(0., -5., 0., 0., "FlatPlate", 1., 0., 0.11, 7) patch2 = patch(0., 0., 0., 0., "FlatPlate", 1., 0., 0.11, 7) patch3 = patch(0., 5., 0., 0., "FlatPlate", 1., 0., 0.11, 5) patchdata = [patch1; patch2; patch3] alphadef = ConstDef(15.*pi/180) hdef = ConstDef(0.) udef = ConstDef(1.) kin = KinemDef3D(alphadef, hdef, udef) surf = ThreeDSurf(cref, bref, sref, patchdata, kin, 1., 70, 35, 15) field = ThreeDFlowField() dtstar = 0.015 nsteps = round(Int,5./dtstar) + 1 # - mat = Array(Float64, 0, 4) mat = mat' surf2d = TwoDSurf[] field2d = TwoDFlowField[] kinem2d = KinemDef[] dt = dtstar*surf.cref/surf.uref t = 0. AR = surf.bref/surf.cref bc = zeros(surf.nspan) a03d = zeros(surf.nspan) cl = zeros(surf.nspan) cd = zeros(surf.nspan) cm = zeros(surf.nspan) lhs = zeros(surf.nspan, surf.nbterm) rhs = zeros(surf.nspan) bcoeff = zeros(surf.nbterm) if surf.kindef.vartype == "Constant" for i = 1:surf.nspan # Kinematics at all strips is the same push!(kinem2d, KinemDef(surf.kindef.alpha, surf.kindef.h, surf.kindef.u)) push!(surf2d, TwoDSurf(surf.patchdata[1].coord_file, surf.patchdata[1].pvt, kinem2d[i], [surf.patchdata[1].lc;])) #If 3D flow field is defined with disturbances or external vortices, these should be transferred to the 2D flowfield push!(field2d, TwoDFlowField()) end end nsteps = 10 # for istep = 1:nsteps #Udpate current time t = t + dt for i = 1:surf.nspan #Update kinematic parameters update_kinem(surf2d[i], t) #Update flow field parameters if any update_externalvel(field2d[i], t) #Update bound vortex positions update_boundpos(surf2d[i], dt) #Add a TEV with dummy strength place_tev(surf2d[i], field2d[i], dt) end kelv = KelvinConditionLLTldvm(surf, surf2d, 
field2d) #Solve for TEV strength to satisfy Kelvin condition soln = nlsolve(not_in_place(kelv), -0.01*ones(surf.nspan), method = :newton) for i = 1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 0.5*surf2d[i].aterm[1] end for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = sin(n*surf.psi[i])*(sin(surf.psi[i]) + (n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) for i = 1:surf.nspan a03d[i] = 0 for n = 1:surf.nbterm a03d[i] = a03d[i] - real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end #end nshed = Int(0) for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = surf2d[i].a0[1] + a03d[i] #2D iteration if LESP_crit is exceeded if abs(surf2d[i].a0[1]) > surf2d[i].lespcrit[1] #Remove the previous tev pop!(field2d[i].tev) #Add a TEV with dummy strength place_tev(surf2d[i],field2d[i],dt) #Add a LEV with dummy strength place_lev(surf2d[i],field2d[i],dt) surf2d[i].levflag[1] = 1 nshed += 1 else surf2d[i].levflag[1] = 0 end end # + if nshed > 0 kelvkutta = KelvinKuttaLLTldvm(surf,surf2d,field2d, nshed) #Solve for TEV and LEV strengths to satisfy Kelvin condition and Kutta condition at leading edge soln = nlsolve(not_in_place(kelvkutta), [-0.01*ones(surf.nspan); 0.01*ones(nshed)], method = :newton) end # + cntr = surf.nspan + 1 for i = 1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] end for i = 1:surf.nspan if surf2d[i].levflag[1] == 1 field2d[i].lev[length(field2d[i].lev)].s = soln.zero[cntr] cntr += 1 end end # + for i = 1:surf.nspan #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier 
coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 0.5*surf2d[i].aterm[1] end # - for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = sin(n*surf.psi[i])*(sin(surf.psi[i]) + (n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) # + for i = 1:surf.nspan a03d[i] = 0 for n = 1:surf.nbterm a03d[i] = a03d[i] - real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end # - a03d for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = surf2d[i].a0[1] + a03d[i] #Update rest of Fourier terms update_a2toan(surf2d[i]) #Update derivatives of Fourier coefficients update_adot(surf2d[i],dt) #Set previous values of aterm to be used for derivatives in next time step surf2d[i].a0prev[1] = surf2d[i].a0[1] for ia = 1:3 surf2d[i].aprev[ia] = surf2d[i].aterm[ia] end #Calculate bound vortex strengths update_bv(surf2d[i]) wakeroll(surf2d[i], field2d[i], dt) if (surf2d[i].levflag[1] == 1) cl[i], cd[i], cm[i] = calc_forces_E(surf2d[i], field2d[i].lev[length(field2d[i].lev)].s, dt) else cl[i], cd[i], cm[i] = calc_forces(surf2d[i]) end end # + cl3d = 0 cd3d = 0 cm3d = 0 for i = 1:surf.nspan-1 cl3d = cl3d + 0.5*(cl[i] + cl[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cd3d = cd3d + 0.5*(cd[i] + cd[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cm3d = cm3d + 0.5*(cm[i] + cm[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 end mat = hcat(mat, [t, cl3d, cd3d, cm3d]) # - t = t + dt for i = 1:surf.nspan #Update kinematic parameters update_kinem(surf2d[i], t) #Update flow field parameters if any update_externalvel(field2d[i], t) #Update bound vortex positions update_boundpos(surf2d[i], dt) #Add a TEV with dummy strength place_tev(surf2d[i], field2d[i], dt) end kelv = KelvinConditionLLTldvm(surf, surf2d, field2d) #Solve for TEV strength to satisfy Kelvin condition soln = nlsolve(not_in_place(kelv), -0.01*ones(surf.nspan), method = :newton) for i = 
1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 0.5*surf2d[i].aterm[1] end for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = sin(n*surf.psi[i])*(sin(surf.psi[i]) + (n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) for i = 1:surf.nspan a03d[i] = 0 for n = 1:surf.nbterm a03d[i] = a03d[i] - real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end #end nshed = Int(0) for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = surf2d[i].a0[1] + a03d[i] #2D iteration if LESP_crit is exceeded if abs(surf2d[i].a0[1]) > surf2d[i].lespcrit[1] #Remove the previous tev pop!(field2d[i].tev) #Add a TEV with dummy strength place_tev(surf2d[i],field2d[i],dt) #Add a LEV with dummy strength place_lev(surf2d[i],field2d[i],dt) surf2d[i].levflag[1] = 1 nshed += 1 else surf2d[i].levflag[1] = 0 end end # + if nshed > 0 kelvkutta = KelvinKuttaLLTldvm(surf,surf2d,field2d, nshed) #Solve for TEV and LEV strengths to satisfy Kelvin condition and Kutta condition at leading edge soln = nlsolve(not_in_place(kelvkutta), [-0.01*ones(surf.nspan); 0.01*ones(nshed)], method = :newton) end # + cntr = surf.nspan + 1 for i = 1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] end for i = 1:surf.nspan if surf2d[i].levflag[1] == 1 field2d[i].lev[length(field2d[i].lev)].s = soln.zero[cntr] cntr += 1 end end for i = 1:surf.nspan #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 0.5*surf2d[i].aterm[1] end for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = 
sin(n*surf.psi[i])*(sin(surf.psi[i]) + (n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) for i = 1:surf.nspan a03d[i] = 0 for n = 1:surf.nbterm a03d[i] = a03d[i] - real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = surf2d[i].a0[1] + a03d[i] #Update rest of Fourier terms update_a2toan(surf2d[i]) #Update derivatives of Fourier coefficients update_adot(surf2d[i],dt) #Set previous values of aterm to be used for derivatives in next time step surf2d[i].a0prev[1] = surf2d[i].a0[1] for ia = 1:3 surf2d[i].aprev[ia] = surf2d[i].aterm[ia] end #Calculate bound vortex strengths update_bv(surf2d[i]) wakeroll(surf2d[i], field2d[i], dt) if (surf2d[i].levflag[1] == 1) cl[i], cd[i], cm[i] = calc_forces_E(surf2d[i], field2d[i].lev[length(field2d[i].lev)].s, dt) else cl[i], cd[i], cm[i] = calc_forces(surf2d[i]) end end cl3d = 0 cd3d = 0 cm3d = 0 for i = 1:surf.nspan-1 cl3d = cl3d + 0.5*(cl[i] + cl[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cd3d = cd3d + 0.5*(cd[i] + cd[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cm3d = cm3d + 0.5*(cm[i] + cm[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 end mat = hcat(mat, [t, cl3d, cd3d, cm3d]) # - t = t + dt for i = 1:surf.nspan #Update kinematic parameters update_kinem(surf2d[i], t) #Update flow field parameters if any update_externalvel(field2d[i], t) #Update bound vortex positions update_boundpos(surf2d[i], dt) #Add a TEV with dummy strength place_tev(surf2d[i], field2d[i], dt) end kelv = KelvinConditionLLTldvm(surf, surf2d, field2d) #Solve for TEV strength to satisfy Kelvin condition soln = nlsolve(not_in_place(kelv), -0.01*ones(surf.nspan), method = :newton) for i = 1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash 
update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 0.5*surf2d[i].aterm[1] end for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = sin(n*surf.psi[i])*(sin(surf.psi[i]) + (n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) for i = 1:surf.nspan a03d[i] = 0 for n = 1:surf.nbterm a03d[i] = a03d[i] - real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end #end nshed = Int(0) for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = surf2d[i].a0[1] + a03d[i] #2D iteration if LESP_crit is exceeded if abs(surf2d[i].a0[1]) > surf2d[i].lespcrit[1] #Remove the previous tev pop!(field2d[i].tev) #Add a TEV with dummy strength place_tev(surf2d[i],field2d[i],dt) #Add a LEV with dummy strength place_lev(surf2d[i],field2d[i],dt) surf2d[i].levflag[1] = 1 nshed += 1 else surf2d[i].levflag[1] = 0 end end # + if nshed > 0 kelvkutta = KelvinKuttaLLTldvm(surf,surf2d,field2d, nshed) #Solve for TEV and LEV strengths to satisfy Kelvin condition and Kutta condition at leading edge soln = nlsolve(not_in_place(kelvkutta), [-0.01*ones(surf.nspan); 0.01*ones(nshed)], method = :newton) end # + cntr = surf.nspan + 1 for i = 1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] end for i = 1:surf.nspan if surf2d[i].levflag[1] == 1 field2d[i].lev[length(field2d[i].lev)].s = soln.zero[cntr] cntr += 1 end end for i = 1:surf.nspan #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 0.5*surf2d[i].aterm[1] end for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = sin(n*surf.psi[i])*(sin(surf.psi[i]) + (n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) for i = 1:surf.nspan a03d[i] = 0 for n = 
1:surf.nbterm a03d[i] = a03d[i] - real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = surf2d[i].a0[1] + a03d[i] #Update rest of Fourier terms update_a2toan(surf2d[i]) #Update derivatives of Fourier coefficients update_adot(surf2d[i],dt) #Set previous values of aterm to be used for derivatives in next time step surf2d[i].a0prev[1] = surf2d[i].a0[1] for ia = 1:3 surf2d[i].aprev[ia] = surf2d[i].aterm[ia] end #Calculate bound vortex strengths update_bv(surf2d[i]) wakeroll(surf2d[i], field2d[i], dt) if (surf2d[i].levflag[1] == 1) cl[i], cd[i], cm[i] = calc_forces_E(surf2d[i], field2d[i].lev[length(field2d[i].lev)].s, dt) else cl[i], cd[i], cm[i] = calc_forces(surf2d[i]) end end cl3d = 0 cd3d = 0 cm3d = 0 for i = 1:surf.nspan-1 cl3d = cl3d + 0.5*(cl[i] + cl[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cd3d = cd3d + 0.5*(cd[i] + cd[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cm3d = cm3d + 0.5*(cm[i] + cm[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 end mat = hcat(mat, [t, cl3d, cd3d, cm3d]) # - t = t + dt for i = 1:surf.nspan #Update kinematic parameters update_kinem(surf2d[i], t) #Update flow field parameters if any update_externalvel(field2d[i], t) #Update bound vortex positions update_boundpos(surf2d[i], dt) #Add a TEV with dummy strength place_tev(surf2d[i], field2d[i], dt) end kelv = KelvinConditionLLTldvm(surf, surf2d, field2d) #Solve for TEV strength to satisfy Kelvin condition soln = nlsolve(not_in_place(kelv), -0.01*ones(surf.nspan), method = :newton) for i = 1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 
0.5*surf2d[i].aterm[1] end for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = sin(n*surf.psi[i])*(sin(surf.psi[i]) + (n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) for i = 1:surf.nspan a03d[i] = 0 for n = 1:surf.nbterm a03d[i] = a03d[i] - real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end #end nshed = Int(0) for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = surf2d[i].a0[1] + a03d[i] #2D iteration if LESP_crit is exceeded if abs(surf2d[i].a0[1]) > surf2d[i].lespcrit[1] #Remove the previous tev pop!(field2d[i].tev) #Add a TEV with dummy strength place_tev(surf2d[i],field2d[i],dt) #Add a LEV with dummy strength place_lev(surf2d[i],field2d[i],dt) surf2d[i].levflag[1] = 1 nshed += 1 else surf2d[i].levflag[1] = 0 end end # + if nshed > 0 kelvkutta = KelvinKuttaLLTldvm(surf,surf2d,field2d, nshed) #Solve for TEV and LEV strengths to satisfy Kelvin condition and Kutta condition at leading edge soln = nlsolve(not_in_place(kelvkutta), [-0.01*ones(surf.nspan); 0.01*ones(nshed)], method = :newton) end # + cntr = surf.nspan + 1 for i = 1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] end for i = 1:surf.nspan if surf2d[i].levflag[1] == 1 field2d[i].lev[length(field2d[i].lev)].s = soln.zero[cntr] cntr += 1 end end for i = 1:surf.nspan #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 0.5*surf2d[i].aterm[1] end for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = sin(n*surf.psi[i])*(sin(surf.psi[i]) + (n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) for i = 1:surf.nspan a03d[i] = 0 for n = 1:surf.nbterm a03d[i] = a03d[i] - real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = 
surf2d[i].a0[1] + a03d[i] #Update rest of Fourier terms update_a2toan(surf2d[i]) #Update derivatives of Fourier coefficients update_adot(surf2d[i],dt) #Set previous values of aterm to be used for derivatives in next time step surf2d[i].a0prev[1] = surf2d[i].a0[1] for ia = 1:3 surf2d[i].aprev[ia] = surf2d[i].aterm[ia] end #Calculate bound vortex strengths update_bv(surf2d[i]) wakeroll(surf2d[i], field2d[i], dt) if (surf2d[i].levflag[1] == 1) cl[i], cd[i], cm[i] = calc_forces_E(surf2d[i], field2d[i].lev[length(field2d[i].lev)].s, dt) else cl[i], cd[i], cm[i] = calc_forces(surf2d[i]) end end cl3d = 0 cd3d = 0 cm3d = 0 for i = 1:surf.nspan-1 cl3d = cl3d + 0.5*(cl[i] + cl[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cd3d = cd3d + 0.5*(cd[i] + cd[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cm3d = cm3d + 0.5*(cm[i] + cm[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 end mat = hcat(mat, [t, cl3d, cd3d, cm3d]) # - view_vorts(surf2d[10],field2d[10]) axis("equal") t = t + dt for i = 1:surf.nspan #Update kinematic parameters update_kinem(surf2d[i], t) #Update flow field parameters if any update_externalvel(field2d[i], t) #Update bound vortex positions update_boundpos(surf2d[i], dt) #Add a TEV with dummy strength place_tev(surf2d[i], field2d[i], dt) end kelv = KelvinConditionLLTldvm(surf, surf2d, field2d) #Solve for TEV strength to satisfy Kelvin condition soln = nlsolve(not_in_place(kelv), -0.01*ones(surf.nspan), method = :newton) for i = 1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 0.5*surf2d[i].aterm[1] end for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = sin(n*surf.psi[i])*(sin(surf.psi[i]) + 
(n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) for i = 1:surf.nspan a03d[i] = 0 for n = 1:surf.nbterm a03d[i] = a03d[i] - real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end #end nshed = Int(0) for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = surf2d[i].a0[1] + a03d[i] #2D iteration if LESP_crit is exceeded if abs(surf2d[i].a0[1]) > surf2d[i].lespcrit[1] #Remove the previous tev pop!(field2d[i].tev) #Add a TEV with dummy strength place_tev(surf2d[i],field2d[i],dt) #Add a LEV with dummy strength place_lev(surf2d[i],field2d[i],dt) surf2d[i].levflag[1] = 1 nshed += 1 else surf2d[i].levflag[1] = 0 end end # + if nshed > 0 kelvkutta = KelvinKuttaLLTldvm(surf,surf2d,field2d, nshed) #Solve for TEV and LEV strengths to satisfy Kelvin condition and Kutta condition at leading edge soln = nlsolve(not_in_place(kelvkutta), [-0.01*ones(surf.nspan); 0.01*ones(nshed)], method = :newton) end # + cntr = surf.nspan + 1 for i = 1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] end for i = 1:surf.nspan if surf2d[i].levflag[1] == 1 field2d[i].lev[length(field2d[i].lev)].s = soln.zero[cntr] cntr += 1 end end for i = 1:surf.nspan #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 0.5*surf2d[i].aterm[1] end for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = sin(n*surf.psi[i])*(sin(surf.psi[i]) + (n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) for i = 1:surf.nspan a03d[i] = 0 for n = 1:surf.nbterm a03d[i] = a03d[i] - real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = surf2d[i].a0[1] + a03d[i] #Update rest of Fourier terms update_a2toan(surf2d[i]) #Update derivatives of Fourier coefficients 
update_adot(surf2d[i],dt) #Set previous values of aterm to be used for derivatives in next time step surf2d[i].a0prev[1] = surf2d[i].a0[1] for ia = 1:3 surf2d[i].aprev[ia] = surf2d[i].aterm[ia] end #Calculate bound vortex strengths update_bv(surf2d[i]) wakeroll(surf2d[i], field2d[i], dt) if (surf2d[i].levflag[1] == 1) cl[i], cd[i], cm[i] = calc_forces_E(surf2d[i], field2d[i].lev[length(field2d[i].lev)].s, dt) else cl[i], cd[i], cm[i] = calc_forces(surf2d[i]) end end cl3d = 0 cd3d = 0 cm3d = 0 for i = 1:surf.nspan-1 cl3d = cl3d + 0.5*(cl[i] + cl[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cd3d = cd3d + 0.5*(cd[i] + cd[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cm3d = cm3d + 0.5*(cm[i] + cm[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 end mat = hcat(mat, [t, cl3d, cd3d, cm3d]) # + t = t + dt for i = 1:surf.nspan #Update kinematic parameters update_kinem(surf2d[i], t) #Update flow field parameters if any update_externalvel(field2d[i], t) #Update bound vortex positions update_boundpos(surf2d[i], dt) #Add a TEV with dummy strength place_tev(surf2d[i], field2d[i], dt) end kelv = KelvinConditionLLTldvm(surf, surf2d, field2d) #Solve for TEV strength to satisfy Kelvin condition soln = nlsolve(not_in_place(kelv), -0.01*ones(surf.nspan), method = :newton) for i = 1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 0.5*surf2d[i].aterm[1] end for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = sin(n*surf.psi[i])*(sin(surf.psi[i]) + (n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) for i = 1:surf.nspan a03d[i] = 0 for n = 1:surf.nbterm a03d[i] = a03d[i] - 
real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end #end nshed = Int(0) for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = surf2d[i].a0[1] + a03d[i] #2D iteration if LESP_crit is exceeded if abs(surf2d[i].a0[1]) > surf2d[i].lespcrit[1] #Remove the previous tev pop!(field2d[i].tev) #Add a TEV with dummy strength place_tev(surf2d[i],field2d[i],dt) #Add a LEV with dummy strength place_lev(surf2d[i],field2d[i],dt) surf2d[i].levflag[1] = 1 nshed += 1 else surf2d[i].levflag[1] = 0 end end if nshed > 0 kelvkutta = KelvinKuttaLLTldvm(surf,surf2d,field2d, nshed) #Solve for TEV and LEV strengths to satisfy Kelvin condition and Kutta condition at leading edge soln = nlsolve(not_in_place(kelvkutta), [-0.01*ones(surf.nspan); 0.01*ones(nshed)], method = :newton) end cntr = surf.nspan + 1 for i = 1:surf.nspan field2d[i].tev[length(field2d[i].tev)].s = soln.zero[i] end for i = 1:surf.nspan if surf2d[i].levflag[1] == 1 field2d[i].lev[length(field2d[i].lev)].s = soln.zero[cntr] cntr += 1 end end for i = 1:surf.nspan #Update incduced velocities on airfoil update_indbound(surf2d[i], field2d[i]) #Calculate downwash update_downwash(surf2d[i], [field2d[i].u[1],field2d[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(surf2d[i]) bc[i] = surf2d[i].a0[1] + 0.5*surf2d[i].aterm[1] end for i = 1:surf.nspan for n = 1:surf.nbterm lhs[i,n] = sin(n*surf.psi[i])*(sin(surf.psi[i]) + (n*pi/(2*AR))) end rhs[i] = pi*sin(surf.psi[i])*bc[i]/(2*AR) end bcoeff[:] = \(lhs, rhs) for i = 1:surf.nspan a03d[i] = 0 for n = 1:surf.nbterm a03d[i] = a03d[i] - real(n)*bcoeff[n]*sin(n*surf.psi[i])/sin(surf.psi[i]) end end for i = 1:surf.nspan #Update 3D effect on A0 surf2d[i].a0[1] = surf2d[i].a0[1] + a03d[i] #Update rest of Fourier terms update_a2toan(surf2d[i]) #Update derivatives of Fourier coefficients update_adot(surf2d[i],dt) #Set previous values of aterm to be used for derivatives in next time step surf2d[i].a0prev[1] = surf2d[i].a0[1] for ia = 1:3 surf2d[i].aprev[ia] = 
surf2d[i].aterm[ia] end #Calculate bound vortex strengths update_bv(surf2d[i]) wakeroll(surf2d[i], field2d[i], dt) if (surf2d[i].levflag[1] == 1) cl[i], cd[i], cm[i] = calc_forces_E(surf2d[i], field2d[i].lev[length(field2d[i].lev)].s, dt) else cl[i], cd[i], cm[i] = calc_forces(surf2d[i]) end end cl3d = 0 cd3d = 0 cm3d = 0 for i = 1:surf.nspan-1 cl3d = cl3d + 0.5*(cl[i] + cl[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cd3d = cd3d + 0.5*(cd[i] + cd[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 cm3d = cm3d + 0.5*(cm[i] + cm[i+1])*sin(0.5*(surf.psi[i] + surf.psi[i+1]))*(surf.psi[i+1] - surf.psi[i])/2 end mat = hcat(mat, [t, cl3d, cd3d, cm3d]) # - soln for i = 1:surf.nspan surf.a0[i] = surf2d[i].a0[1] end plot(surf.yle,surf.a0) kelv = kelvkutta # + val = zeros(kelv.surf3d.nspan + kelv.nshed) bc = zeros(kelv.surf3d.nspan) cntr = kelv.surf3d.nspan + 1 for i = 1:kelv.surf3d.nspan nlev = length(kelv.field[i].lev) ntev = length(kelv.field[i].tev) #Update incduced velocities on airfoil update_indbound(kelv.surf[i], kelv.field[i]) #Calculate downwash update_downwash(kelv.surf[i], [kelv.field[i].u[1],kelv.field[i].w[1]]) #Calculate first two fourier coefficients update_a0anda1(kelv.surf[i]) bc[i] = kelv.surf[i].a0[1] + 0.5*kelv.surf[i].aterm[1] val[i] = kelv.surf[i].uref*kelv.surf[i].c*pi*(kelv.surf[i].a0[1] + kelv.surf[i].aterm[1]/2.) 
for iv = 1:ntev val[i] = val[i] + kelv.field[i].tev[iv].s end for iv = 1:nlev val[i] = val[i] + kelv.field[i].lev[iv].s end if kelv.surf[i].levflag[1] == 1 if kelv.surf[i].a0[1] > 0 lesp_cond = kelv.surf[i].lespcrit[1] else lesp_cond = -kelv.surf[i].lespcrit[1] end val[cntr-1] = kelv.surf[i].a0[1] - lesp_cond end end AR = kelv.surf3d.bref/kelv.surf3d.cref lhs = zeros(kelv.surf3d.nspan,kelv.surf3d.nbterm) rhs = zeros(kelv.surf3d.nspan) bcoeff = zeros(kelv.surf3d.nbterm) for j = 1:kelv.surf3d.nspan for n = 1:kelv.surf3d.nbterm lhs[j,n] = sin(n*kelv.surf3d.psi[j])*(sin(kelv.surf3d.psi[j]) + (n*pi/(2*AR))) end rhs[j] = pi*sin(kelv.surf3d.psi[j])*bc[j]/(2*AR) end bcoeff[:] = \(lhs, rhs) a03d = zeros(kelv.surf3d.nspan) for j = 1:kelv.surf3d.nspan a03d[j] = 0 for n = 1:kelv.surf3d.nbterm a03d[j] = a03d[j] - real(n)*bcoeff[n]*sin(n*kelv.surf3d.psi[j])/sin(kelv.surf3d.psi[j]) end end cntr = kelv.surf3d.nspan + 1 for i = 1:kelv.surf3d.nspan val[i] = val[i] + kelv.surf[i].uref*kelv.surf[i].c*pi*a03d[i] if kelv.surf[i].levflag[1] == 1 val[cntr] = val[cntr] + a03d[i] cntr += 1 end end # - val
Notebooks/Untitled2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.7 64-bit (''DeepRL'': conda)' # language: python # name: python37764bitdeeprlcondabc608767d7cf433ca3c76fb9b75440c0 # --- # # Heart Disease UCI # # subset of the 74 attributes in the original: https://archive.ics.uci.edu/ml/datasets/heart+disease # # Attribute Information: # - age # - sex # - chest pain type (4 values) # - resting blood pressure # - serum cholestoral in mg/dl # - fasting blood sugar > 120 mg/dl # - resting electrocardiographic results (values 0,1,2) # - maximum heart rate achieved # - exercise induced angina # - oldpeak = ST depression induced by exercise relative to rest # - the slope of the peak exercise ST segment # - number of major vessels (0-3) colored by flourosopy # - thal: 3 = normal; 6 = fixed defect; 7 = reversable defect # # + import pandas as pd import numpy as np #designate the path where you saved your OEC data heart_data_path = "../../Datasets/heart.csv" # - # ## Explore the Data # Look at the distributions or histograms of individual attributes. # # Examine the mean and standard deviation for each attribute. # # # # # Build and train a standard pipeline with atleast one scaling/transformation stage and a classification stage. # Plot the precision and recall for the pipeline you have built # Output the accuracy, balanced accuracy and f1 score # Build a parameter grid of classifiers and transformers to use for a cross-validated search # # This could include passthrough, multiple seqeuences of transformer , a single transformer. # But msut always end up in a classifier. 
# # Consider the classifeirs, `Logistic Regression` `Support Vector Machines` and `Decision Trees` # # Consider the possible transformers: `MinMaxScaler`, `Normalizer`, `PowerTransformer`, `QuantileTransformer`, `StandardScaler` # # # Consider using `ParameterGrid` to generate sets of parameters for your classifiers # # # # # Through Cross Validation. Find the best pipeline seqeuence to transform the data into a more suitable space (or no transformation), # and perform classification on the target to maximise the Accuracy. # # # Consider using `GridSearchCV` # # # # # ## Plot the results of the tuned classifier # # Firstly show the precision and recall of the GridSearchCV. # # Secondly construct the Area under the reciving operator characteristic # # # # Rather than using a exhaustive grid search of all your parameters. # Which depending on dataset size could take a long time. # # We can use different types of hyper parameter optimisation, one such example is `RandomizedSearchCV` # # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html#sklearn.model_selection.RandomizedSearchCV # # Read the documenation and try and implement our pipeline using RandomizedSearchCV instead of GridSearchCV # # Plot the ROC and Precision/Recall curves as before. # # Examine the parameters selected. # What changes? #
Labsheets/Lab5/Lab5_Classification2_Practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # `startswith`/`endswith` accept multiple strings to test! "help".startswith(("h", 'gh', 'la')) "super".endswith(('a', 'e', 'o'))
Strings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Module 1 - Python Fundamentals # ## Sequence: String # - **Accessing String Characters with index** # - Accessing sub-strings with index slicing # - Iterating through Characters of a String # - More String Methods # # ----- # # ><font size="5" color="#00A0B2" face="verdana"> <B>Student will be able to</B></font> # - **Work with String Characters by index # ** # - Slice strings into substrings # - Iterate through String Characters # - Use String Methods # # &nbsp; # <font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font> # ## Accessing a single String Character # [![view video](https://openclipart.org/download/219326/1432343177.svg)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/a8044252-4f2f-4960-b37b-70da8fe4769a/Unit2_Section1.1a-String_Index_Address.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/a8044252-4f2f-4960-b37b-70da8fe4769a/Unit2_Section1.1a-String_Index_Address.vtt","srclang":"en","kind":"subtitles","label":"english"}]) # ### addressing a string index # Strings are sequences of characters. Another common sequence type used in this course is a **list**. Sequences index items counting from 0 for the first item. 
# # ```python # # assign string to student_name # student_name = "Alton" # # first character is at index 0 # student_name[0] # ``` # # # <font size="4" color="#00A0B2" face="verdana"> <B>Examples</B></font> # [ ] review and run example - note the first element is always index = 0 student_name = "Alton" print(student_name[0], "<-- first character at index 0") print(student_name[1]) print(student_name[2]) print(student_name[3]) print(student_name[4]) # [ ] review and run example student_name = "Jin" if student_name[0].lower() == "a": print('Winner! Name starts with A:', student_name) elif student_name[0].lower() == "j": print('Winner! Name starts with J:', student_name) else: print('Not a match, try again tomorrow:', student_name) # [ ] review and run ERROR example # cannot index out of range student_name = "Tobias" print(student_name[6]) # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 1</B></font> # # ## Work with individual string characters # # | | # |-----------------------------------------------------------------| # | **Remember:** the first character in a string is at **index 0**| # | | # # + # [ ] assign a string 5 or more letters long to the variable: street_name # [ ] print the 1st, 3rd and 5th characters # + # [ ] Create an input variable: team_name - ask that second letter = "i", "o", or "u" # [ ] Test if team_name 2nd character = "i", "o", or "u" and print a message # note: use if, elif and else # - # # &nbsp; # <font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font> # ## Using a negative index # [![view 
video](https://openclipart.org/download/219326/1432343177.svg)](http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/28da3b48-538d-4412-ae7b-ce95e9892ce9/Unit2_Section1.1b-Using_a_Negative_Index.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/28da3b48-538d-4412-ae7b-ce95e9892ce9/Unit2_Section1.1b-Using_a_Negative_Index.vtt","srclang":"en","kind":"subtitles","label":"english"}]) # ### Access the end of a string using -1 # Strings assign an **index** number address to each string character # # - first character in a string is index 0 # - last character in a string is index **-1** # # To access the last character in a string # ```python # student_name[-1] # ``` # # # <font size="4" color="#00A0B2" face="verdana"> <B>Examples</B></font> # # # #### access the last character with the -1 index # negative index counts back from the last character in a string # + # [ ] review and run example student_name = "Joana" # get last letter end_letter = student_name[-1] print(student_name,"ends with", "'" + end_letter + "'") # - # [ ] review and run example # get second to last letter second_last_letter = student_name[-2] print(student_name,"has 2nd to last letter of", "'" + second_last_letter + "'") # [ ] review and run example # you can get to the same letter with index counting + or - print("for", student_name) print("index 3 =", "'" + student_name[3] + "'") print("index -2 =","'" + student_name[-2] + "'") # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 2</B></font> # # [ ] assign a string 5 or more letters long to the variable: street_name street_name = 'Roberts' # [ ] print the last 3 characters of street_name print(street_name[4], street_name[5], street_name[6]) # + # [ ] create and assign string variable: first_name first_name = 'Logan' # [ ] print the 
first and last letters of name print(first_name[0], first_name[-1]) # - # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 3</B></font> # ## Fix the Errors # + # [ ] Review, Run, Fix the error using string index shoe = "tennis" # print the last letter print(shoe[-1]) # - # # Module 1 Part 2 # ## Sequence: String # - Accessing String Character with index # - **Accessing sub-strings with index slicing** # - Iterating through Characters of a String # - More String Methods # # ----- # # ><font size="5" color="#00A0B2" face="verdana"> <B>Student will be able to</B></font> # - Work with String Characters # - **Slice strings into substrings** # - Iterate through String Characters # - Use String Methods # # &nbsp; # <font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font> # ## Accessing sub-strings # [![view video](https://openclipart.org/download/219326/1432343177.svg)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/251ad8c1-588b-47de-8638-a5bcd0f29800/Unit2_Section1.2a-Index_Slicing-Substrings.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/251ad8c1-588b-47de-8638-a5bcd0f29800/Unit2_Section1.2a-Index_Slicing-Substrings.vtt","srclang":"en","kind":"subtitles","label":"english"}]) # ### Index Slicing [start:stop] # String slicing returns a string section by addressing the start and stop indexes # # ```python # # assign string to student_name # student_name = "Colette" # # addressing the 3rd, 4th and 5th characters # student_name[2:5] # ``` # The slice starts at index 2 and ends at index 5 (but does not include index 5) # # <font size="4 # " color="#00A0B2" face="verdana"> <B>Examples</B></font> # + # [ ] review and run example # assign string to student_name student_name = "Colette" # addressing the 3rd, 4th and 5th characters using a 
slice print("slice student_name[2:5]:",student_name[2:5]) # + # [ ] review and run example # assign string to student_name student_name = "Colette" # addressing the 3rd, 4th and 5th characters individually print("index 2, 3 & 4 of student_name:", student_name[2] + student_name[3] + student_name[4]) # - # [ ] review and run example long_word = 'Acknowledgement' print(long_word[2:11]) print(long_word[2:11], "is the 3rd char through the 11th char") print(long_word[2:11], "is the index 2, \"" + long_word[2] + "\",", "through index 10, \"" + long_word[10] + "\"") # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 4</B></font> # # ## slice a string # ### start & stop index # + # [ ] slice long_word to print "act" and to print "tic" long_word = "characteristics" print(long_word[4:7], long_word[11:14]) # - # [ ] slice long_word to print "sequence" long_word = "Consequences" print(long_word[3:11]) # # &nbsp; # <font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font> # ## Accessing beginning of sub-strings # [![view video](https://openclipart.org/download/219326/1432343177.svg)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/368b352f-6061-488c-80a4-d75e455f4416/Unit2_Section1.2b-Index_Slicing_Beginnings.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/368b352f-6061-488c-80a4-d75e455f4416/Unit2_Section1.2b-Index_Slicing_Beginnings.vtt","srclang":"en","kind":"subtitles","label":"english"}]) # ### Index Slicing [:stop] # String slicing returns a string section from index 0 by addressing only the stop index # # ```python # student_name = "Colette" # # addressing the 1st, 2nd & 3rd characters # student_name[:3] # ``` # **default start for a slice is index 0** # # <font size="4" color="#00A0B2" face="verdana"> <B>Example</B></font> # [ ] review 
and run example student_name = "Colette" # addressing the 1st, 2nd & 3rd characters print(student_name[:3]) # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 5</B></font> # # + # [ ] print the first half of the long_word long_word = "Consequences" print(long_word[:6]) # - # # &nbsp; # <font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font> # ## Accessing ending of sub-strings # [![view video](https://openclipart.org/download/219326/1432343177.svg)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29beb75a-aee7-43df-9569-e9ad22cffac4/Unit2_Section1.2c-Index_Slicing_Endings.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/29beb75a-aee7-43df-9569-e9ad22cffac4/Unit2_Section1.2c-Index_Slicing_Endings.vtt","srclang":"en","kind":"subtitles","label":"english"}]) # ### Index Slicing [start:] # String slicing returns a string section including by addressing only the start index # # ```python # student_name = "Colette" # # addressing the 4th, 5th and 6th characters # student_name[3:] # ``` # **default end index returns up to and including the last string character** # # <font size="4" color="#00A0B2" face="verdana"> <B>Example</B></font> # [ ] review and run example student_name = "Colette" # 4th, 5th, 6th and 7th characters student_name[3:] # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 6</B></font> # # [ ] print the second half of the long_word long_word = "Consequences" print(long_word[6:]) # # &nbsp; # <font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font> # ## accessing sub-strings by step size # [![view video](https://openclipart.org/download/219326/1432343177.svg)]( 
http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/62c65917-4979-4d26-9a05-09e1ed02cc51/Unit2_Section1.2d-Index_Slicing-Step_Sizes.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/62c65917-4979-4d26-9a05-09e1ed02cc51/Unit2_Section1.2d-Index_Slicing-Step_Sizes.vtt","srclang":"en","kind":"subtitles","label":"english"}]) # ### Index Slicing [:], [::2] # - **[:]** returns the entire string # - **[::2]** returns the first char and then steps to every other char in the string # - **[1::3]** returns the second char and then steps to every third char in the string # # the number **2**, in the print statement below, represents the **step** # # ```python # print(long_word[::2]) # ``` # # <font size="4" color="#00A0B2" face="verdana"> <B>Examples</B></font> # [ ] review and run example student_name = "Colette" # return all print(student_name[:]) # [ ] review and run example student_name = "Colette" # return every other print(student_name[::2]) # [ ] review and run example student_name = "Colette" # return every third, starting at 2nd character print(student_name[1::2]) # [ ] review and run example long_word = "Consequences" # starting at 2nd char (index 1) to 9th character, return every other character print(long_word[1:9:2]) # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 7</B></font> # # [ ] print the 1st and every 3rd letter of long_word long_word = "Acknowledgement" print(long_word[::3]) # [ ] print every other character of long_word starting at the 3rd character long_word = "Acknowledgement" print(long_word[2::2]) # <font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font> # # ## Accessing sub-strings continued # [![view video](https://openclipart.org/download/219326/1432343177.svg)]( 
http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/2e59f526-fadb-434e-822e-afe3732f75df/Unit2_Section1.2e-Index_Slicing-Reverse.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/2e59f526-fadb-434e-822e-afe3732f75df/Unit2_Section1.2e-Index_Slicing-Reverse.vtt","srclang":"en","kind":"subtitles","label":"english"}]) # ### stepping backwards # # ```python # print(long_word[::-1]) # ``` # # use **[::-1]** to reverse a string # # <font size="4" color="#00A0B2" face="verdana"> <B>Example</B></font> # [ ] review and run example of stepping backwards using [::-1] long_word = "characteristics" # make the step increment -1 to step backwards print(long_word[::-1]) # [ ] review and run example of stepping backwards using [6::-1] long_word = "characteristics" # start at the 7th letter backwards to start print(long_word[6::-1]) # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 8</B></font> # use slicing # [ ] reverse long_word long_word = "stressed" print(long_word[::-1]) # [ ] print the first 5 letters of long_word in reverse long_word = "characteristics" print(long_word[5::-1]) # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 9</B></font> # use slicing # [ ] print the first 4 letters of long_word # [ ] print the first 4 letters of long_word in reverse # [ ] print the last 4 letters of long_word in reverse # [ ] print the letters spanning indexes 3 to 6 of long_word in Reverse long_word = "timeline" print(long_word[::3]) print(long_word[3::-1]) print(long_word[-4::-1]) print(long_word[-3:-7:-1]) # # Module 1 Part 3 # ## Sequence: String # - Accessing String Character with index # - Accessing sub-strings with index slicing # - **Iterating through Characters of a String** # - More String Methods # # ----- # # ><font size="5" color="#00A0B2" 
face="verdana"> <B>Student will be able to</B></font> # - Work with String Characters # - Slice strings into substrings # - **Iterate through String Characters** # - Use String Methods # # &nbsp; # <font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font> # ## Iterate a String: 1 character at a time # [![view video](https://openclipart.org/download/219326/1432343177.svg)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/edd3631b-bceb-45a1-82bb-addf532aba4d/Unit2_Section1.3a-Iterate_a_String.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/edd3631b-bceb-45a1-82bb-addf532aba4d/Unit2_Section1.3a-Iterate_a_String.vtt","srclang":"en","kind":"subtitles","label":"english"}]) # ### `for letter in word:` # Python provides powerful sequence iteration features. Below, **`for letter in word:`** loops through each letter in *word*. # # ```python # word = "cello" # # for letter in word: # print(letter) # ``` # # The variable **`letter`** is an arbitrary variable name . Any valid variable name can be used. # # # <font size="4" color="#00A0B2" face="verdana"> <B>Examples</B></font> # + # [ ] review and run example word = "cello" for letter in word: print(letter) # + # [ ] review and run example # note: the variable 'letter' changed to 'item' word = "trumpet" for item in word: print(item) # + # [ ] review and run example # note: variable is now 'xyz' # using 'xyz', 'item' or 'letter' are all the same result word = "piano" for xyz in word: print(xyz) # + # [ ] review and run example # creates a new string (new_name) adding a letter (ltr) each loop # Q?: how many times will the loop run? 
student_name = "Skye" new_name = "" for ltr in student_name: if ltr.lower() == "y": new_name += ltr.upper() else: new_name += ltr print(student_name,"to",new_name) # - # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 10</B></font> # # ## iterate a String # ### one character at a time # + # [ ] Get user input for first_name # [ ] iterate through letters in first_name # - print each letter on a new line first_name = input('enter a first name') for letter in first_name: print(letter) # - # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 11</B></font> # # ## Program: capitalize-io # - get user input for first_name # - create an empty string variable: new_name # - iterate through letters in first_name # - add each letter in new_name # - capitalize if letter is an "i" or "o" *(hint: if, elif, else) # - print new_name # [ ] Create capitalize-io first_name = input("enter first name: ") new_name = "" for letter in first_name: if letter.upper() == "I": new_name += letter.upper() elif letter.upper() == "O": new_name += letter.upper() else: new_name += letter print(new_name) # # &nbsp; # <font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font> # ## Iterate sub-strings # [![view video](https://openclipart.org/download/219326/1432343177.svg)]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/257ed101-c530-406a-ba20-6d437d88e529/Unit2_Section1.3b-Iterate-Substrings.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/257ed101-c530-406a-ba20-6d437d88e529/Unit2_Section1.3b-Iterate-Substrings.vtt","srclang":"en","kind":"subtitles","label":"english"}]) # Combine String slicing and iteration # # ```python # student_name = "Skye" # # for letter in student_name[:3]: # print(letter) # ``` # # Iterate backwards using: 
**`student_name[::-1]`** # # <font size="4" color="#00A0B2" face="verdana"> <B>Example</B></font> # + # [ ] review and run example student_name = "Skye" for letter in student_name[:3]: print(letter) # + # Iterate BACKWARDS # [ ] review and run example student_name = "Skye" # start at "y" (student_name[2]), iterate backwards for letter in student_name[2::-1]: print(letter) # - # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 12</B></font> # ### String slicing and iteration # + # [ ] create & print a variable, other_word, made of every other letter in long_word long_word = "juxtaposition" other_word = long_word[::2] print(other_word) # - # Mirror Color # [ ] get user input, fav_color # [ ] print fav_color backwards + fav_color # example: "Red" prints "deRRed" fav_color = input("enter your favorite color") print(fav_color[::-1] + fav_color) # # Module 1 Part 4 # ## Sequence: String # - Accessing String Character with index # - Accessing sub-strings with index slicing # - Iterating through Characters of a String # - **More String Methods** # # ----- # # ><font size="5" color="#00A0B2" face="verdana"> <B>Student will be able to</B></font> # - Work with String Characters # - Slice strings into substrings # - Iterate through String Characters # - **Use String ~~Tricks~~ Methods** # - `len()` # - `.count()` # - `.find()` # # &nbsp; # <font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font> # ## String Methods: return string information # [![view video](https://openclipart.org/download/219326/1432343177.svg)]( 
http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c300e46e-b3c7-4bbf-8117-e72b33998cd3/Unit2_Section1.4a-String_Methods-Length.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c300e46e-b3c7-4bbf-8117-e72b33998cd3/Unit2_Section1.4a-String_Methods-Length.vtt","srclang":"en","kind":"subtitles","label":"english"}]) # ### len() # returns a strings length # ### .count() # returns number of times a character or sub-string occur # ### .find() # returns index of first character or sub-string match # returns **-1** if no match found # # ```python # work_tip = "save your code" # # # number of characters # len(work_tip) # # # letter "e" occurrences # work_tip.count("e") # # # find the index of the first space # work_tip.find(" ") # # # find the index of "u" searching a slice work_tip[3:6] # work_tip.find("u",3,6) # ``` # These methods **return** information that we can use to sort or manipulate strings # # <font size="4" color="#00A0B2" face="verdana"> <B>Examples</B></font> # run each example cell in order # + # [ ] review and run example work_tip = "save your code" print("number of characters in string") print(len(work_tip),"\n") print('letter "e" occurrences') print(work_tip.count("e"),"\n") print("find the index of the first space") print(work_tip.find(" "),"\n") print('find the index of "u" searching a slice work_tip[3:9] -', work_tip[3:9]) print(work_tip.find("u",3,9),"\n") print('find the index of "e" searching a slice work_tip[4:] -', work_tip[4:]) print(work_tip.find("e",4)) # - # ### len() # returns a strings length # + # [ ] review and run example work_tip = "good code is commented code" print("The sentence: \"" + work_tip + "\" has character length = ", len(work_tip) ) # + # [ ] review and run example # find the middle index work_tip = "good code is commented 
code" mid_pt = int(len(work_tip)/2) # print 1st half of sentence print(work_tip[:mid_pt]) # print the 2nd half of sentence print(work_tip[mid_pt:]) # - # ### .count() # returns number of times a character or sub-string occur # [ ] review and run example print(work_tip) print("how many w's? ", work_tip.count("w")) print("how many o's? ", work_tip.count("o")) print("uses 'code', how many times? ", work_tip.count("code")) # + # [ ] review and run example print(work_tip[:mid_pt]) print("# o's in first half") print(work_tip[:mid_pt].count("o")) print() print(work_tip[mid_pt:]) print("# o's in second half") print(work_tip[mid_pt:].count("o")) # - # ### .find(*string*) # returns index of first character or sub-string match # returns **-1** if no match found # #### .find(*string*, *start index*, *end index*) # same as above .find() but searches from optional start and to optional end index # [ ] review and run example work_tip = "good code has meaningful variable names" print(work_tip) # index where first instance of "code" starts code_here = work_tip.find("code") print(code_here, '= starting index for "code"') # [ ] review and run example # set start index = 13 and end index = 33 print('search for "meaning" in the sub-string:', work_tip[13:33],"\n") meaning_here = work_tip.find("meaning",13,33) print('"meaning" found in work_tip[13:33] sub-string search at index', meaning_here) # + # [ ] review and run example # if .find("o") has No Match, -1 is returned print ("work_tip:" , work_tip) location = work_tip.find("o") # keeps looping until location = -1 (no "o" found) while location >= 0: print("'o' at index =", location) # find("o", location + 1) looks for a "o" after index the first "o" was found location = work_tip.find("o", location + 1) print("no more o's") # - # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 13</B></font> # # ## `len()` # + # [ ] use len() to find the midpoint of the string # [ ] print the halves on separate lines random_tip = "wear a 
hat when it rains" len(random_tip) print(random_tip[0:13]) print(random_tip[13:24]) # - # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 14</B></font> # # ## `.count()` # + # for letters: "e" and "a" in random_tip # [ ] print letter counts # [ ] BONUS: print which letter is most frequent random_tip = "wear a hat when it rains" print(random_tip.count('e')) print(random_tip.count('a')) # - # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 15</B></font> # ### `.find()` # [ ] print long_word from the location of the first and second "t" long_word = "juxtaposition" location = long_word.find("t") while location >= 0: print("'t' at index =", location) location = long_word.find("t", location + 1) # # &nbsp; # <font size="6" color="#B24C00" face="verdana"> <B>Task 16</B></font> # ## Program: print each word in a quote # ```python # start = 0 # space_index = quote.find(" ") # while space_index != -1: # # code to print word (index slice start:space_index) # ``` # # Output should look like below: # ``` # they # stumble # who # run # fast # ``` # [ ] Print each word in the quote on a new line quote = "they stumble who run fast" start = 0 space_index = quote.find(" ") while space_index != -1: print(quote[start:space_index]) start = (space_index + 1) space_index = quote.find(" ", start) # [Terms of use](http://go.microsoft.com/fwlink/?LinkID=206977) &nbsp; [Privacy & cookies](https://go.microsoft.com/fwlink/?LinkId=521839) &nbsp; © 2017 Microsoft
Python Fundamentals/Module_1.0_Tutorials_Indexes_String_Sequences_Python_Fundamentals.ipynb
# Labels sit in the first column of train.csv; the remaining 784 columns are
# the 28x28 pixel values. A vectorized reshape replaces the old per-row
# iterrows() loops, which built the very same arrays orders of magnitude
# slower while appending one image at a time.
Y_train = train_df['label'].values
X_train = train_df.drop('label', axis=1).values.reshape((-1, 28, 28, 1)) / 255.
X_test = test_df.values.reshape((-1, 28, 28, 1)) / 255.
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn import preprocessing

# transform training labels to one-hot encoding
lb = preprocessing.LabelBinarizer()
lb.fit(Y_train)
Y_train = lb.transform(Y_train)

# split training and validation data.
# The old code requested 10 splits and looped over all of them, silently
# keeping only the last -- 9 splits were computed and thrown away.
# One split is all that is used, so request exactly one.
print('Stratified shuffling...')
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=15)
train_idx, val_idx = next(sss.split(X_train, Y_train))
X_train, X_val = X_train[train_idx], X_train[val_idx]
Y_train, Y_val = Y_train[train_idx], Y_train[val_idx]
print('Finish stratified shuffling...')
# Predict labels for the test set and write the Kaggle submission CSV
# (columns: ImageId starting at 1, Label = predicted digit).
print('Predicting...')
Y_test = lb.inverse_transform(model.predict(X_test))    # probabilities -> digit labels
Y_test = [[label] for label in Y_test]                  # one column per row
index = [[row_id] for row_id in range(1, X_test.shape[0] + 1)]
output_np = np.concatenate((index, Y_test), axis=1)     # [ImageId, Label] pairs
output_df = pd.DataFrame(data=output_np, columns=['ImageId', 'Label'])
output_df.to_csv('out.csv', index=False)
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Convert an arbitrary float tensor into a displayable uint8 image.

    Centers the data, rescales it to std 0.1 around 0.5 (so most values land
    in [0, 1]), then expands to the 0-255 byte range.

    Fix: works on a float copy, so the caller's array is no longer mutated in
    place, and integer input no longer crashes the in-place true division.
    """
    x = np.array(x, dtype=np.float64)  # defensive copy + guaranteed float math
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to a byte image
    x *= 255
    x = np.clip(x, 0, 255).astype('uint8')
    return x

def vis_img_in_filter(img = None, layer_name = 'conv2d_2'):
    """Visualize, via gradient ascent on the input, what each filter of
    `layer_name` responds to, plotted next to the input image.

    img: optional (1, 28, 28, 1) float array; defaults to the first training
         image. Fix: the old signature evaluated np.array(X_train[0]) at
         definition time, freezing whatever X_train held when the cell ran.
    """
    if img is None:
        img = np.array(X_train[0]).reshape((1, 28, 28, 1)).astype(np.float64)
    layer_output = layer_dict[layer_name].output
    img_ascs = list()
    for filter_index in range(layer_output.shape[3]):
        # build a loss function that maximizes the activation
        # of the nth filter of the layer considered
        loss = K.mean(layer_output[:, :, :, filter_index])
        # compute the gradient of the input picture wrt this loss
        grads = K.gradients(loss, model.input)[0]
        # normalization trick: we normalize the gradient
        grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
        # this function returns the loss and grads given the input picture
        iterate = K.function([model.input], [loss, grads])
        # step size for gradient ascent
        step = 5.
        img_asc = np.array(img)
        # run gradient ascent for 20 steps
        for i in range(20):
            loss_value, grads_value = iterate([img_asc])
            img_asc += grads_value * step
        img_asc = img_asc[0]
        img_ascs.append(deprocess_image(img_asc).reshape((28, 28)))

    # choose a grid large enough for all filters plus the input image
    if layer_output.shape[3] >= 35:
        plot_x, plot_y = 6, 6
    elif layer_output.shape[3] >= 23:
        plot_x, plot_y = 4, 6
    elif layer_output.shape[3] >= 11:
        plot_x, plot_y = 2, 6
    else:
        plot_x, plot_y = 1, 2
    # squeeze=False keeps ax 2-D even for a single row, so ax[0, 0] is always
    # valid (the old code raised IndexError for layers with < 11 filters)
    fig, ax = plt.subplots(plot_x, plot_y, figsize = (12, 12), squeeze=False)
    ax[0, 0].imshow(img.reshape((28, 28)), cmap = 'gray')
    ax[0, 0].set_title('Input image')
    fig.suptitle('Input image and %s filters' % (layer_name,))
    fig.tight_layout(pad = 0.3, rect = [0, 0, 0.9, 0.9])
    for (x, y) in [(i, j) for i in range(plot_x) for j in range(plot_y)]:
        if x == 0 and y == 0:
            continue
        ax[x, y].imshow(img_ascs[x * plot_y + y - 1], cmap = 'gray')
        ax[x, y].set_title('filter %d' % (x * plot_y + y - 1))
class GradientSaliency(SaliencyMask):
    """Vanilla gradient saliency: the mask is the gradient of one output unit
    with respect to the input pixels."""

    def __init__(self, model, output_index = 0):
        # Define the function to compute the gradient of the selected output
        # unit w.r.t. the model input, reusing the optimizer's gradient helper.
        # NOTE(review): model.output[0][output_index] indexes sample 0 of the
        # symbolic batch output -- appears to assume batch-size-1 evaluation;
        # confirm against the backend's broadcasting behavior.
        input_tensors = [model.input]
        gradients = model.optimizer.get_gradients(model.output[0][output_index], model.input)
        self.compute_gradients = K.function(inputs = input_tensors, outputs = gradients)

    def get_mask(self, input_image):
        # Execute the function to compute the gradient: add a leading batch
        # axis, then strip the gradient-list and batch nesting from the result.
        x_value = np.expand_dims(input_image, axis=0)
        gradients = self.compute_gradients([x_value])[0][0]
        return gradients
# Plot vanilla-gradient saliency for the first n_classes training images:
# one row per image, five columns (input, raw heatmap, positive-only mask,
# second heatmap, smoothed positive-only mask).
Y_train_label = lb.inverse_transform(Y_train)

fig, ax = plt.subplots(10, 5, figsize = (12, 16))
fig.suptitle('vanilla gradient')

for i in range(n_classes):
    img = np.array(X_train[i])
    # saliency w.r.t. the output unit of this image's label
    vanilla = GradientSaliency(model, Y_train_label[i])
    mask = vanilla.get_mask(img)
    # keep only positively-contributing pixels
    filter_mask = (mask > 0.0).reshape((28, 28))
    smooth_mask = vanilla.get_smoothed_mask(img)
    filter_smoothed_mask = (smooth_mask > 0.0).reshape((28, 28))
    # column 0: input image
    ax[i, 0].imshow(img.reshape((28, 28)), cmap = 'gray')
    # column 1: raw gradient heatmap
    cax = ax[i, 1].imshow(mask.reshape((28, 28)), cmap = 'jet')
    fig.colorbar(cax, ax = ax[i, 1])
    # column 2: gradient restricted to positive contributions
    ax[i, 2].imshow(mask.reshape((28, 28)) * filter_mask, cmap = 'gray')
    # column 3: NOTE(review): this re-plots `mask`; the smoothed heatmap
    # (`smooth_mask`) was probably intended here -- confirm.
    cax = ax[i, 3].imshow(mask.reshape((28, 28)), cmap = 'jet')
    fig.colorbar(cax, ax = ax[i, 3])
    # column 4: smoothed gradient restricted to positive contributions
    ax[i, 4].imshow(smooth_mask.reshape((28, 28)) * filter_smoothed_mask, cmap = 'gray')
2 digit recognizer/mnist-with-keras-visualization-and-saliency-map.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="6rnKuld4Ydm_" # PANDAS TEST 1. 미션. 영우글로벌러닝을 지도에 표시하라 # - 4개의 문제를 풀어서 html로 저장, 코드와 html 제출하세요. # + [markdown] id="eY1K8CooYdnF" # # Folium # - 지도 시각화도구 # - 참고 사이트 : https://python-visualization.github.io/folium/quickstart.html # + [markdown] id="BJHJ51zPfA2J" # # + id="WLoPkV0-YdnF" colab={"base_uri": "https://localhost:8080/"} outputId="5da9a1d0-cb5b-4bb3-c828-ade91fc7db53" # !pip install folium import folium # + id="hSsuegrqYdnG" # 경고 무시 import warnings warnings.filterwarnings(action='ignore') # + [markdown] id="tKEwcznpYdnG" # ### 지도 그리기 # + colab={"base_uri": "https://localhost:8080/", "height": 519} id="AlfTbOy1YdnG" outputId="66fda0d2-5c32-4063-f815-f7728d5e7539" # 위도와 경도로 지도 그리기 # 구글 맵에서 영우글로벌 러닝 위치 찾기 # https://www.google.co.kr/maps/place/%EC%98%81%EC%9A%B0%EA%B8%80%EB%A1%9C%EB%B2%8C%EB%9F%AC%EB%8B%9D/@37.5138649,127.0273409,17z/data=!4m12!1m6!3m5!1s0x357ca3f1d93e9e47:0x4a16da5d4a92399b!2z7JiB7Jqw6riA66Gc67KM65-s64ud!8m2!3d37.5138649!4d127.0295296!3m4!1s0x357ca3f1d93e9e47:0x4a16da5d4a92399b!8m2!3d37.5138649!4d127.0295296?hl=ko # 위도, 경도 좌표 >> @37.5138649,127.0273409 m = folium.Map(location=[37.5138649,127.0273409]) m # + colab={"base_uri": "https://localhost:8080/", "height": 519} id="eLuGsmzuYdnH" outputId="22d6d352-052d-4e49-9614-ec13e8a8647b" # zoom_start 옵션으로 확대 비율 지정 stamen = folium.Map(location=[37.5138649,127.0273409], zoom_start=15) stamen # + colab={"base_uri": "https://localhost:8080/", "height": 519} id="2LuTqOBWYdnH" outputId="0b6d8f76-102b-48b9-cea4-30bc713aa086" # tiles 옵션 'Stamen Toner' 지정 stamen = folium.Map(location=[37.5138649,127.0273409], tiles='Stamen Toner', zoom_start=15) stamen # + colab={"base_uri": "https://localhost:8080/", "height": 519} id="Pi_9FCzCYdnI" 
# demonstrate the popup option and a custom (cloud) marker icon
map_1 = folium.Map(location=[37.5138649, 127.0273409], zoom_start=14, tiles='Stamen Terrain')
# add one marker per location, in order: the office, then the district office
for spot, label in ([37.5138649, 127.0273409], 'YGL'), ([37.5021427, 127.0251832], 'gangnam gucheong'):
    folium.Marker(spot, popup=label, icon=folium.Icon(icon='cloud')).add_to(map_1)
map_1
Test/[PANDAS TEST]/[PANDAS_TEST]_1_Folium_2021_05_15_sat.ipynb
# element-wise sum, used later to combine the weights of the two side networks
def SUM(x, y):
    return x + y

# + id="__9hu3JkdUma"
def check_equity(property, a, b):
    """Assert that `a` and `b` carry the same value for `property`; return it."""
    value_a = getattr(a, property)
    value_b = getattr(b, property)
    assert value_a == value_b, "Different {}: {}!={}".format(property, value_a, value_b)
    return value_a

# + id="_dmaEbq1dO54"
def module_unwrap(mod: nn.Module, recursive=False):
    """Return an OrderedDict of `mod`'s children.

    With recursive=True, nested children are flattened and keyed as
    "<parent>_<child>"; modules with no children keep their own name.
    Objects without named_children() yield an empty dict.
    """
    flat = OrderedDict()
    try:
        for child_name, child in mod.named_children():
            if not recursive:
                flat[child_name] = child
                continue
            nested = module_unwrap(child, recursive=True)
            if nested:
                for sub_name, sub_module in nested.items():
                    flat[child_name + "_" + sub_name] = sub_module
            else:
                flat[child_name] = child
    except AttributeError:
        # not a module (no named_children) -> treat as a leaf with no children
        pass
    return flat
class Classifier(nn.Module):
    """MLP head mapping 2048-d VGG16 features to `num_classes` logits.

    The layer layout (Linear-ReLU-Dropout twice, then Linear) is unchanged,
    so saved state_dicts keep loading (keys follow nn.Sequential numbering).
    """

    def __init__(self, num_classes=1):
        super().__init__()
        stack = [
            nn.Linear(2048, 2048),
            nn.ReLU(True),
            nn.Dropout(p=0.5),
            nn.Linear(2048, 512),
            nn.ReLU(True),
            nn.Dropout(p=0.5),
            nn.Linear(512, num_classes),
        ]
        self.classifier = nn.Sequential(*stack)

    def forward(self, x):
        return self.classifier(x)

# + id="6_XOTpHHbZOU"
class VGG16(nn.Module):
    """Four-block VGG-style feature extractor (no fully-connected head).

    Each VGGBlock ends in a stride-2 max-pool, halving the spatial size; for
    32x32 input the flattened output is 512 * 2 * 2 = 2048 features.
    """

    def __init__(self, input_size, batch_norm=False):
        super().__init__()
        self.in_channels, self.in_width, self.in_height = input_size
        # register block_1 .. block_4 under the same names as before
        # (state_dict-compatible)
        in_ch = self.in_channels
        for idx, out_ch in enumerate((64, 128, 256, 512), start=1):
            setattr(self, 'block_{}'.format(idx), VGGBlock(in_ch, out_ch, batch_norm=batch_norm))
            in_ch = out_ch

    @property
    def input_size(self):
        return self.in_channels, self.in_width, self.in_height

    def forward(self, x):
        for idx in range(1, 5):
            x = getattr(self, 'block_{}'.format(idx))(x)
        return torch.flatten(x, 1)

# + id="4h0BLCSfbUI1"
class CombinedLoss(nn.Module):
    """loss_a + loss_b + lambda * loss_combo on the concatenated targets.

    `_lambda` is registered as a buffer so it follows .to(device) moves and
    is stored in the state_dict.
    """

    def __init__(self, loss_a, loss_b, loss_combo, _lambda=1.0):
        super().__init__()
        self.loss_a = loss_a
        self.loss_b = loss_b
        self.loss_combo = loss_combo
        self.register_buffer('_lambda', torch.tensor(float(_lambda), dtype=torch.float32))

    def forward(self, y_hat, y):
        term_a = self.loss_a(y_hat[0], y[0])
        term_b = self.loss_b(y_hat[1], y[1])
        joint_term = self.loss_combo(y_hat[2], torch.cat(y, 0))
        return term_a + term_b + self._lambda * joint_term
nets = [n.to(dev) for n in nets] model_a = module_unwrap(nets[0], True) model_b = module_unwrap(nets[1], True) model_c = module_unwrap(nets[2], True) reg_loss = nn.MSELoss() criterion.to(dev) reg_loss.to(dev) # Initialize history history_loss = {"train": [], "val": [], "test": []} history_accuracy = {"train": [], "val": [], "test": []} # Store the best val accuracy best_val_accuracy = 0 # Process each epoch for epoch in range(epochs): # Initialize epoch variables sum_loss = {"train": 0, "val": 0, "test": 0} sum_accuracy = {"train": [0,0,0], "val": [0,0,0], "test": [0,0,0]} progbar = None # Process each split for split in ["train", "val", "test"]: if split == "train": for n in nets: n.train() widgets = [ ' [', pb.Timer(), '] ', pb.Bar(), ' [', pb.ETA(), '] ', pb.Variable('ta','[Train Acc: {formatted_value}]') ] progbar = pb.ProgressBar(max_value=len(loaders[split][0]),widgets=widgets,redirect_stdout=True) else: for n in nets: n.eval() # Process each batch for j,((input_a, labels_a),(input_b, labels_b)) in enumerate(zip(loaders[split][0],loaders[split][1])): input_a = input_a.to(dev) input_b = input_b.to(dev) labels_a = labels_a.float().to(dev) labels_b = labels_b.float().to(dev) inputs = torch.cat([input_a,input_b],axis=0) labels = torch.cat([labels_a, labels_b]) # Reset gradients optimizer.zero_grad() # Compute output features_a = nets[0](input_a) features_b = nets[1](input_b) features_c = nets[2](inputs) pred_a = torch.squeeze(nets[3](features_a)) pred_b = torch.squeeze(nets[3](features_b)) pred_c = torch.squeeze(nets[3](features_c)) loss = criterion(pred_a, labels_a) + criterion(pred_b, labels_b) + criterion(pred_c, labels) for n in model_a: layer_a = model_a[n] layer_b = model_b[n] layer_c = model_c[n] if (isinstance(layer_a,nn.Conv2d)): loss += lambda_reg * reg_loss(combo_fn(layer_a.weight,layer_b.weight),layer_c.weight) if (layer_a.bias is not None): loss += lambda_reg * reg_loss(combo_fn(layer_a.bias, layer_b.bias), layer_c.bias) # Update loss sum_loss[split] 
+= loss.item() # Check parameter update if split == "train": # Compute gradients loss.backward() # Optimize optimizer.step() # Compute accuracy #https://discuss.pytorch.org/t/bcewithlogitsloss-and-model-accuracy-calculation/59293/ 2 pred_labels_a = (pred_a >= 0.0).long() # Binarize predictions to 0 and 1 pred_labels_b = (pred_b >= 0.0).long() # Binarize predictions to 0 and 1 pred_labels_c = (pred_c >= 0.0).long() # Binarize predictions to 0 and 1 batch_accuracy_a = (pred_labels_a == labels_a).sum().item() / len(labels_a) batch_accuracy_b = (pred_labels_b == labels_b).sum().item() / len(labels_b) batch_accuracy_c = (pred_labels_c == labels).sum().item() / len(labels) # Update accuracy sum_accuracy[split][0] += batch_accuracy_a sum_accuracy[split][1] += batch_accuracy_b sum_accuracy[split][2] += batch_accuracy_c if (split=='train'): progbar.update(j, ta=batch_accuracy_c) if (progbar is not None): progbar.finish() # Compute epoch loss/accuracy epoch_loss = {split: sum_loss[split] / len(loaders[split][0]) for split in ["train", "val", "test"]} epoch_accuracy = {split: [sum_accuracy[split][i] / len(loaders[split][0]) for i in range(len(sum_accuracy[split])) ] for split in ["train", "val", "test"]} # # Store params at the best validation accuracy # if save_param and epoch_accuracy["val"] > best_val_accuracy: # # torch.save(net.state_dict(), f"{net.__class__.__name__}_best_val.pth") # torch.save(net.state_dict(), f"{model_name}_best_val.pth") # best_val_accuracy = epoch_accuracy["val"] print(f"Epoch {epoch + 1}:") # Update history for split in ["train", "val", "test"]: history_loss[split].append(epoch_loss[split]) history_accuracy[split].append(epoch_accuracy[split]) # Print info print(f"\t{split}\tLoss: {epoch_loss[split]:0.5}\tVGG 1:{epoch_accuracy[split][0]:0.5}" f"\tVGG 2:{epoch_accuracy[split][1]:0.5}\tVGG *:{epoch_accuracy[split][2]:0.5}") if save_param: 
            torch.save({'vgg_a':nets[0].state_dict(),'vgg_b':nets[1].state_dict(),'vgg_star':nets[2].state_dict(),'classifier':nets[3].state_dict()},f'{model_name}.pth')


# + id="wWiTuCN1fN0g"
def test(net,classifier, loader):
    """Evaluate `net` + `classifier` on `loader` and print mean per-batch accuracy.

    Uses the module-level device `dev`. Predictions are binarized at logit 0,
    i.e. probability 0.5 under BCEWithLogitsLoss.
    NOTE(review): consider wrapping the loop in torch.no_grad() — gradients are
    not needed here; confirm before changing behavior.
    """
    net.to(dev)
    classifier.to(dev)
    net.eval()
    sum_accuracy = 0
    # Process each batch
    for j, (input, labels) in enumerate(loader):
        input = input.to(dev)
        labels = labels.float().to(dev)
        features = net(input)
        pred = torch.squeeze(classifier(features))
        # https://discuss.pytorch.org/t/bcewithlogitsloss-and-model-accuracy-calculation/59293/2
        pred_labels = (pred >= 0.0).long() # Binarize predictions to 0 and 1
        batch_accuracy = (pred_labels == labels).sum().item() / len(labels)
        # Update accuracy
        sum_accuracy += batch_accuracy
    epoch_accuracy = sum_accuracy / len(loader)
    print(f"Accuracy: {epoch_accuracy:0.5}")


# + id="lDeV1W6me7Ej"
def parse_dataset(dataset):
    """Collapse the 10 MNIST digit labels to binary even(0)/odd(1) targets."""
    dataset.targets = dataset.targets % 2
    return dataset


# + id="7iwwrQD3e5sL"
root_dir = './'

# + id="Zx7dBzfre2X9"
# Map pixel values to [0, 1] by dividing by 255.
# NOTE(review): ToTensor() already scales uint8 images to [0, 1]; applying
# rescale_data after it divides by 255 a second time — confirm this is intended.
rescale_data = transforms.Lambda(lambda x : x/255)

# Compose transformations
data_transform = transforms.Compose([
    transforms.Resize(32),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    rescale_data,
    #transforms.Normalize((-0.7376), (0.5795))
])

test_transform = transforms.Compose([
    transforms.Resize(32),
    transforms.ToTensor(),
    rescale_data,
    #transforms.Normalize((0.1327), (0.2919))
])

# + id="qBYFu2DUe0xU"
# Load MNIST dataset with transforms
train_set = torchvision.datasets.MNIST(root=root_dir, train=True, download=True, transform=data_transform)
test_set = torchvision.datasets.MNIST(root=root_dir, train=False, download=True, transform=test_transform)

# + id="iS7seoNnezX7"
# Re-label both splits as even/odd.
train_set = parse_dataset(train_set)
test_set = parse_dataset(test_set)

# + id="6OtJiXl2es--"
# Shuffle the training indices, carve out a validation fraction, then halve
# every split into disjoint "a"/"b" subsets (one per branch network).
train_idx = np.random.permutation(np.arange(len(train_set)))
test_idx = np.arange(len(test_set))
val_frac = 0.1
n_val = int(len(train_idx) * val_frac)
val_idx = train_idx[0:n_val]
train_idx = train_idx[n_val:]
h = len(train_idx)//2
train_set_a = Subset(train_set,train_idx[0:h])
train_set_b = Subset(train_set,train_idx[h:])
h = len(val_idx)//2
val_set_a = Subset(train_set,val_idx[0:h])
val_set_b = Subset(train_set,val_idx[h:])
h = len(test_idx)//2
test_set_a = Subset(test_set,test_idx[0:h])
test_set_b = Subset(test_set,test_idx[h:])

# + id="s5WqGkR0enhP"
# Define loaders
train_loader_a = DataLoader(train_set_a, batch_size=128, num_workers=0, shuffle=True, drop_last=True)
val_loader_a = DataLoader(val_set_a, batch_size=128, num_workers=0, shuffle=False, drop_last=False)
test_loader_a = DataLoader(test_set_a, batch_size=128, num_workers=0, shuffle=False, drop_last=False)
train_loader_b = DataLoader(train_set_b, batch_size=128, num_workers=0, shuffle=True, drop_last=True)
val_loader_b = DataLoader(val_set_b, batch_size=128, num_workers=0, shuffle=False, drop_last=False)
test_loader_b = DataLoader(test_set_b, batch_size=128, num_workers=0, shuffle=False, drop_last=False)
test_loader_all = DataLoader(test_set,batch_size=128, num_workers=0,shuffle=False,drop_last=False)

# Define dictionary of loaders
loaders = {"train": [train_loader_a,train_loader_b],
           "val": [val_loader_a,val_loader_b],
           "test": [test_loader_a,test_loader_b]}

# + id="xAmsywnHelhX"
# Two branch networks (a/b), one "star" network trained on the union of both
# streams, and a single classifier head shared by all three.
model1 = VGG16((1,32,32),batch_norm=True)
model2 = VGG16((1,32,32),batch_norm=True)
model3 = VGG16((1,32,32),batch_norm=True)
classifier = Classifier(num_classes=1)

# + id="fy81iDz9eizI"
nets = [model1,model2,model3,classifier]

# + id="_QQW2uVLee6L"
dev = torch.device('cuda')

# + id="eXegV7s_efNq"
parameters = set()

# + id="Vdw15S30efV7"
# One optimizer over the union of all four networks' parameters.
for n in nets:
    parameters |= set(n.parameters())

# + id="HNk7ro7ueder"
optimizer = torch.optim.SGD(parameters, lr = 0.01)
# Define a loss
criterion = nn.BCEWithLogitsLoss()#,nn.BCEWithLogitsLoss(),nn.BCEWithLogitsLoss(),_lambda = 1)
n_params = 0

# + colab={"base_uri": "https://localhost:8080/"} id="WRPmx_uUeYBk" outputId="34dadfb2-b0ca-4bd1-e8a2-7fbe9a70dade"
# NOTE(review): DO is only assigned in a later cell (DO = 'TEST'); it must be
# defined in an earlier cell before this one runs — verify execution order.
if (DO=='TRAIN'):
    train(nets, loaders, optimizer, criterion, epochs=50, dev=dev,save_param=True)
else:
    state_dicts = torch.load('model.pth')
    model1.load_state_dict(state_dicts['vgg_a']) # these state_dicts come from the training function
    model2.load_state_dict(state_dicts['vgg_b'])
    model3.load_state_dict(state_dicts['vgg_star'])
    classifier.load_state_dict(state_dicts['classifier'])
    test(model1,classifier,test_loader_all)
    test(model2, classifier, test_loader_all)
    test(model3, classifier, test_loader_all)
    # Rebuild the "star" network's conv entries as the combination of the two
    # branch networks' entries; all other entries are taken from vgg_star.
    summed_state_dict = OrderedDict()
    for key in state_dicts['vgg_star']:
        if key.find('conv') >=0:
            print(key)
            summed_state_dict[key] = combo_fn(state_dicts['vgg_a'][key],state_dicts['vgg_b'][key])
        else:
            summed_state_dict[key] = state_dicts['vgg_star'][key]
    model3.load_state_dict(summed_state_dict)
    test(model3, classifier, test_loader_all)

# + id="UZLTGjlxZozP" colab={"base_uri": "https://localhost:8080/"} outputId="74ca9ebc-e439-476b-d4e3-e174dfb0762d"
# Same evaluation as above but loading a different checkpoint file.
DO = 'TEST'
if (DO=='TRAIN'):
    train(nets, loaders, optimizer, criterion, epochs=50, dev=dev,save_param=True)
else:
    state_dicts = torch.load('valerio.pth')
    model1.load_state_dict(state_dicts['vgg_a']) # these state_dicts come from the training function
    model2.load_state_dict(state_dicts['vgg_b'])
    model3.load_state_dict(state_dicts['vgg_star'])
    classifier.load_state_dict(state_dicts['classifier'])
    test(model1,classifier,test_loader_all)
    test(model2, classifier, test_loader_all)
    test(model3, classifier, test_loader_all)
    summed_state_dict = OrderedDict()
    for key in state_dicts['vgg_star']:
        if key.find('conv') >=0:
            print(key)
            summed_state_dict[key] = combo_fn(state_dicts['vgg_a'][key],state_dicts['vgg_b'][key])
        else:
            summed_state_dict[key] = state_dicts['vgg_star'][key]
    model3.load_state_dict(summed_state_dict)
    test(model3, classifier, test_loader_all)

# + colab={"base_uri": "https://localhost:8080/"} id="Hx7Mt6yesHIv" outputId="1e47a8f0-57f2-462a-ea2e-3264662e84a8"
# Inspect the learned parameters of each VGG block (notebook display cells).
weights11 = list(model1.block_1.parameters())
weights11

# + colab={"base_uri": "https://localhost:8080/"} id="cDfLTr-HtBi-" outputId="b8a640b7-b613-4253-d4fe-e2ddd78aa75f"
weights12 = list(model1.block_2.parameters())
weights12

# + colab={"base_uri": "https://localhost:8080/"} id="QUbnsjRetGWt" outputId="8fc6a05e-7f88-4f0a-cb33-c58d0ac4b4fe"
weights13 = list(model1.block_3.parameters())
weights13

# + colab={"base_uri": "https://localhost:8080/"} id="AOIa4hzttI0g" outputId="ac154efb-c9cb-4c2c-a33f-df7b8c89507c"
weights14 = list(model1.block_4.parameters())
weights14

# + colab={"base_uri": "https://localhost:8080/"} id="firt-edqsOVz" outputId="b55ba2d8-8349-49b3-e692-b9c9d2544d32"
weights21 = list(model2.block_1.parameters())
weights21

# + colab={"base_uri": "https://localhost:8080/"} id="htFLeiH_tQ99" outputId="4f9969fe-c4de-4444-bae5-199a7fa3e16d"
weights22 = list(model2.block_2.parameters())
weights22

# + colab={"base_uri": "https://localhost:8080/"} id="mOQXY6P7tS_T" outputId="7a4847ad-3adc-4b71-d4ec-748f25ca801a"
weights23 = list(model2.block_3.parameters())
weights23

# + colab={"base_uri": "https://localhost:8080/"} id="uVKCx5zEtU9C" outputId="d1b02cca-23b8-4573-b3e8-160108ac173c"
weights24 = list(model2.block_4.parameters())
weights24

# + colab={"base_uri": "https://localhost:8080/"} id="yRJpz35PtYXp" outputId="4b4154fb-3870-495c-86f9-b7be485f041b"
weights31 = list(model3.block_1.parameters())
weights31

# + colab={"base_uri": "https://localhost:8080/"} id="pxx-hG7ktbbr" outputId="8efae830-4f73-49a8-94fa-6cf16023e4e0"
weights32 = list(model3.block_2.parameters())
weights32

# + id="m2_v6f0wtdiw" colab={"base_uri": "https://localhost:8080/"} outputId="cb592e87-a332-49aa-a27a-01aee9e304b6"
weights33 = list(model3.block_3.parameters())
weights33

# + id="1tceyqJItfXM" colab={"base_uri": "https://localhost:8080/"} outputId="4271cec0-646b-4a2d-a12a-466e25634b68"
weights34 = list(model3.block_4.parameters())
weights34

# + id="4exExPfCth38"


# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="wRJ7gX-in_BL" outputId="f3163007-487a-43c4-87f7-d9251e987cc2"
# !pip install --upgrade progressbar2

# + id="8OkFGrWghD-D"
3_VALERIO_PT1_SHARED_CLASSIFIER_EVEN_ODD.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # + tags=["remove-cell"] library(repr) ; options(repr.plot.width = 4, repr.plot.height = 4) # Change plot sizes (in cm) # - # # Model Fitting using Non-linear Least-squares # ## Introduction # # In this Chapter, you will learn to fit non-linear mathematical models to data using Non-Linear Least Squares (NLLS). # # Specifically, you will learn to # # * Visualize the data and the mathematical model you want to fit to them # * Fit a non-linear model # * Assess the quality of the fit, and whether the model is appropriate for your data # * Compare and select between competing models # # We will work through various examples. These assume that you have at least a conceptual understanding of what Linear vs Non-linear models are, how they are fitted to data, and how the fits can be assessed statistically. You may want to see the [Linear Models lecture](https://github.com/mhasoba/TheMulQuaBio/tree/master/content/lectures/LinearModels) (you can also watch the [video](https://drive.google.com/drive/folders/12Sj56wHX6vcAnp9GE9qQ1gIXbn7QRHU2?usp=sharing)), and the [NLLS Lecture](https://github.com/mhasoba/TheMulQuaBio/blob/master/content/lectures/NLLS) lecture first (you can also watch the [video]()). # # You may also (optionally) want to see the [lecture on model fitting in Ecology and Evolution in general](https://github.com/mhasoba/TheMulQuaBio/tree/master/content/lectures/ModelFitting)). # We will use R. For starters, clear all variables and graphic devices and load necessary packages: rm(list = ls()) graphics.off() # ## Traits data as an example # # Our first set of examples will focus on traits. # # A trait is any measurable feature of an individual organism. 
This includes physical traits (e.g., morphology, body mass, wing length), performance traits (e.g., biochemical kinetics, respiration rate, body velocity, fecundity), and behavioral traits (e.g., feeding preference, foraging strategy, mate choice). All natural populations show variation in traits across individuals. A trait is functional when it directly (e.g., mortality rate) or indirectly (e.g., somatic development or growth rate) determines individual fitness. Therefore, variation in (functional) traits can generate variation in the rate of increase and persistence of populations. When measured in the context of life cycles, without considering interactions with other organisms (e.g., predators or prey of the focal population), functional traits are typically called life history traits (such as mortality rate and fecundity). Other traits determine interactions both within the focal population (e.g., intra-specific interference or mating frequency) and between the focal population/species and others, including the species which may act as resources (prey, for example). Thus both life history and interaction traits determine population fitness and therefore abundance, which ultimately influences dynamics and functioning of the wider ecosystem, such as carbon fixation rate or disease transmission rate. # ## Biochemical Kinetics # # The properties of an organism's metabolic pathways, and the underlying (enzyme-mediated) biochemical reactions (kinetics) are arguably its most fundamental "traits", because these drive all "performance" traits, from photosynthesis and respiration, to movement and growth rate. # # The [Michaelis-Menten](https://en.wikipedia.org/wiki/Michaelis%E2%80%93Menten_kinetics) model is widely used to quantify reaction kinetics data and estimate key biochemical parameters. 
# This model relates biochemical reaction rate ($V$) (rate of formation of the product of the reaction), to concentration of the substrate ($S$):
#
# $$
# V = \frac{V_{\max} S}{K_M + S}
# $$(eq:M-M)
#
# Here,
#
# * $V_{\max}$ is the maximum rate that can be achieved in the reaction system, which happens at saturating substrate concentration, and
# * $K_M$ is the Michaelis or half-saturation constant, defined as the substrate concentration at which the reaction rate is half of $V_{\max }$.
#
# Biochemical reactions involving a single substrate are often well fitted by the Michaelis-Menten kinetics, suggesting that its assumptions are often valid.
#
# <img src="./graphics/MM.png" alt="Michaelis-Menten model" width="400px">
#
# <small><center>The Michaelis-Menten model.</center></small>
#
# Let's fit the Michaelis-Menten model to some data.
#
#
# ### Generating data
#
# Instead of using real experimental data, we will actually *generate* some "data" because that way we know exactly what the errors in the data are. You can also import and use your own dataset for the fitting steps further below.
#
# We can generate some data as follows:

S_data <- seq(1,50,1) # Generate a sequence of substrate concentrations

S_data

V_data <- ((12.5 * S_data)/(7.1 + S_data)) # Generate a Michaelis-Menten response with V_max = 12.5 and K_M = 7.1

plot(S_data, V_data)

# Note that our choice of $V_{\max} = 12.5$ and $K_M = 7.1$ is completely arbitrary. As long as we make sure that $V_{\max} > 0$, $K_M > 0$, and $K_M$ lies well within the lower half of the range of substrate concentrations (0-50), these "data" will be physically and biologically sensible.
#
# Now let's add some random (normally-distributed) fluctuations to the data to emulate experimental / measurement error:

set.seed(1456) # To get the same random fluctuations in the "data" every time

V_data <- V_data + rnorm(50,0,1) # Add random fluctuations to emulate error with a standard deviation of 1

plot(S_data, V_data)

# That looks real!

# ### Fitting the model
#
# Now, fit the model to the data:

MM_model <- nls(V_data ~ V_max * S_data / (K_M + S_data))

# This warning arises because `nls` requires "starting values" for the parameters (two in this case: `V_max` and `K_M`) to start searching for optimal combinations of parameter values (ones that minimize the RSS). Indeed, all NLLS fitting functions / algorithms require this. If you do not provide starting values, `nls` gives you a warning (as above) and uses a starting value of 1 for every parameter by default. For simple models, despite the warning, this works well enough.
#
# ```{tip}
# Before proceeding further, have a look at what `nls()`'s arguments are using `?nls`, or looking at the documentation [online](https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/nls).
# ```
#
# We will address the issue of starting values soon enough, but first let's look at how good the fit that we obtained looks. The first thing to do is to see how well the model fitted the data, for which plotting is the best first option:

plot(S_data,V_data, xlab = "Substrate Concentration", ylab = "Reaction Rate") # first plot the data
lines(S_data,predict(MM_model),lty=1,col="blue",lwd=2) # now overlay the fitted model

# This looks pretty good.
#
# Note that we used the `predict()` function here just as we did in any of the linear models chapters (e.g., [here](16-MulExp:Predicted-values)).
#
# ```{note}
# In general, you can use most of the same commands/functions (e.g., `predict()` and `summary()`) on the output of a `nls()` model fitting object as you would on a `lm()` model fitting object.
# ```
#
# Now let's get some stats of this NLLS fit. Having obtained the fit object (`MM_model`), we can use `summary()` just like we would for a `lm()` fit object:

summary(MM_model)

# This looks a lot like the output of a linear model, and to be specific, of a [Linear Regression](./14-regress.ipynb). For starters, compare the above output with the output of `summary(genomeSizeModelDragon)` in [this section](regress:perform) of the Linear Regression chapter.
#
# So here are the main things to note about the output of `summary()` of an `nls()` model object:
#
# * `Estimate`s are, as in the output of the `lm()` function for fitting linear models, the estimated values of the coefficients of the model that you fitted ($V_{\max}$ and $K_M$). Note that although we generated our data using $V_{\max} = 12.5$ and $K_M = 7.1$, the actual coefficients are quite different from what we are getting with the NLLS fitting ($\hat{V}_{\max} = 13.5$ and $\hat{K}_M = 9.4$). This is because we introduced random (normally-distributed) errors. This tells you something about how experimental and/or measurement errors can distort your image of the underlying mechanism or process.
# * `Std. Error`, `t value`, and `Pr(>|t|)` and `Residual standard error` have the same interpretation as in the output of `lm()` (please look back at the [Linear Regression Chapter](./14-regress.ipynb))
# * `Number of iterations to convergence` tells you how many times the NLLS algorithm had to adjust the parameter values till it managed to find a solution that minimizes the Residual Sum of Squares (RSS)
# * `Achieved convergence tolerance` tells you on what basis the algorithm decided that it was close enough to a solution; basically if the RSS does not improve more than a certain threshold despite parameter adjustments, the algorithm stops searching. This may or may not be close to an optimal solution (but in this case it is).
# # The last two items are specific to the output of an `nls()` fitting `summary()`, because unlike Ordinary Least Squares (OLS), which is what we used for Linear regression, NLLS is not an *exact* procedure, and the fitting requires computer simulations; revisit the [Lecture](https://github.com/mhasoba/TheMulQuaBio/blob/master/lectures/NLLS) for an explanation of this. This is all you need to know for now. As such, you do not need to report these last two items when presenting the results of an NLLS fit, but they are useful for problem solving in case the fitting does not work (more on this below). # # As noted above, you can use the same sort of commands on a `nls()` fitting result as you can on a `lm()` object. # # For example, you can get just the values of the estimated coefficients using: coef(MM_model) # Thus, much of the output of NLLS fitting using `nls()` is analogous to the output of an `lm()`. However, further statistical inference here cannot be done using Analysis of Variance (ANOVA), because the model is not a Linear Model. Try `anova(MM_model)`, and see what happens. We will address statistical inference with NLLS model fitting further below (with a different example). # # ## Confidence Intervals # # One particularly useful thing you can do after NLLS fitting is to calculate/construct the confidence intervals (CI's) around the estimated parameters in our fitted model, analogous to how we would in the OLS fitting used for Linear Models: confint(MM_model) # The `Waiting for profiling to be done...` message reflects the fact that calculating the standard errors from which the CI's are calculated requires a particular computational procedure (which we will not go into here) when it comes to NLLS fits. 
Calculating confidence intervals can be useful because, as you learned [here](13-t_F_tests:CI), [here](13-t_F_tests:CI2), and [here](15-anova:CI) (among other places) you can use a coefficient/parameter estimate's confidence intervals to test whether it is significantly different from some reference value. Note that the intervals for `K_M` do not include the original value of $K_M = 7.1$ that we used to generate the data!. # # Also, the intervals should not include zero for the coefficient to be statistically significant in itself, that is, different from zero. # ## R-squared values # # To put it simply, unlike an R$^2$ value obtained by fitting a linear model, that obtained from NLLS fitting is not reliable, and should not be used. The reason for this is somewhat technical (e.g., see this [paper](https://dx.doi.org/10.1186%2F1471-2210-10-6)) and we won't go into it here. But basically, NLLS R$^2$ values do not always accurately reflect the quality of fit, and definitely cannot be used to select between competing models (Model selection, as you learned [previously](./18-ModelSimp.ipynb)). Indeed R$^2$ values obtained from NLLS fitting even be negative when the model fits very poorly! We will learn more about model selection with non-linear models later below. # ## The starting values problem # # Now let's revisit the issue of starting values in NLLS fitting. Previously, we fitted the Michaelis-Menten Model without any starting values, and R gave us a warning but managed to fit the model to our synthetic "data" using default starting values. # # Lets try the NLLS fitting again, but with some particular starting values: MM_model2 <- nls(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = 2, K_M = 2)) # Note that unlike before, we got no warning message about starting values. # # Let's compare the coefficient estimates from our two different model fits to the same dataset: coef(MM_model) coef(MM_model2) # Not too different, but not exactly the same! 
# # In contrast, when you fit linear models you will get exactly the same coefficient estimates every single time, because OLS is an *exact* procedure. # Now, let's try even more different start values: MM_model3 <- nls(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = .01, K_M = 10)) # Compare the coefficients of this model fit to the two previous ones: coef(MM_model) coef(MM_model2) coef(MM_model3) # The estimates in our latest model fit are completely different (in fact, `K_M` is negative)! Let's plot this model's and the first model's fit together: plot(S_data,V_data) # first plot the data lines(S_data,predict(MM_model),lty=1,col="blue",lwd=2) # overlay the original model fit lines(S_data,predict(MM_model3),lty=1,col="red",lwd=2) # overlay the latest model fit # As you would have guessed from the really funky coefficient estimates that were obtained in `MM_model3`, this is a pretty poor model fit to the data, with the negative value of `K_M` causing the fitted version of the Michaelis-Menten model to behave strangely. # # Let's try with even more different starting values. nls(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = 0, K_M = 0.1)) # The `singular gradient matrix at initial parameter estimates` error arises from the fact that the starting values you provided were so far from the optimal solution, that the parameter searching in `nls()` failed at the very first step. The algorithm could not figure out where to go from those starting values. In fact, the starting value we gave it is biologically/ physically impossible, because `V_max` can't really equal 0. 
# # Let's look at some more starting values that can cause the model fitting to fail: nls(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = 0.1, K_M = 100)) nls(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = -0.1, K_M = 100)) # In both the above cases, the model fitting was able to start, but eventually failed because the starting values were too far from the (approximately) optimal values ($V_{\max} \approx 13.5, K_M \approx 9.4$). # # And what happens if we start really close to the optimal values? Let's try: MM_model4 <- nls(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = 13.5, K_M = 9.4)) coef(MM_model) coef(MM_model4) # The results of the first model fit and this last one are still not exactly the same! This drives home the point that NLLS is not an "exact" procedure. However, the differences between these two solutions are minuscule, so the main thing to take away is that if the starting values are reasonable, NLLS is *exact enough*. # Note that and even if you started the NLLS fitting with the exact parameter values with which you generated the data before introducing errors (so use `start = list(V_max = 12.5, K_M = 7.1)` above instead), you would still get the same result for the coefficients (try it). This is because the NLLS fitting will converge back to the parameter estimates based on the actual data, errors and all. # ## A more robust NLLS algorithm # # The standard NLLS function in R, `nls`, which we have been using so far, does the NLLS fitting by implementing an algorithm called the Gauss-Newton algorithm. While the Gauss-Newton algorithm works well for most simple non-linear models, it has a tendency to "get lost" or "stuck" while searching for optimal parameter estimates (that minimize the residual sum of squares, or RSS). 
# Therefore, `nls` will often fail to fit your model to the data if you start off at starting values for the parameters that are too far from what the optimal values would be, as you saw above (e.g., when you got the `singular gradient matrix` error).
#
# Some nonlinear models are especially difficult for nls to fit to data because such models have a mathematical form that makes it hard to find parameter combinations that minimize the residual sum of squares (RSS). If this does not make sense, don't worry about it.
#
# One solution to this is to use a different algorithm than Gauss-Newton. `nls()` has one other algorithm that can be more robust in some situations, called the "port" algorithm. However, there is a better solution still: the Levenberg-Marquardt algorithm, which is less likely to get stuck (i.e., is more robust) than Gauss-Newton (or port). If you want to learn more about the technicalities of this, [here](https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm) and [here](https://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm) are good places to start (also see the Readings list at the end of this chapter).
#
#
# We will need to switch to a different NLLS function called `nlsLM`. In order to be able to use `nlsLM`, you will need the `minpack.lm` R package (which provides the underlying `nls.lm` Levenberg-Marquardt implementation), which you can install using the method appropriate for your operating system (e.g., linux users will launch R in `sudo` mode first) and then use:
#
# ```r
# > install.packages("minpack.lm")
# ```
#
# Now load the `minpack.lm` package:

# + run_control={"marked": true}
require("minpack.lm")
# -

# Now let's try it (using the same starting values as `MM_model2` above):

MM_model5 <- nlsLM(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = 2, K_M = 2))

# Now compare the `nls` and `nlsLM` fitted coefficients:

coef(MM_model2)

coef(MM_model5)

# Close enough.
# # Now, let's try fitting the model using all those starting parameter combinations that failed previously: MM_model6 <- nlsLM(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = 1, K_M = 10)) MM_model7 <- nlsLM(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = 0, K_M = 0.1)) MM_model8 <- nlsLM(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = 0.1, K_M = 100)) MM_model9 <- nlsLM(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = -0.1, K_M = 100)) coef(MM_model2) coef(MM_model5) coef(MM_model6) coef(MM_model7) coef(MM_model8) coef(MM_model9) # Nice, these all worked with `nlsLM` even though they had failed with `nls`! But one of them (model 7) still gives you poor values for the coefficients. # # But `nlsLM` also has its limits. Let's try more absurd starting values: nlsLM(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = -10, K_M = -10)) # Thus, using starting values that are in a sensible range is always a good idea. Here, we know that neither $V_{\max}$ nor $K_M$ can be negative, so we can use that bit of information to assign reasonable starting values. # # ```{note} # *How do you find "sensible" starting values for NLLS fitting?* This very much depends on your understanding of the mathematical model that is being fitted to the data, and the mechanistic interpretation of its parameters. # ``` # (20-ModelFitting-NLLS:Bounding)= # ## Bounding parameter values # You can also bound the starting values, i.e., prevent them from exceeding some minimum and maximum value *during* the NLLS fitting process: nlsLM(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = 0.5, K_M = 0.5)) nlsLM(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = 0.5, K_M = 0.5), lower=c(0.4,0.4), upper=c(100,100)) # So the solution was found in one lesser iteration (not a spectacular improvement, but an improvement nevertheless). 
# # However, if you bound the parameters too much (to excessively narrow ranges), the algorithm cannot search sufficient parameter space (combinations of parameters), and will fail to converge on a good solution. For example: nlsLM(V_data ~ V_max * S_data / (K_M + S_data), start = list(V_max = 0.5, K_M = 0.5), lower=c(0.4,0.4), upper=c(20,20)) # Here the algorithm converged on a poor solution, and in fact took fewer iterations (3) than before to do so. This is because it could not explore sufficient parameter combinations of `V_max` and `K_M` as we have narrowed the range that both these parameters could be allowed to take during the optimization too much. # ## Diagnostics of an NLLS fit # # NLLS regression carries the same three key assumptions as Linear models: # # * No (in practice, minimal) measurement error in explanatory/independent/predictor variable ($x$-axis variable) # # * Data have constant normal variance --- errors in the $y$-axis are homogeneously distributed over the $x$-axis range # # * The measurement/observation errors are Normally distributed (Gaussian) # # At the very least, it is a good idea to plot the residuals of a fitted NLLS model. Let's do that for our Michaelis-Menten Model fit: hist(residuals(MM_model6)) # The residuals look OK. But this should not come as a surprise because we generated these "data" ourselves using normally-distributed errors! # # You may also want to look at further diagnostics, as we did [previously](14-regress:Diagnostics) in the case of Linear models. The most convenient way to do this is to use the `nlstools` package. We will not go into it here, but you can have a look at its [documentation](https://rdrr.io/rforge/nlstools/). Note that you will need to install this package as it is not one of the core (base) R packages. # ```{note} # For the remaining examples, we will switch to using `nlsLM` instead of `nls`. 
# ``` # ## Allometric scaling of traits # # Now let's move on to a very common class of traits in biology: physical traits like body weight, wing span, body length, limb length, eye size, ear width, etc. # # We will look at a very common phenomenon called [allometric scaling](https://en.wikipedia.org/wiki/Allometry). Allometric relationships between linear measurements such as body length, limb length, wing span, and thorax width are a good way to obtain estimates of body weights of individual organisms. We will look at allometric scaling of body weight vs. total body length in dragonflies and damselfiles. # # Allometric relationships take the form: # $$ # y = a x^b # $$(eq:allom) # where $x$ and $y$ are morphological measures (body length and body weight respectively, in our current example), the constant is the value of $y$ at body length $x = 1$ unit, and $b$ is the scaling "exponent". This is also called a power-law, because $y$ relates to $x$ through a simple power. # Let's fit a power low to a typical allometric relationship: The change in body weight vs change in body length. In general, this relationship is a allometry; that is, body weight does not increase proportionally with some measure of body length. # # First, let's look at the data. You can get the data [here](https://raw.githubusercontent.com/mhasoba/TheMulQuaBio/master/content/data/GenomeSize.csv) (first click on link and use "Save as" or `Ctrl+S` to download it as a csv). # # $\star$ Save the `GenomeSize.csv` data file to your `data` directory, and import it into your R workspace: # + MyData <- read.csv("../data/GenomeSize.csv") # using relative path assuming that your working directory is "code" head(MyData) # - # [Anisoptera](https://en.wikipedia.org/wiki/Dragonfly) are dragonflies, and [Zygoptera](https://en.wikipedia.org/wiki/Damselfly) are Damselflies. The variables of interest are `BodyWeight` and `TotalLength`. Let's use the dragonflies data subset. 
# So subset the data accordingly and remove NAs: # + Data2Fit <- subset(MyData,Suborder == "Anisoptera") Data2Fit <- Data2Fit[!is.na(Data2Fit$TotalLength),] # remove NA's # - # Plot the data: plot(Data2Fit$TotalLength, Data2Fit$BodyWeight, xlab = "Body Length", ylab = "Body Weight") # Or, using `ggplot`: # + library("ggplot2") ggplot(Data2Fit, aes(x = TotalLength, y = BodyWeight)) + geom_point(size = (3),color="red") + theme_bw() + labs(y="Body mass (mg)", x = "Wing length (mm)") # - # You can see these body weights of dragonflies does not increase proportionally with body length &ndash; they curve upwards w.r.t. wing length (so the allometric constant $b$ in eqn {eq}`eq:allom` mustbe greater than 1), instead of increasing as a straight line (in which case $b = 1$ (isometry, instead of allometry). # Now fit the model to the data using NLLS: PowFit <- nlsLM(BodyWeight ~ a * TotalLength^b, data = Data2Fit, start = list(a = .1, b = .1)) # The first thing to do is to see how well the model fitted the data, for which plotting is the best first option. So let's visualize the fit. For this, first we need to generate a vector of body lengths (the x-axis variable) for plotting: Lengths <- seq(min(Data2Fit$TotalLength),max(Data2Fit$TotalLength),len=200) coef(PowFit)["a"] coef(PowFit)["b"] Predic2PlotPow <- powMod(Lengths,coef(PowFit)["a"],coef(PowFit)["b"]) # Next, calculate the predicted line. For this, we will need to extract the coefficient from the model fit object using the `coef()`command. # Now plot the data and the fitted model line: plot(Data2Fit$TotalLength, Data2Fit$BodyWeight) lines(Lengths, Predic2PlotPow, col = 'blue', lwd = 2.5) # Now lets get some stats of this NLLS fit. 
Having obtained the fit object (`PowFit`), we can use `summary()` just like we would for a `lm()` fit object:
# # In this exercise, try comparing the NLLS vs OLS methods to see how much difference you get in the parameter estimates between them. For example, see the methods used in this paper by [Cohen et al 2012](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3465447/). # # (e) The allometry between Body weight and Length is not the end of the story. You have a number of other linear morphological measurements (`HeadLength`, `ThoraxLength`, `AdbdomenLength`, `ForewingLength`, `HindwingLength`, `ForewingArea`, and `HindwingArea`) that can also be investigated. In this exercise, try two lines of investigation (again, repeated separately for Dragonflies and Damselfiles): # # (i) How do each of these measures allometrically scale with Body length (obtain estimates of scaling constant and exponent)? (Hint: you may want to use the `pairs()` command in R to get an overview of all the pairs of potential scaling relationships. # # (ii) Do any of the linear morphological measurements other than body length better predict Body weight? That is, does body weight scale more tightly with a linear morphological measurement other than total body length? You would use model selection here, which we will learn next. But for now, you can just look at and compare the $R^2$ values of the models. # (Model-Fitting-R-Comparing-Models)= # ## Comparing models # # *How do we know that there isn't a better or alternative model that adequately explains the pattern in your dataset?* # # This is important consideration in all data analyses (and more generally, the scientific method!), so you must aim to compare your NLLS model with an one or more alternatives for a more extensive and reliable investigation of the problem. # # Let's use model comparison to investigate whether the relationship between body weight and length we found above is indeed allometric. For this, we need an alternative model that can be fitted to the same data. 
Let's try a quadratic curve, which is of the form: # # $$ # y = a + b x + c x^2 # $$ # # This can also capture curvature in data, and is an alternative model to the [allometric equation](#eq:allom). Note that this mode is linear in its parameters (a linear model), which you can fit to the simply data using your familiar `lm()` function: QuaFit <- lm(BodyWeight ~ poly(TotalLength,2), data = Data2Fit) # And like before, we obtain the predicted values (but this time using the `predict.lm` function): Predic2PlotQua <- predict.lm(QuaFit, data.frame(TotalLength = Lengths)) # Now let's plot the two fitted models together: plot(Data2Fit$TotalLength, Data2Fit$BodyWeight) lines(Lengths, Predic2PlotPow, col = 'blue', lwd = 2.5) lines(Lengths, Predic2PlotQua, col = 'red', lwd = 2.5) # Very similar fits, except that the quadratic model seems to deviate a bit from the data at the lower end of the data range. Let's do a proper, formal model comparison now to check which model better-fits the data. # # First calculate the R$^2$ values of the two fitted models: # + RSS_Pow <- sum(residuals(PowFit)^2) # Residual sum of squares TSS_Pow <- sum((Data2Fit$BodyWeight - mean(Data2Fit$BodyWeight))^2) # Total sum of squares RSq_Pow <- 1 - (RSS_Pow/TSS_Pow) # R-squared value RSS_Qua <- sum(residuals(QuaFit)^2) # Residual sum of squares TSS_Qua <- sum((Data2Fit$BodyWeight - mean(Data2Fit$BodyWeight))^2) # Total sum of squares RSq_Qua <- 1 - (RSS_Qua/TSS_Qua) # R-squared value RSq_Pow RSq_Qua # - # Not very useful. In general, R$^2$ is a good measure of model fit, but cannot be used for model selection &ndash; especially not here, given the tiny difference in R$^2$'s. 
# # Instead, as explained in the [lecture](https://github.com/mhasoba/TheMulQuaBio/blob/master/lectures/ModelFitting), we can use the Akaike Information Criterion (AIC): # + n <- nrow(Data2Fit) #set sample size pPow <- length(coef(PowFit)) # get number of parameters in power law model pQua <- length(coef(QuaFit)) # get number of parameters in quadratic model AIC_Pow <- n + 2 + n * log((2 * pi) / n) + n * log(RSS_Pow) + 2 * pPow AIC_Qua <- n + 2 + n * log((2 * pi) / n) + n * log(RSS_Qua) + 2 * pQua AIC_Pow - AIC_Qua # - # Of course, as you might have suspected, we can do this using an in-built function in R! AIC(PowFit) - AIC(QuaFit) # * So which model wins? * As we had dicussed in the NLLS lecture, a rule of thumb is that a AIC value difference (typically denoted as $\Delta$AIC) > 2 is a acceptable cutoff for calling a winner. So the power law (allometric model) is a better fit here. Read the [Johnson & Omland paper](https://github.com/mhasoba/TheMulQuaBio/blob/master/readings/Modelling/JohnsonOmland2004.pdf) for more on model selection in Ecology and Evolution. # ### Exercises <a id='ModelSelection_Exercises'></a> # # (a) Calculate the Bayesian Information Criterion (BIC), also know as the Schwarz Criterion (see your Lecture notes and the [Johnson & Omland paper](https://github.com/mhasoba/TheMulQuaBio/blob/master/readings/Modelling/JohnsonOmland2004.pdf), and use $\Delta$BIC to select the better fitting model. # # (b) Fit a straight line to the same data and compare with the allometric and quadratic models. # # (c) Repeat the model comparison (incuding 1-2 above) using the Damselflies (Zygoptera) data subset -- does the allometric model still win? # # (d) Repeat exercise (e)(i) and (ii) from the [above set](#Allom_Exercises), but with model comparison (e.g., again using a quadratic as an alternative model) to establish that the relationships are indeed allometric. 
# # (e) Repeat exercise (e)(ii) from the [above set](#Allom_Exercises), but with model comparison to establish which linear measurement is the best predictor of Body weight. # ## Albatross chick growth # # Now let's look at a different trait example: the growth of an individual albatross chick (you can find similar data for vector and non-vector arthropods in [VecTraits](https://vectorbyte.org/)). First load and plot the data: alb <- read.csv(file="../data/albatross_grow.csv") alb <- subset(x=alb, !is.na(alb$wt)) plot(alb$age, alb$wt, xlab="age (days)", ylab="weight (g)", xlim=c(0, 100)) # ##### Fitting the three models using NLLS # # Let's fit multiple models to this dataset. # # The Von Bertalanffy model is commonly used for modelling the growth of an individual. It's formulation is: # # $$ # W(t) = \rho (L_{\infty}(1-e^{-Kt})+L_0 e^{-Kt})^3 # $$ # # If we pull out $L_{\infty}$ and define $c=L_0/L_{\infty}$ and $W_{\infty}=\rho L_{\infty}^3$ this equation becomes: # # $$ # W(t) = W_{\infty}(1-e^{-Kt}+ c e^{-Kt})^3. # $$ # # $W_{\infty}$ is interpreted as the mean asymptotic weight, and $c$ the ratio between the initial and final lengths. This second equation is the one we will fit. # # We will compare this model against the classical Logistic growth equation and a straight line. # # The logistic equation is: # # $$ # N_t = \frac{N_0 K e^{r t}}{K + N_0 (e^{r t} - 1)} # $$ # # Here $N_t$ is population size at time $t$, $N_0$ is initial population size, $r$ is maximum growth rate (AKA $r_\text{max}$), and $K$ is carrying capacity. # # # First, as we did before, let's define the R functions for the two models: # + logistic1 <- function(t, r, K, N0){ N0 * K * exp(r * t)/(K+N0 * (exp(r * t)-1)) } vonbert.w <- function(t, Winf, c, K){ Winf * (1 - exp(-K * t) + c * exp(-K * t))^3 } # - # For the straight line, we use simply use R's `lm()` function, as that is a linear least squares problem. Using NLLS will give (approximately) the same answer, of course. 
Now fit all 3 models using least squares. # # We will scale the data before fitting to improve the stability of the estimates: # + scale <- 4000 alb.lin <- lm(wt/scale ~ age, data = alb) # + alb.log <- nlsLM(wt/scale~logistic1(age, r, K, N0), start=list(K=1, r=0.1, N0=0.1), data=alb) alb.vb <- nlsLM(wt/scale~vonbert.w(age, Winf, c, K), start=list(Winf=0.75, c=0.01, K=0.01), data=alb) # - # Next let's calculate predictions for each of the models across a range of ages. # + ages <- seq(0, 100, length=1000) pred.lin <- predict(alb.lin, newdata = list(age=ages)) * scale pred.log <- predict(alb.log, newdata = list(age=ages)) * scale pred.vb <- predict(alb.vb, newdata = list(age=ages)) * scale # - # And finally plot the data with the fits: # + plot(alb$age, alb$wt, xlab="age (days)", ylab="weight (g)", xlim=c(0,100)) lines(ages, pred.lin, col=2, lwd=2) lines(ages, pred.log, col=3, lwd=2) lines(ages, pred.vb, col=4, lwd=2) legend("topleft", legend = c("linear", "logistic", "<NAME>"), lwd=2, lty=1, col=2:4) # - # Next examine the residuals between the 3 models: par(mfrow=c(3,1), bty="n") plot(alb$age, resid(alb.lin), main="LM resids", xlim=c(0,100)) plot(alb$age, resid(alb.log), main="Logisitic resids", xlim=c(0,100)) plot(alb$age, resid(alb.vb), main="VB resids", xlim=c(0,100)) # The residuals for all 3 models still exhibit some patterns. In particular, the data seems to go down near the end of the observation period, but none of these models can capture that behavior. # # Finally, let's compare the 3 models using a simpler approach than the AIC/BIC one that we used [above](#Allom_Exercises) by calculating adjusted Sums of Squared Errors (SSE's): n <- length(alb$wt) list(lin=signif(sum(resid(alb.lin)^2)/(n-2 * 2), 3), log= signif(sum(resid(alb.log)^2)/(n-2 * 3), 3), vb= signif(sum(resid(alb.vb)^2)/(n-2 * 3), 3)) # The adjusted SSE accounts for sample size and number of parameters by dividing the RSS by the residual degrees of freedom. 
Adjusted SSE can also be used for model selection like AIC/BIC (but is less robust than AIC/BIC). The residual degrees of freedom is calculated as the number of response values (sample size, $n$) minus 2, times the number of fitted coefficients $m$ (= 2 or 3 in this case) estimated. # # The logistic model has the lowest adjusted SSE, so it's the best by this measure. It is also, visually, a better fit. # ### Exercises <a id='Albatross_Exercises'></a> # # (a) Use AIC/BIC to perform model selection on the Albatross data as we did for the trait allometry example. # # (b) Write this example as a self-sufficient R script, with ggplot istead of base plotting # ## Aedes aegypti fecundity # # Now let's look at a disease vector example. These data measure the reponse of * Aedes aegypti * fecundity to temperature. # # First load and visualize the data: # + aedes <- read.csv(file="../data/aedes_fecund.csv") plot(aedes$T, aedes$EFD, xlab="temperature (C)", ylab="Eggs/day") # - # (Model-Fitting-NLLS-TPCs)= # ### The Thermal Performance Curve models # # Let's define some models for Thermal Performance Curves: quad1 <- function(T, T0, Tm, c){ c * (T-T0) * (T-Tm) * as.numeric(T<Tm) * as.numeric(T>T0) } # Instead of using the inbuilt quadratic function in R, we we defined our own to make it easier to choose starting values, and so that we can force the function to be equal to zero above and below the minimum and maximum temperature thresholds (more on this below). briere <- function(T, T0, Tm, c){ c * T * (T-T0) * (abs(Tm-T)^(1/2)) * as.numeric(T<Tm) * as.numeric(T>T0) } # The Briere function is a commonly used model for temperature dependence of insect traits. See here [section](Miniproj-TPCs-Models) for more info. 
Unlike the original [model definition](Miniproj-TPCs-Models), we have used `abs()` to allow the NLLS algorithm to explore the full parameter space of $T_m$; if we did not do this, the NLLS could fail as soon as a value of $T_m < T$ was reached during the optimization, because the [square root of a negative number is complex](https://en.wikipedia.org/wiki/Square_root). Another way to deal with this issue is to set parameter bounds on $T_m$ so that it is can never be less than T. However, this is a more technical approach that we will not go into here. # # As in the case of the albatross growth data, we will also compare the above two models with a * straight line * (again, its a linear model, so we can just use `lm()` without needing to define a function for it). # # Now fit all 3 models using least squares. Although it's not as necessary here (as the data don't have as large values as the albatross example), lets again scale the data first: # + scale <- 20 aed.lin <- lm(EFD/scale ~ T, data=aedes) aed.quad <- nlsLM(EFD/scale~quad1(T, T0, Tm, c), start=list(T0=10, Tm=40, c=0.01), data=aedes) aed.br <- nlsLM(EFD/scale~briere(T, T0, Tm, c), start=list(T0=10, Tm=40, c=0.1), data=aedes) # - # ### Exercises <a id='Aedes_Exercises'></a> # # (a) Complete the * Aedes * data analysis by fitting the models, calculating predictions and then comparing models. Write a single, self-standing script for it. Which model fits best? By what measure? # # (b) In this script, use ggplot instead of base plotting. # ## Abundance data as an example # # Fluctuations in the abundance (density) of single populations may play a crucial role in ecosystem dynamics and emergent functional characteristics, such as rates of carbon fixation or disease transmission. 
For example, if vector population densities or their traits change at the same or shorter timescales than the rate of disease transmission, then (vector) abundance fluctuations can cause significant fluctuations in disease transmission rates. Indeed, most disease vectors are small ectotherms with short generation times and greater sensitivity to environmental conditions than their (invariably larger, longer-lived, and often, endothermic) hosts. So understanding how populations vary over time, space, and with respect to environmental variables such as temperature and precipitation is key. We will look at fitting models to the growth of a single population here. # # (Model-Fitting-R-Population-Growth)= # # ## Population growth rates # # A population grows exponentially while its abundance is low and resources are not limiting (the Malthusian principle). This growth then slows and eventually stops as resources become limiting. There may also be a time lag before the population growth really takes off at the start. We will focus on microbial (specifically, bacterial) growth rates. Bacterial growth in batch culture follows a distinct set of phases; lag phase, exponential phase and stationary phase. During the lag phase a suite of transcriptional machinery is activated, including genes involved in nutrient uptake and metabolic changes, as bacteria prepare for growth. During the exponential growth phase, bacteria divide at a constant rate, the population doubling with each generation. When the carrying capacity of the media is reached, growth slows and the number of cells in the culture stabilizes, beginning the stationary phase. # # Traditionally, microbial growth rates were measured by plotting cell numbers or culture density against time on a semi-log graph and fitting a straight line through the exponential growth phase &ndash; the slope of the line gives the maximum growth rate ($r_{max}$). 
Models have since been developed which we can use to describe the whole sigmoidal bacterial growth curve (e.g., using NLLS). Here we will take a look at these different approaches, from applying linear models to the exponential phase, through to fitting non-linear models to the full growth curve. # # Let's first generate some "data" on the number of bacterial cells as a function of time that we can play with: # + t <- seq(0, 22, 2) N <- c(32500, 33000, 38000, 105000, 445000, 1430000, 3020000, 4720000, 5670000, 5870000, 5930000, 5940000) set.seed(1234) # set seed to ensure you always get the same random sequence data <- data.frame(t, N * (1 + rnorm(length(t), sd = 0.1))) # add some random error names(data) <- c("Time", "N") head(data) # - # Note how we added some random "sampling" error using `N * (1 + rnorm(length(t), sd = .1))`. # # This means that we are adding an error at each time point $t$ (let's call this fluctuation $\epsilon_t$) as a * percentage * of the population ($N_t$) at that time point in a vectorized way. That is, we are performing the operation $N_t \times (1 + \epsilon_t)$ at all time points at one go. This is important to note because this is often the way that errors appear &ndash; proportional to the value being measured. # Now let's plot these data: ggplot(data, aes(x = Time, y = N)) + geom_point(size = 3) + labs(x = "Time (Hours)", y = "Population size (cells)") # #### Basic approach # # The size of an exponentially growing population ($N$) at any given time ($t$) is given by: # # $ # N(t) = N_0 e^{rt} , # $ # # where $N_0$ is the initial population size and $r$ is the growth rate. We can re-arrange this to give: # # $ # r = \frac{\log(N(t)) - \log(N_0)}{t} , # $ # # That is, in exponential growth at a constant rate, the growth rate can be simply calculated as the difference in the log of two population sizes, over time. We will log-transform the data and estimate by eye where growth looks exponential. 
# + data$LogN <- log(data$N) # visualise ggplot(data, aes(x = t, y = LogN)) + geom_point(size = 3) + labs(x = "Time (Hours)", y = "log(cell number)") # - # By eye the logged data looks fairly linear (beyond the initial "lag phase" of growth; see below) between hours 5 and 10, so we'll use that time-period to calculate the growth rate. (data[data$Time == 10,]$LogN - data[data$Time == 6,]$LogN)/(10-6) # This is our first, most basic estimate of $r$. # # Or, we can decide not to eyeball the data, but just pick the maximum observed gradient of the curve. For this, we can use the the `diff()` function: diff(data$LogN) # This gives all the (log) population size differences between successive timepoint pairs. The max of this is what we want, divided by the time-step. max(diff(data$LogN))/2 # 2 is the difference in any successive pair of timepoints # ### Using OLS # # But we can do better than this. To account for some error in measurement, we shouldn't really take the data points directly, but fit a linear model through them instead, where the slope gives our growth rate. This is pretty much the "traditional" way of calculating microbial growth rates &ndash; draw a straight line through the linear part of the log-transformed data. lm_growth <- lm(LogN ~ Time, data = data[data$Time > 2 & data$Time < 12,]) summary(lm_growth) # Npw we get $r \approx 0.62$, which is probably closer to the "truth". # # But this is still not ideal because we only guessed the exponential phase by eye. We could do it better by iterating through different windows of points, comparing the slopes and finding which the highest is to give the maximum growth rate, $r_{max}$. This is called a "rolling regression". # # Or better still, we can fit a more appropriate mathematical model using NLLS! 
# ### Using NLLS # # For starters, a classical, (somewhat) mechanistic model is the logistic equation: # # $$ # N_t = \frac{N_0 K e^{r t}}{K + N_0 (e^{r t} - 1)} # $$(eq:logist_growth_sol) # # Here $N_t$ is population size at time $t$, $N_0$ is initial population size, $r$ is maximum growth rate (AKA $r_\text{max}$), and $K$ is carrying capacity (maximum possible abundance of the population). Note that this model is actually the solution to the differential equation that defines the classic logistic population growth model (eqn. {eq}`eq:logist_growth`). # # ```{note} # The derivation of eqn. {eq}`eq:logist_growth_sol` is covered [here](Logistic-Population-Growth). But you don't need to know the derivation to fit eqn. {eq}`eq:logist_growth_sol` to data. # ``` # # Let's fit it to the data. First, we need to define it as a function object: logistic_model <- function(t, r_max, K, N_0){ # The classic logistic equation return(N_0 * K * exp(r_max * t)/(K + N_0 * (exp(r_max * t) - 1))) } # Now fit it: # + # first we need some starting parameters for the model N_0_start <- min(data$N) # lowest population size K_start <- max(data$N) # highest population size r_max_start <- 0.62 # use our estimate from the OLS fitting from above fit_logistic <- nlsLM(N ~ logistic_model(t = Time, r_max, K, N_0), data, list(r_max=r_max_start, N_0 = N_0_start, K = K_start)) summary(fit_logistic) # - # We did not pay much attention to what starting values we used in the simpler example of fitting the allometric model because the power-law model is easy to fit using NLLS, and starting far from the optimal parameters does not matter too much. Here, we used the actual data to generate more realistic start values for each of the three parameters (`r_max`, `N_0`, `K`) of the Logistic equation. 
# Now, plot the fit: # + timepoints <- seq(0, 22, 0.1) logistic_points <- logistic_model(t = timepoints, r_max = coef(fit_logistic)["r_max"], K = coef(fit_logistic)["K"], N_0 = coef(fit_logistic)["N_0"]) df1 <- data.frame(timepoints, logistic_points) df1$model <- "Logistic equation" names(df1) <- c("Time", "N", "model") ggplot(data, aes(x = Time, y = N)) + geom_point(size = 3) + geom_line(data = df1, aes(x = Time, y = N, col = model), size = 1) + theme(aspect.ratio=1)+ # make the plot square labs(x = "Time", y = "Cell number") # - # That looks nice, and the $r_{max}$ estimate we get (0.64) is fairly close to what we got above with OLS fitting. # # Note that we've done this fitting to the original non transformed data, whilst the linear regressions earlier were on log transformed data. What would this function look like on a log-transformed axis? ggplot(data, aes(x = Time, y = LogN)) + geom_point(size = 3) + geom_line(data = df1, aes(x = Time, y = log(N), col = model), size = 1) + theme(aspect.ratio=1)+ labs(x = "Time", y = "log(Cell number)") # The model actually diverges from the data at the lower end! This was not visible in the previous plot where you examined the model in linear scale (without taking a log) because the deviation of the model is small, and only becomes clear in the log scale. This is because of the way logarithms work. Let's have a look at this in our Cell counts "data": ggplot(data, aes(x = N, y = LogN)) + geom_point(size = 3) + theme(aspect.ratio = 1)+ labs(x = "N", y = "log(N)") # As you can see the logarithm is a strongly nonlinear transformation of any sequence of real numbers, with small numbers close to zero yielding disproportionately large deviations. # ```{note} # You may play with increasing the error (by increasing the value of `sd` in synthetic data generation step above) and re-evaluating all the subsequent model fitting steps above. 
However, note that above some values of `sd`, you will start to get negative values of populations, especially at early time points, which will raise issues with taking a logarithm. # ``` # The above seen deviation of the Logistic model from the data is because this model assumes that the population is growing right from the start (Time = 0), while in "reality" (in our synthetic "data"), this is not what's happening; the population takes a while to grow truly exponentially (i.e., there is a time lag in the population growth). This time lag is seen frequently in the lab, and is also expected in nature, because when bacteria encounter fresh growth media (in the lab) or a new resource/environment (in the field), they take some time to acclimate, activating genes involved in nutrient uptake and metabolic processes, before beginning exponential growth. This is called the lag phase and can be seen in our example data where exponential growth doesn't properly begin until around the 4th hour. # # To capture the lag phase, more complicated bacterial growth models have been designed. # # One of these is the modified Gompertz model (Zwietering et. al., 1990), which has been used frequently in the literature to model bacterial growth: # # $$ # \log(N_t) = N_0 + (N_{max} - N_0) e^{-e^{r_{max} \exp(1) \frac{t_{lag} - t}{(N_{max} - N_0) \log(10)} + 1}} # $$(eq:Gompertz) # # Here maximum growth rate ($r_{max}$) is the tangent to the inflection point, $t_{lag}$ is the x-axis intercept to this tangent (duration of the delay before the population starts growing exponentially) and $\log\left(\frac{N_{max}}{N_0}\right)$ is the asymptote of the log-transformed population growth trajectory, i.e., the log ratio of maximum population density $N_{max}$ (aka "carrying capacity") and initial cell (Population) $N_0$ density. # ```{note} # Note that unlike the Logistic growth model above, the Gompertz model is in the log scale. 
This is because the model is not derived from a differential equation, but was designed * specifically * to be fitted to log-transformed data. # ``` # # Now let's fit and compare the two alternative nonlinear growth models: Logistic and Gompertz. # # First, specify the function object for the Gompertz model (we already defined the function for the Logistic model above): gompertz_model <- function(t, r_max, K, N_0, t_lag){ # Modified gompertz growth model (Zwietering 1990) return(N_0 + (K - N_0) * exp(-exp(r_max * exp(1) * (t_lag - t)/((K - N_0) * log(10)) + 1))) } # Again, note that unlike the Logistic growth function above, this function has been written in the log scale. # # Now let's generate some starting values for the NLLS fitting of the Gompertz model. # # As we did above for the logistic equation, let's derive the starting values by using the actual data: N_0_start <- min(data$LogN) # lowest population size, note log scale K_start <- max(data$LogN) # highest population size, note log scale r_max_start <- 0.62 # use our previous estimate from the OLS fitting from above t_lag_start <- data$Time[which.max(diff(diff(data$LogN)))] # find last timepoint of lag phase # * So how did we find a reasonable time lag from the data? 
* # # Let's break the last command down: diff(data$LogN) # same as what we did above - get differentials diff(diff(data$LogN)) # get the differentials of the differentials (approx 2nd order derivatives) which.max(diff(diff(data$LogN))) # find the timepoint where this 2nd order derivative really takes off data$Time[which.max(diff(diff(data$LogN)))] # This then is a good guess for the last timepoint of the lag phase # Now fit the model using these start values: fit_gompertz <- nlsLM(LogN ~ gompertz_model(t = Time, r_max, K, N_0, t_lag), data, list(t_lag=t_lag_start, r_max=r_max_start, N_0 = N_0_start, K = K_start)) # You might one or more warning(s) that the model fitting iterations generated NaNs during the fitting procedure for these data (because at some point the NLLS fitting algorithm "wandered" to a combination of K and N_0 values that yields a NaN for log(K/N_0)). # # You can ignore these warning in this case. But not always &ndash; sometimes these NaNs mean that the equation is wrongly written, or that it generates NaNs across the whole range of the x-values, in which case the model is inappropriate for these data. 
# Get the model summary:

summary(fit_gompertz)

# And see how the fits of the two nonlinear models compare:

# +
# Evaluate both fitted models on a fine time grid so the curves plot smoothly.
timepoints <- seq(0, 24, 0.1)

# The logistic model was fitted on the linear scale, so log() its predictions to
# compare against the log-transformed data; the Gompertz model already predicts
# log-abundance directly.
logistic_points <- log(logistic_model(t = timepoints, r_max = coef(fit_logistic)["r_max"], K = coef(fit_logistic)["K"], N_0 = coef(fit_logistic)["N_0"]))

gompertz_points <- gompertz_model(t = timepoints, r_max = coef(fit_gompertz)["r_max"], K = coef(fit_gompertz)["K"], N_0 = coef(fit_gompertz)["N_0"], t_lag = coef(fit_gompertz)["t_lag"])

# Stack the two prediction sets into one long data frame so ggplot can colour
# the curves by model.
df1 <- data.frame(timepoints, logistic_points)
df1$model <- "Logistic model"
names(df1) <- c("Time", "LogN", "model")

df2 <- data.frame(timepoints, gompertz_points)
df2$model <- "Gompertz model"
names(df2) <- c("Time", "LogN", "model")

model_frame <- rbind(df1, df2)

ggplot(data, aes(x = Time, y = LogN)) +
  geom_point(size = 3) +
  geom_line(data = model_frame, aes(x = Time, y = LogN, col = model), size = 1) +
  theme_bw() + # make the background white
  theme(aspect.ratio=1)+ # make the plot square
  labs(x = "Time", y = "log(Abundance)")
# -

# Clearly, the Gompertz model fits way better than the logistic growth equation in this case! Note also that there is a big difference in the fitted value of $r_{max}$ from the two models; the value is much lower from the Logistic model because it ignores the lag phase, including it into the exponential growth phase.
#
# You can now perform model selection like you did above in the allometric scaling example.

# ### Exercises
#
# (a) Calculate the confidence intervals on the parameters of each of the two fitted models, and use model selection (using AIC and/or BIC) as you did before to see if you can determine the best-fitting model among the three.
#
# (b) Alternatively, for a different random sequence of fluctuations, one or more of the models may fail to fit (a `singular gradient matrix` error).
Try repeating the above fitting with a different random seed (change the integer given to the `set.seed()` function), or increase the sampling error by increasing the standard deviation, and see if it happens. If/when the NLLS optimization does fail to converge (the RSS minimum was not found), you can try to fix it by changing the starting values.
#
# (c) Repeat the model comparison exercise 1000 times (You will have to write a loop), and determine whether one model generally wins more often than the others. Note that each run will generate a slightly different dataset, because we are adding a vector of random errors every time the "data" are generated. This may result in failure of the NLLS fitting to converge, in which case you will need to use the [`try()` or `tryCatch` functions](https://nbviewer.jupyter.org/github/mhasoba/TheMulQuaBio/blob/master/notebooks/07-R.ipynb).
#
# (d) Repeat (b), but increase the error by increasing the standard deviation of the normal error distribution, and see if there are differences in the robustness of the models to sampling/experimental errors. You may also want to try changing the distribution of the errors to some non-normal distribution and see what happens.

# ## Some tips and tricks for NLLS fitting
#
# (Model-Fitting-NLLS-Starting-Values)=
# ### Starting values
#
# The main challenge for NLLS fitting is finding starting (initial) values for the parameters, which the algorithm needs to proceed with the fitting/optimization. Inappropriate starting values can result in the algorithm finding parameter combinations that represent convergence to a local optimum rather than the (globally) optimal solution. Poor starting parameter estimates can also result in complete "divergence", i.e., the search results in a combination of parameters that cause mathematical "singularity" (e.g., log(0) or division by zero).
# # #### Obtaining them # # Finding the starting values is a [bit of an art](https://en.wikipedia.org/wiki/Non-linear_least_squares#Initial_parameter_estimates). There is no method for finding starting values that works universally (across different types of models). # # The one universal rule though, is that finding starting values requires you to understand the meaning of each of the parameters in your model. So, for example, in the population [growth rate example](Model-Fitting-R-Population-Growth), the parameters in both the nonlinear models that [we covered](Model-Fitting-R-Population-Growth) (Logistic growth, eqn. {eq}`eq:logist_growth_sol` , Gompertz model; eqn. {eq}`eq:Gompertz`) have a clear meaning. # # Furthermore, you will typically need to determine starting values *specific* to each model *and* each dataset that that you are wanting to fit that model to (e.g., every distinct functional response dataset to be [fitted to the Holling Type II model](Miniproject-FR-Models)). To do so, understanding how each parameter in the model corresponds to features of the actual data is really necessary. # # For example, in the Gompertz population growth rate model (eqn. {eq}`eq:Gompertz`), your starting values generator function would, for each dataset, # * Calculate a starting value for $r_{max}$ by searching for the steepest slope of the growth curve (e.g., with a rolling OLS regression) # * Calculate a starting value of $t_{lag}$ by intersecting the fitted line with the x (time)-axis # * Calculate a starting value for the asymptote $K$ as the highest data (abundance) value in the dataset. # # ```{tip} # Ideally, you should write a separate a function that calculates starting values for the model parameters. 
# ``` # # #### Sampling them # # Once you have worked out how to generate starting values for each non-linear model and dataset, a good next step for optimizing the fitting across multiple datasets (and thus maximize how many datasets are successfully fitted to the model) is to *rerun fitting attempts multiple times, sampling each of the starting values (simultaneously) randomly* (that is, randomly vary the set of starting values a bit each time). This sampling of starting values will increase the likelihood of the NLLS optimization algorithm finding a solution (optimal combination of parameters), and not getting stuck in a combination of parameters somewhere far away from that optimal solution.  # # In particular, # * You can choose a Gaussian/Normal distribution if you have high confidence in mean value of parameter, or # * You can uniform distribution if you have low confidence in the mean, but higher confidence in the range of values that the parameter can take. # In both cases, the *mean* of the sampling distribution will be the starting value you inferred from the model and the data (previous section). # # Furthermore, # * Whichever distribution you choose (gaussian vs uniform), you will need to determine what range of values to restrict each parameter's samples to. In the case of the normal distribution, this is determined by what standard deviation parameter (you choose), and in the case of the uniform distribution, this is determined by what lower and upper bound (you choose). Generally, a good approach is to set the bound to be some *percent* (say 5-10%) of the parameter's (mean) starting value. In both cases the chosen range to restrict the sampling to would typically be some subset of the model's *parameter bounds* (next section). # * *How many times to re-run* the fitting for a single dataset and model?* &ndash; this depends on how "difficult" the model is, and how much computational power you have. 
# # ```{tip} # For the sampling of starting values, recall that you learned to generate random numbers from probability distributions in both the [R](R-random-numbers) and [Python](Python-scipy-stats) chapters).  # ``` # You may also try and use a more sophisticated approach such as [grid searching](https://en.wikipedia.org/wiki/Hyperparameter_optimization#Approaches) for varying your starting values randomly. An example is in the [MLE chapter](ModelFitting-MLE-LikelihoodSurface). # # ### Bounding parameters revisited # # At the start, we looked at an [example](20-ModelFitting-NLLS:Bounding) of NLLS fitting where we bounded the parameters. It can be a good idea to restrict the range of values that the each of the model's parameters can take *during any one fitting/optimization run*. To "bound" a parameter in this way means to give it upper and lower limits. By doing so, during one optimization/fitting (e.g., one call to `nlsLM`, to fit one model, to one dataset), the fitting algorithm does not allow a parameter to go outside some limits. This reduces the chances of the optimization getting stuck too far from the solution, or failing completely due to some mathematical singularity (e.g., log(0)). # # The bounds are typically fixed for each parameter of a model *at the level of the model* (e.g., they do not change based on each dataset). For example, in the Gompertz model for growth rates (eqn. {eq}`eq:Gompertz`), you can limit the growth rate parameter to never be negative (the bounds would be $[0,\infty]$), or restrict it further to be some value between zero and an upper limit (say, 10) that you know organismal growth rates cannot exceed (the bounds would in this case would be $[0,10]$). # # However, as we saw in the Michaelis-Menten model fitting [example](20-ModelFitting-NLLS:Bounding), bounding the parameters too much (excessively narrow ranges) can result in poor solutions because the algorithm cannot explore sufficient parameter space. 
# # ```{tip} # The values of the parameter bounds you choose, of course, may depend on the *units of measurement* of the data. For example, in [SI](https://en.wikipedia.org/wiki/International_System_of_Units), growth rates in the Logistic or Gompertz models would be in units of s$^{-1}$). # ``` # Irrespective of which computer language the NLLS fitting algorithm is implemented in (`nlsLM`  in R or `lmfit` in Python), the fitting command/method will have options for setting the parameter bounds. In particular, # # * For `nlsLM` in R, look up https://www.rdocumentation.org/packages/minpack.lm/versions/1.2-1/topics/nlsLM (the `lower` and `upper` arguments to the function).  # # * For `lmfit` in Python, look up https://lmfit.github.io/lmfit-py/parameters.html (and in particular, https://lmfit.github.io/lmfit-py/parameters.html#lmfit.parameter.Parameter) (the `min` and `max` arguments). # # *Bounding the parameter values has nothing to do, per se, with sampling the starting values of each parameter, though if you choose to sample starting values (explained in previous section), you need to make sure that the samples don't exceed the pre-set bounds (explained in this section).* # # ```{note} # Python's `lmfit` has an option to also internally vary the parameter. So by using a sampling approach as described in the previous section, *and* allowing the parameter to vary (note that `vary=True` is the default) within `lmfit`, you will be in essence be imposing sampling twice. This may or may not improve fitting performance &ndash; try it out both ways. # ``` # ## Readings and Resources # # * Motulsky, Harvey, and <NAME>. Fitting models to biological data using linear and nonlinear regression: a practical guide to curve fitting. 
OUP USA, 2004: <https://www.facm.ucl.ac.be/cooperation/Vietnam/WBI-Vietnam-October-2011/Modelling/RegressionBook.pdf> # # * These are a pretty good series of notes on NLLS (even if you are using R instead of Python): <https://lmfit.github.io/lmfit-py/intro.html> # # * Another technical description of NLLS algorithms: <https://www.gnu.org/software/gsl/doc/html/nls.html> # # * <NAME>. & <NAME>. 2004 Model selection in ecology and evolution. Trends Ecol. Evol. 19, 101–108. # # * The *nlstools* package for NLLS fit diagnostics: <https://rdrr.io/rforge/nlstools> # * The original paper: <http://dx.doi.org/10.18637/jss.v066.i05>
content/notebooks/3-ModelFitting-NLLS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''py38'': conda)' # name: python388jvsc74a57bd0ea8efced7f9f908505a4e28889b9d0ff7d52e9a5c03dfaa80d1d80124525970c # --- # # Using EcoFOCIpy to process raw field data # # ## Mooring / Timeseries Data # # Basic workflow for each instrument grouping is *(initial archive level)*: # - Parse data from raw files into pandas dataframe # - output initial files (pandas->csv) **ERDDAP NRT** when no meta data is added # # Convert to xarray dataframe for all following work *(working or final data level): # - TODO: Add metadata from instrument yaml files and/or header info # - ingest metadata from deployment/recovery records or cast logs # - process data beyond simple file translate # - apply any calibrations or corrections # + field corrections # + offsets # + instrument compensations # + some QC were available... this would be old-school simple bounds mostly # - adjust time bounds and sample frequency (xarray dataframe) # - save as CF netcdf via xarray: so many of the steps above are optional # + **ERDDAP NRT** if no corrections, offsets or time bounds are applied but some meta data is # + **Working and awaiting QC** has no ERDDAP representation and is a holding spot # + **ERDDAP Final** fully calibrated, qc'd and populated with meta information # # Plot for preview and QC # - preview images (indiv and/or collectively) # - manual qc process # - automated qc process ML/AI # # Further refinenments for ERDDAP hosting: # # ## Example below is for EcoFluorometer with dual channels (Eco FLNTUS) but the workflow is similar for all instruments. 
# # Future processing of this instrument can be a simplified (no markdown) process which can be archived so that the procedure can be traced or updated # + import yaml import EcoFOCIpy.io.wetlabs_parser as wetlabs_parser #<- instrument specific import EcoFOCIpy.io.ncCFsave as ncCFsave import EcoFOCIpy.metaconfig.load_config as load_config # - # The sample_data_dir should be included in the github package but may not be included in the pip install of the package # # ## Simple Processing - first step sample_data_dir = '../' # + ############################################################### # edit to point to {instrument sepcific} raw datafile datafile = sample_data_dir+'staticdata/example_data/ecoflntus_sample.txt' instrument = 'ECO-Fluorometer FLNTUS 1075' mooring_meta_file = sample_data_dir+'staticdata/mooring_example.yaml' inst_meta_file = sample_data_dir+'staticdata/instr_metaconfig/ecofluor.yaml' inst_shortname = 'eco' ############################################################### #init and load data eco = wetlabs_parser.wetlabs() (eco_data,eco_header) = eco.parse(filename=datafile, return_header=True, datetime_index=True) # - eco_header eco_data # ### Engr2Sci Calibration (unique to wetlabs instruments) # # Must apply the known cal factors from the lab to the raw data to get science data. These are linear transformations of the form `y=mx+b`, so a slope (m) and offset (b) should be provided. Each channel has its own cal factor. Currently there is no routine to read these from a database or file, so you need to input them into the object method. 
# # Change the name of each channel below to be that of the column label (wavelenght), and change the 'outname' to be the netcdf CF compliant variable name (in the instrument yaml files) # + cal_coef = {695:{'scaleFactor':0.0257,'darkCounts':61.0,'outname':'chlor_fluorescence'}, 700:{'scaleFactor':0.1914,'darkCounts':61,'outname':'turbidity'}, 'channel_3':{'scaleFactor':0,'darkCounts':0,'outname':'channel_3'} } eco.engr2sci(cal_coef=cal_coef) # - # ## Time properties # # Its unusual that our clocks drift to the point of concern for our instruments (if an instrument is off by 3 minutes but only sampling hourly... regridding that data will result in minimal changes). However, there are a few time oriented modifications that may need to be made. # # The can be classified into two categories: # + interpolate: these change the parameter values in accordance with the time edits # - linear interpolation is most common # - averaging of data and rebinning/resampling is also common (this needs to have the "time lable" thought out...) # - decimating is less common but does not impact the max/min values # + shift: these do not alter the measurements, just the timestamps they are associated with # - the round function will work well to correct small time errors/drifts **common** # - dropping extra precision on time (if you want hourly measurements, just remove all minute/second info... could cause large errors if rounding would have been more appropriate) # It is very easy to use pandas interplation and resample methods on the dataframe as is. A few steps are suggested below: # - parse out on-deck (predeployment and recovery) data. This can be done via pandas or xarray but requires the mooring metadata to have been read in. See future steps below. 
- even if the sample frequency is set to the desired measurement frequency, it would be good to perform a quick regridding as an assurance task
# - FOCI data is usually 1min, 10min, 1hr - and the 1min data is a fairly new (sbe56) data stream
#     + subsampling high frequency data to lower frequency is easy via df.resample().mean() but it will label the new datapoint per default instructions. The default is to label it with the left boundary of the bin.
#     + you may want to take the median instead of the mean for noisy data (fluorometer); occasionally decimating may be more appropriate if you want to downsize the dataset size but not smear features
#     + shifting times can be a bit more involved. There are two primary ways to do it, interpolate or shift (round)
# - to interpolate, you will need to upsample your data to a higher frequency which will generate missing values, then interpolate (with a maximum gap size), then decimate. This always has the artifact of smoothing data and decreasing the min/max values. **common on microcats and other 10min datasets**
# - shifting usually just involves dropping extra time "digits", if you want hourly, you could just drop the trailing minutes assuming you are just off the hour (8:05 -> 8:00) or you can round to the nearest time unit but neither of these changes the data value, just the time associated with it. **common on seacats and other hourly datasets**
# - you may also be able to *shift* using the pandas datetime round function and specifying the desired frequency.
#     + I suggest if no change is needed... df.index.round(freq=*'your native sample freq'*)

# ### Special note for Wetlabs times
#
# Historically these instruments have drifted significantly (usually to be slow). We keep track of the shift in seconds, and we do burst measurements with them. How you want to tackle this is going to depend on each instrument.
Goal is to stick to simple timeseries analysis via pandas though (as opposed to determining when the groups of each burst are sampled)

eco_data = eco.time_correction(offset=854) # positive offset (seconds) to catch up with GPS time - a linear drift correction may need to be applied eventually... this is a constant offset shift at the moment

eco_data

# +
# the smaller you resample, the closer to the "minute" you will be,
# then you can determine whether to round or interpolate.
# NOTE(review): the 'T' (minute) offset alias is deprecated in pandas >= 2.2;
# '2min' is the forward-compatible spelling - confirm the pinned pandas version.
eco_data = eco_data.resample('2T').median().interpolate()
# keep only the on-the-hour samples, i.e. an hourly output series
eco_data=eco_data[eco_data.index.minute==0]
eco_data
# -

eco_data.plot()

# ## Add Deployment meta information
#
# Two methods are available (if coming from python2 world - ordereddict was important... in py38 a dictionary is inherently ordered)

# just a dictionary of dictionaries - simple
with open(mooring_meta_file) as file:
    mooring_config = yaml.full_load(file)

# Generates an ordereddict but prints better for summary.
# Likely to be deprecated, as an ordered dict may not be useful, and dropping it
# removes a dependency if it's EOL.
mooring_config_v2 = load_config.load_config(mooring_meta_file)

mooring_config['Instrumentation'][instrument]

# ## Add Instrument meta information
#
# Time, depth, lat, lon should be added regardless (always our coordinates) but for a mooring site it's going to be a (1,1,1,t) dataset

# The variables of interest should be read from the data file and matched to a key for naming.
That key is in the inst_config file seen below and should represent common conversion names in the raw data

# + tags=[]
with open(inst_meta_file) as file:
    inst_config = yaml.full_load(file)
inst_config
# -

# Add meta data and prelim processing based on meta data

# Convert to xarray and add meta information - save as CF netcdf file

# Bundle the corrected timeseries with the instrument and deployment metadata:
# pass -> data, instmeta, depmeta
eco_data_nc = ncCFsave.EcoFOCI_CFnc(df=eco_data,
                                    instrument_yaml=inst_config,
                                    operation_yaml=mooring_config,
                                    operation_type='mooring',
                                    instrument_id=instrument,
                                    inst_shortname=inst_shortname)

eco_data_nc

# At this point, you could save your file with the `.xarray2netcdf_save()` method and have a functioning dataset.... but it would be very simple with no additional qc, meta-data, or tuned parameters for optimizing software like ferret or erddap.

# expand the dimensions and coordinate variables,
# rename them appropriately, and prepare them for meta-filled values
eco_data_nc.expand_dimensions()

eco_data_nc.variable_meta_data(variable_keys=list(eco_data.columns.values),drop_missing=True)

eco_data_nc.temporal_geospatioal_meta_data()

# adding dimension meta needs to come after updating the dimension values... BUG?
eco_data_nc.dimension_meta_data(variable_keys=['depth','latitude','longitude'])

# The following steps can happen in just about any order and are all meta-data driven. Therefore, they are not required to have a functioning dataset, but they are required to have a well described dataset

# +
# add global attributes
eco_data_nc.deployment_meta_add()
eco_data_nc.get_xdf()

# add institutional global attributes
eco_data_nc.institution_meta_add()

# add creation date/time - provenance data
eco_data_nc.provinance_meta_add()

# provide initial qc status field
eco_data_nc.qc_status(qc_status='unknown')
# -

# ## Save CF Netcdf files
#
# Currently stick to netcdf3 classic... but migrating to netcdf4 (default) may be no problems for most modern purposes.
It's easy enough to pass the `format` kwargs through to the netcdf api of xarray.

# +
# combine trim (not mandatory) and filename together (saves to test.nc without name)
# Filename convention: <mooringid><inst_shortname>_<depth zero-padded to 4>m.nc
depth = str(int(mooring_config['Instrumentation'][instrument]['ActualDepth'])).zfill(4)
# mooring_yaml['Instrumentation'][self.instrument_id]['DesignedDepth'])).zfill(4) #<-- alternative
filename = "".join(mooring_config['MooringID'].split('-')).lower()+'_'+inst_shortname+'_'+depth+'m.nc'

# NOTE(review): autotrim_time() presumably drops the pre-deployment /
# post-recovery (on-deck) samples before saving - confirm against
# EcoFOCI_CFnc.autotrim_time
eco_data_nc.xarray2netcdf_save(xdf = eco_data_nc.autotrim_time(),
                               filename=filename,format="NETCDF3_CLASSIC")

# don't trim the data and pass your own filename
# eco_data_nc.xarray2netcdf_save(xdf = eco_data_nc.get_xdf(),
#                                filename=filename,format="NETCDF4_CLASSIC")
# -

eco_data_nc.get_xdf()

# ## Next Steps
#
# QC of data (plot parameters with other instruments)
# - be sure to update the qc_status and the history
notebooks/EcoFOCIpy_wetlabs2channel_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Connect to the MongoDB test corpus and dump the `_id` of every document in a
# user-chosen collection to `recordIDsToConsider.txt`, one id per line.

import pymongo

# NOTE: replace <username>/<password> with real credentials before running.
connectionStr = "mongodb+srv://<username>:<password>@testcorpus.tj5ah.mongodb.net/testDatabase?retryWrites=true&w=majority"
myClient = pymongo.MongoClient(connectionStr, serverSelectionTimeoutMS = 15000)

# server_info() forces a round-trip, so bad credentials / an unreachable
# cluster fail fast here. (As before, the script still proceeds after a
# failure; the find() below will then raise.)
try:
    print(myClient.server_info())
except Exception:
    print("Unable to establish connection to the server!")

mydb = myClient.testDatabase

collectionName = input("Type in the name of the collection: ")
myCollection = mydb[collectionName]

# Project only _id so the server does not ship whole documents, and use a
# context manager so the file is closed even if the cursor raises mid-loop.
# The f-string also keeps this working for non-string ids (e.g. ObjectId),
# which the previous `id + "\n"` concatenation would crash on.
with open("recordIDsToConsider.txt", "w") as outfile:
    for document in myCollection.find({}, {"_id": 1}):
        outfile.write(f"{document['_id']}\n")
Storing_the_IDs_of_dataset_records_in_text_file.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Context: This concerns how to further validate user input to avoid errors for invalid conversions # + # i.e. Creating a while loop to keep requesting an input until a correct/valid input is received. # For this example, create a function around: # - def userDecision(): decision = input("Please enter a number (0-9): ") return int(decision) userDecision() # + # So what is the issue with above? # - # 1. A user can enter a string userDecision() # 2. A user has the ability to enter anything they want i.e. the programmer gave them a 'hall pass'. userDecision() # + # So we need to check for or only accept the correct 'Type' and the correct 'Range' # - def userSelection(): # Set the 'flags' selection = 'Incorrect Value' withinRange = False # While the choice is not a digit, keep asking for input. while selection.isdigit() == False or withinRange == False: # we shouldn't convert here, otherwise we get an error on a wrong input selection = input("Please enter a number (0-9): ") if selection.isdigit() == False: print("Sorry that is not a digit!") if selection.isdigit() == True: if int(selection) in range(0,10): withinRange = True else: withinRange = False # We can convert once the while loop above has confirmed we have a digit. return int(selection) userSelection()
[Self Help] Python/Notes - Learning Resources/Milestone Project 1/[3. Validation ] Validating User Input.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Initialization # # Welcome to the first assignment of Improving Deep Neural Networks! # # Training your neural network requires specifying an initial value of the weights. A well-chosen initialization method helps the learning process. # # If you completed the previous course of this specialization, you probably followed the instructions for weight initialization, and seen that it's worked pretty well so far. But how do you choose the initialization for a new neural network? In this notebook, you'll try out a few different initializations, including random, zeros, and He initialization, and see how each leads to different results. # # A well-chosen initialization can: # - Speed up the convergence of gradient descent # - Increase the odds of gradient descent converging to a lower training (and generalization) error # # Let's get started! 
# ## Table of Contents # - [1 - Packages](#1) # - [2 - Loading the Dataset](#2) # - [3 - Neural Network Model](#3) # - [4 - Zero Initialization](#4) # - [Exercise 1 - initialize_parameters_zeros](#ex-1) # - [5 - Random Initialization](#5) # - [Exercise 2 - initialize_parameters_random](#ex-2) # - [6 - He Initialization](#6) # - [Exercise 3 - initialize_parameters_he](#ex-3) # - [7 - Conclusions](#7) # <a name='1'></a> # ## 1 - Packages # + import numpy as np import matplotlib.pyplot as plt import sklearn import sklearn.datasets from public_tests import * from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec # %matplotlib inline plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # %load_ext autoreload # %autoreload 2 # load image dataset: blue/red dots in circles # train_X, train_Y, test_X, test_Y = load_dataset() # - # <a name='2'></a> # ## 2 - Loading the Dataset train_X, train_Y, test_X, test_Y = load_dataset() # For this classifier, you want to separate the blue dots from the red dots. # <a name='3'></a> # ## 3 - Neural Network Model # You'll use a 3-layer neural network (already implemented for you). These are the initialization methods you'll experiment with: # - *Zeros initialization* -- setting `initialization = "zeros"` in the input argument. # - *Random initialization* -- setting `initialization = "random"` in the input argument. This initializes the weights to large random values. # - *He initialization* -- setting `initialization = "he"` in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015. # # **Instructions**: Instructions: Read over the code below, and run it. 
In the next part, you'll implement the three initialization methods that this `model()` calls. def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"): """ Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID. Arguments: X -- input data, of shape (2, number of examples) Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples) learning_rate -- learning rate for gradient descent num_iterations -- number of iterations to run gradient descent print_cost -- if True, print the cost every 1000 iterations initialization -- flag to choose which initialization to use ("zeros","random" or "he") Returns: parameters -- parameters learnt by the model """ grads = {} costs = [] # to keep track of the loss m = X.shape[1] # number of examples layers_dims = [X.shape[0], 10, 5, 1] # Initialize parameters dictionary. if initialization == "zeros": parameters = initialize_parameters_zeros(layers_dims) elif initialization == "random": parameters = initialize_parameters_random(layers_dims) elif initialization == "he": parameters = initialize_parameters_he(layers_dims) # Loop (gradient descent) for i in range(num_iterations): # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID. a3, cache = forward_propagation(X, parameters) # Loss cost = compute_loss(a3, Y) # Backward propagation. grads = backward_propagation(X, Y, cache) # Update parameters. 
parameters = update_parameters(parameters, grads, learning_rate) # Print the loss every 1000 iterations if print_cost and i % 1000 == 0: print("Cost after iteration {}: {}".format(i, cost)) costs.append(cost) # plot the loss plt.plot(costs) plt.ylabel('cost') plt.xlabel('iterations (per hundreds)') plt.title("Learning rate =" + str(learning_rate)) plt.show() return parameters # <a name='4'></a> # ## 4 - Zero Initialization # # There are two types of parameters to initialize in a neural network: # - the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$ # - the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$ # # <a name='ex-1'></a> # ### Exercise 1 - initialize_parameters_zeros # # Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to "break symmetry," but try it anyway and see what happens. Use `np.zeros((..,..))` with the correct shapes. # + deletable=false nbgrader={"cell_type": "code", "checksum": "27eb20f17301310c34489a2e99dccb72", "grade": false, "grade_id": "cell-0ebbf9140df0c623", "locked": false, "schema_version": 3, "solution": true, "task": false} # GRADED FUNCTION: initialize_parameters_zeros def initialize_parameters_zeros(layers_dims): """ Arguments: layer_dims -- python array (list) containing the size of each layer. Returns: parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL": W1 -- weight matrix of shape (layers_dims[1], layers_dims[0]) b1 -- bias vector of shape (layers_dims[1], 1) ... 
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1]) bL -- bias vector of shape (layers_dims[L], 1) """ parameters = {} L = len(layers_dims) # number of layers in the network for l in range(1, L): #(≈ 2 lines of code) # parameters['W' + str(l)] = # parameters['b' + str(l)] = # YOUR CODE STARTS HERE parameters['W' + str(l)] = np.zeros((layers_dims[l],layers_dims[l-1])) parameters['b' + str(l)] = np.zeros((layers_dims[l],1)) # YOUR CODE ENDS HERE return parameters # + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "3f658c06a0a076ada919152a16148743", "grade": true, "grade_id": "cell-cca4e25452117a41", "locked": true, "points": 10, "schema_version": 3, "solution": false, "task": false} parameters = initialize_parameters_zeros([3, 2, 1]) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) initialize_parameters_zeros_test(initialize_parameters_zeros) # - # Run the following code to train your model on 15,000 iterations using zeros initialization. parameters = model(train_X, train_Y, initialization = "zeros") print ("On the train set:") predictions_train = predict(train_X, train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) # The performance is terrible, the cost doesn't decrease, and the algorithm performs no better than random guessing. Why? Take a look at the details of the predictions and the decision boundary: print ("predictions_train = " + str(predictions_train)) print ("predictions_test = " + str(predictions_test)) plt.title("Model with Zeros initialization") axes = plt.gca() axes.set_xlim([-1.5,1.5]) axes.set_ylim([-1.5,1.5]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) # __Note__: For sake of simplicity calculations below are done using only one example at a time. 
# 
# Since the weights and biases are zero, multiplying by the weights creates the zero vector which gives 0 when the activation function is ReLU. As `z = 0`
# 
# $$a = ReLU(z) = max(0, z) = 0$$
# 
# At the classification layer, where the activation function is sigmoid you then get (for either input):
# 
# $$\sigma(z) = \frac{1}{ 1 + e^{-(z)}} = \frac{1}{2} = y_{pred}$$
# 
# As for every example you are getting a 0.5 chance of it being true our cost function becomes helpless in adjusting the weights.
# 
# Your loss function:
# $$ \mathcal{L}(y_{pred}, y) = - y \ln(y_{pred}) - (1-y) \ln(1-y_{pred})$$
# 
# For `y=1`, `y_pred=0.5` it becomes:
# 
# $$ \mathcal{L}(0.5, 1) = - (1) \ln(\frac{1}{2}) = 0.6931471805599453$$
# 
# For `y=0`, `y_pred=0.5` it becomes:
# 
# $$ \mathcal{L}(0.5, 0) = - (1) \ln(\frac{1}{2}) = 0.6931471805599453$$
# 
# As you can see with the prediction being 0.5 whether the actual (`y`) value is 1 or 0 you get the same loss value for both, so none of the weights get adjusted and you are stuck with the same old value of the weights.
# 
# This is why you can see that the model is predicting 0 for every example! No wonder it's doing so badly.
# 
# In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, so you might as well be training a neural network with $n^{[l]}=1$ for every layer. This way, the network is no more powerful than a linear classifier like logistic regression.

# <font color='blue'>
# 
# **What you should remember**:
# - The weights $W^{[l]}$ should be initialized randomly to break symmetry.
# - However, it's okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly.

# <a name='5'></a>
# ## 5 - Random Initialization
# 
# To break symmetry, initialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs.
# GRADED FUNCTION: initialize_parameters_random

def initialize_parameters_random(layers_dims):
    """
    Initialize weights to large random values (scaled by *10) and biases to zeros.

    The x10 scale is intentionally too large: it is used later in the notebook
    to demonstrate how oversized initial weights saturate the sigmoid output
    and slow optimization down.

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    W{l} -- weight matrix of shape (layers_dims[l], layers_dims[l-1])
                    b{l} -- bias vector of shape (layers_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed so the "random" numbers are the same as the reference output
    parameters = {}
    num_layers = len(layers_dims)  # integer representing the number of layers

    layer = 1
    while layer < num_layers:
        fan_in = layers_dims[layer - 1]
        fan_out = layers_dims[layer]
        # Gaussian weights, deliberately oversized by a factor of 10.
        parameters['W' + str(layer)] = np.random.randn(fan_out, fan_in) * 10
        parameters['b' + str(layer)] = np.zeros((fan_out, 1))
        layer += 1

    return parameters
print (predictions_train) print (predictions_test) plt.title("Model with large random initialization") axes = plt.gca() axes.set_xlim([-1.5,1.5]) axes.set_ylim([-1.5,1.5]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) # **Observations**: # - The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\log(a^{[3]}) = \log(0)$, the loss goes to infinity. # - Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm. # - If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization. # # <font color='blue'> # # **In summary**: # - Initializing weights to very large random values doesn't work well. # - Initializing with small random values should do better. The important question is, how small should be these random values be? Let's find out up next! # # <font color='black'> # # **Optional Read:** # # # The main difference between Gaussian variable (`numpy.random.randn()`) and uniform random variable is the distribution of the generated random numbers: # # - numpy.random.rand() produces numbers in a [uniform distribution](https://raw.githubusercontent.com/jahnog/deeplearning-notes/master/Course2/images/rand.jpg). # - and numpy.random.randn() produces numbers in a [normal distribution](https://raw.githubusercontent.com/jahnog/deeplearning-notes/master/Course2/images/randn.jpg). # # When used for weight initialization, randn() helps most the weights to Avoid being close to the extremes, allocating most of them in the center of the range. 
# GRADED FUNCTION: initialize_parameters_he

def initialize_parameters_he(layers_dims):
    """
    Initialize parameters with He initialization (He et al., 2015).

    Weights are drawn as randn * sqrt(2 / fan_in) -- the scaling recommended
    for layers with a ReLU activation (Xavier initialization would use
    sqrt(1 / fan_in) instead). Biases are initialized to zeros.

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    W{l} -- weight matrix of shape (layers_dims[l], layers_dims[l-1])
                    b{l} -- bias vector of shape (layers_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed so results match the reference output
    parameters = {}
    # Consistent with the sibling initializers: L counts entries of layers_dims
    # (input layer included) and the loop runs over layers 1 .. L-1.
    L = len(layers_dims)

    for l in range(1, L):
        # Scale by sqrt(2 / fan_in) per He initialization for ReLU layers.
        parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) * np.sqrt(2. / layers_dims[l - 1])
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))

    return parameters
# # <a name='7'></a> # ## 7 - Conclusions # You've tried three different types of initializations. For the same number of iterations and same hyperparameters, the comparison is: # # <table> # <tr> # <td> # <b>Model</b> # </td> # <td> # <b>Train accuracy</b> # </td> # <td> # <b>Problem/Comment</b> # </td> # </tr> # <td> # 3-layer NN with zeros initialization # </td> # <td> # 50% # </td> # <td> # fails to break symmetry # </td> # <tr> # <td> # 3-layer NN with large random initialization # </td> # <td> # 83% # </td> # <td> # too large weights # </td> # </tr> # <tr> # <td> # 3-layer NN with He initialization # </td> # <td> # 99% # </td> # <td> # recommended method # </td> # </tr> # </table> # **Congratulations**! You've completed this notebook on Initialization. # # Here's a quick recap of the main takeaways: # # <font color='blue'> # # - Different initializations lead to very different results # - Random initialization is used to break symmetry and make sure different hidden units can learn different things # - Resist initializing to values that are too large! # - He initialization works well for networks with ReLU activations
2 - Improving Deep Neural Networks Hyperparameter Tuning, Regularization and Optimization/Initialization.ipynb