code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # # Azure Machine Learning Pipeline with DataTranferStep # This notebook is used to demonstrate the use of DataTranferStep in Azure Machine Learning Pipeline. # # In certain cases, you will need to transfer data from one data location to another. For example, your data may be in an ADLS account or Azure SQL and you want to make it available in the Blob storage. The built-in **DataTransferStep** class helps you transfer data in these situations. # # The below example shows a two-step pipeline. # 1. The first DataTransferStep transfers data in ADLS to Blob storage, and # 2. The second DataTransferStep transfers data in SQL to Blob storage. # ## Azure Machine Learning and Pipeline SDK-specific imports # + import os import azureml.core from azureml.core import Workspace, Run, Experiment from azureml.core.compute import ComputeTarget, DataFactoryCompute from azureml.core.datastore import Datastore from azureml.data.data_reference import DataReference from azureml.data.sql_data_reference import SqlDataReference from azureml.data.stored_procedure_parameter import StoredProcedureParameter, StoredProcedureParameterType from azureml.exceptions import ComputeTargetException from azureml.pipeline.core import Pipeline from azureml.pipeline.steps import DataTransferStep # Check core SDK version number print("SDK version:", azureml.core.VERSION) # - # ## Initialize Workspace # # Initialize a workspace object from persisted configuration. Make sure the config file is present at .\config.json # # If you don't have a config.json file, please go through the configuration Notebook located here: # https://github.com/Azure/MachineLearningNotebooks. 
# # This sets you up with a working config file that has information on your workspace, subscription id, etc. # + tags=["create workspace"] ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n') # - # ## Register Datastores # # In the code cell below, you will need to fill in the appropriate values for the workspace name, datastore name, subscription id, resource group, store name, tenant id, client id, and client secret that are associated with your ADLS datastore. # # For background on registering your data store, consult [this article](https://docs.microsoft.com/en-us/azure/data-lake-store/data-lake-store-service-to-service-authenticate-using-active-directory). # ### Register ADLS # + datastore_name='MyAdlsDatastore' subscription_id=os.getenv("ADL_SUBSCRIPTION_62", "<my-subscription-id>") # subscription id of ADLS account resource_group=os.getenv("ADL_RESOURCE_GROUP_62", "<my-resource-group>") # resource group of ADLS account store_name=os.getenv("ADL_STORENAME_62", "<my-datastore-name>") # ADLS account name tenant_id=os.getenv("ADL_TENANT_62", "<my-tenant-id>") # tenant id of service principal client_id=os.getenv("ADL_CLIENTID_62", "<my-client-id>") # client id of service principal client_secret=os.getenv("ADL_CLIENT_SECRET_62", "<my-client-secret>") # the secret of service principal try: adls_datastore = Datastore.get(ws, datastore_name) print("Found datastore with name: %s" % datastore_name) except: adls_datastore = Datastore.register_azure_data_lake( workspace=ws, datastore_name=datastore_name, subscription_id=subscription_id, # subscription id of ADLS account resource_group=resource_group, # resource group of ADLS account store_name=store_name, # ADLS account name tenant_id=tenant_id, # tenant id of service principal client_id=client_id, # client id of service principal client_secret=client_secret) # the secret of service principal print("Registered datastore with name: %s" % datastore_name) # - # ### 
Register Blob # + blob_datastore_name='MyBlobDatastore' account_name=os.getenv("BLOB_ACCOUNTNAME_62", "<my-account-name>") # Storage account name container_name=os.getenv("BLOB_CONTAINER_62", "<my-container-name>") # Name of Azure blob container account_key=os.getenv("BLOB_ACCOUNT_KEY_62", "<my-account-key>") # Storage account key try: blob_datastore = Datastore.get(ws, blob_datastore_name) print("Found blob datastore with name: %s" % blob_datastore_name) except: blob_datastore = Datastore.register_azure_blob_container( workspace=ws, datastore_name=blob_datastore_name, account_name=account_name, # Storage account name container_name=container_name, # Name of Azure blob container account_key=account_key) # Storage account key" print("Registered blob datastore with name: %s" % blob_datastore_name) # CLI: # az ml datastore register-blob -n <datastore-name> -a <account-name> -c <container-name> -k <account-key> [-t <sas-token>] # - # ### Register Azure SQL # + sql_datastore_name='MySqlDatastore' server_name=os.getenv("SQL_SERVER_NAME", "<my-sql-server-name>") # Name of SQL server database_name=os.getenv("SQL_DB_NAME", "<database_name>") # Name of SQL database client_id=os.getenv("SQL_CLIENT_ID", "<client_id>") # client id of service principal with permissions to access database client_secret=os.getenv("SQL_CLIENT_SECRET", "<client_secret>") # the secret of service principal tenant_id=os.getenv("SQL_TENET_ID", "<tenant_id>") # tenant id of service principal try: sql_datastore = Datastore.get(ws, sql_datastore_name) print("Found SQL datastore with name: %s" % sql_datastore_name) except: sql_datastore = Datastore.register_azure_sql_database( workspace=ws, datastore_name='MySqlDatastore', server_name=server_name, database_name=database_name, client_id=client_id, client_secret=client_secret, tenant_id=tenant_id) print("Registered SQL datastore with name: %s" % sql_datastore_name) # - # ## Create DataReferences # + adls_datastore = Datastore(workspace=ws, 
name="MyAdlsDatastore") # adls adls_data_ref = DataReference( datastore=adls_datastore, data_reference_name="adls_test_data", path_on_datastore="testdata") blob_datastore = Datastore(workspace=ws, name="MyBlobDatastore") # blob data blob_data_ref = DataReference( datastore=blob_datastore, data_reference_name="blob_test_data", path_on_datastore="testdata") sql_datastore = Datastore(workspace=workspace, name="MySqlDatastore") ## sql table sql_table_data_ref = SqlDataReference( datastore=sql_datastore, data_reference_name="sql_table_data_ref", sql_table="TestData") ## sql query sql_query_data_ref = SqlDataReference( datastore=sql_datastore, data_reference_name="sql_query_data_ref", sql_query="select top 1 * from TestData") ## stored procedure sql_proc_data_ref = SqlDataReference( datastore=sql_datastore, data_reference_name="sql_proc_data_ref", sql_stored_procedure="TestStoredProcedure") ## stored procedure with parameters sql_proc_params_data_ref = SqlDataReference( datastore=sql_datastore, data_reference_name="sql_proc_params_data_ref", sql_stored_procedure="TestStoredProcedureWithParameters", sql_stored_procedure_params=[StoredProcedureParameter("firstName", "Joe")]) print("Obtained adls, blob, and sql data references") # - # ## Setup Data Factory Account # + data_factory_name = 'adftest' def get_or_create_data_factory(workspace, factory_name): try: return DataFactoryCompute(workspace, factory_name) except ComputeTargetException as e: if 'ComputeTargetNotFound' in e.message: print('Data factory not found, creating...') provisioning_config = DataFactoryCompute.provisioning_configuration() data_factory = ComputeTarget.create(workspace, factory_name, provisioning_config) data_factory.wait_for_completion() return data_factory else: raise e data_factory_compute = get_or_create_data_factory(ws, data_factory_name) print("Setup data factory account complete") # CLI: # Create: az ml computetarget setup datafactory -n <name> # BYOC: az ml computetarget attach datafactory -n 
<name> -i <resource-id> # - # ## Create a DataTransferStep # **DataTransferStep** is used to transfer data between Azure Blob, Azure Data Lake Store, and Azure SQL database. # # - **name:** Name of module # - **source_data_reference:** Input connection that serves as source of data transfer operation. # - **destination_data_reference:** Input connection that serves as destination of data transfer operation. # - **compute_target:** Azure Data Factory to use for transferring data. # - **allow_reuse:** Whether the step should reuse results of previous DataTransferStep when run with same inputs. Set as False to force data to be transferred again. # # Optional arguments to explicitly specify whether a path corresponds to a file or a directory. These are useful when storage contains both file and directory with the same name or when creating a new destination path. # # - **source_reference_type:** An optional string specifying the type of source_data_reference. Possible values include: 'file', 'directory'. When not specified, we use the type of existing path or directory if it's a new path. # - **destination_reference_type:** An optional string specifying the type of destination_data_reference. Possible values include: 'file', 'directory'. When not specified, we use the type of existing path or directory if it's a new path. 
# ### Define a DataTransferStep to transfer data from ADLS to Blob

# +
transfer_adls_to_blob = DataTransferStep(
    name="transfer_adls_to_blob",
    source_data_reference=adls_data_ref,
    destination_data_reference=blob_data_ref,
    compute_target=data_factory_compute)

print("ADLS to Blob data transfer step created")
# -

# ### Define a DataTransferStep to transfer data from SQL to Blob

# +
# FIX: destination was `blob_output_data_ref`, a name that is never defined
# anywhere in this notebook (NameError); the Blob DataReference created in the
# "Create DataReferences" section is `blob_data_ref`.
transfer_sql_to_blob = DataTransferStep(
    name="transfer_sql_to_blob",
    source_data_reference=sql_proc_params_data_ref,
    destination_data_reference=blob_data_ref,
    compute_target=data_factory_compute,
    destination_reference_type='file')

print("SQL to Blob data transfer step created")
# -

# ## Build and Submit the Experiment
# Build the experiment with both steps.

# +
pipeline = Pipeline(
    description="data_transfer_adls_blob_sql",
    workspace=ws,
    steps=[transfer_adls_to_blob, transfer_sql_to_blob])

pipeline_run = Experiment(ws, "Data_Transfer_example").submit(pipeline)
pipeline_run.wait_for_completion()
# -

# ### View Run Details
from azureml.widgets import RunDetails
RunDetails(pipeline_run).show()

# # Next: Databricks as a Compute Target
# To use Databricks as a compute target from Azure Machine Learning Pipeline, a DatabricksStep is used. This [notebook](./aml-pipelines-use-databricks-as-compute-target.ipynb) demonstrates the use of a DatabricksStep in an Azure Machine Learning Pipeline.
aml-pipelines-data-transfer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import requests import pandas as pd import json from sqlalchemy.engine import create_engine from sqlalchemy import types import sqlalchemy from sqlalchemy import create_engine from sqlalchemy import inspect from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session import psycopg2 engine = create_engine("postgresql://postgres:postgres@localhost:5432/economy_db") inspector = inspect(engine) Base = automap_base() Base.prepare(engine, reflect=True) Base.classes.keys() Countries = Base.classes.countries Unemployment = Base.classes.unemployment Population = Base.classes.population Gdp = Base.classes.gdp Cpi = Base.classes.cpi Gdp2 = Base.classes.gdpp Inflation = Base.classes.inflations # Combination = Base.classes.combine inspector.get_table_names() session = Session(engine) # + jupyter={"outputs_hidden": true} session.query(Countries.country_name,Countries.color).all() # + jupyter={"outputs_hidden": true} session.query(Unemployment.country_name,Unemployment.ump_year,Unemployment.unemployment_pct).all() # + jupyter={"outputs_hidden": true} session.query(Population.country_name,Population.pop_year,Population.population).all() # + jupyter={"outputs_hidden": true} session.query(Gdp.country_name, Gdp.last_v, Gdp.gdp_usd).all() # + jupyter={"outputs_hidden": true} session.query(Cpi.country_name,Cpi.cpi_year,Cpi.cpi).all() # - >>> from sqlalchemy import Table, MetaData >>> from sqlalchemy.sql import text >>> from sqlalchemy_views import CreateView, DropView from sqlalchemy_views import sqlalchemy_views pip install sqlalchemy_views new_emp_df =pd.read_csv('static/beeswarm/unemp_EU4.csv') new_emp_df.head(2) results = 
session.query(Countries.country_name,Countries.country_code,Gdp.gdp_year,Gdp.country_code,Countries.color,Unemployment.unemployment_pct,Unemployment.ump_year,Gdp.gdp_usd)\ .filter(Unemployment.country_code == Countries.country_code)\ .filter(Unemployment.country_code == Gdp.country_code,Unemployment.ump_year == Gdp.gdp_year) for i,row in new_emp_df.iterrows(): engine.execute(f"""UPDATE countries SET color = '{row.color}' WHERE country_code = '{row.CountryCode}';""") columns = [result['name'] for result in results.column_descriptions] row2dict = lambda r: {c:val for c,val in zip(columns,r)} df_df = pd.DataFrame([row2dict(rez) for rez in results]) unemployment_df = df_df.pivot(index=['country_name','country_code','color'],columns= 'ump_year', values= 'unemployment_pct').rename(columns={col:f"unemployment_{col}" for col in df_df.ump_year.unique()}) gdp_df = df_df.pivot(index=['country_name','country_code','color'],columns= 'ump_year', values= 'gdp_usd').rename(columns={col:f"gdp_{col}" for col in df_df.ump_year.unique()}).applymap(float) combined_df = gdp_df.join(unemployment_df).reset_index().rename(columns={"country_name": "CountryName", "country_code": "CountryCode"}) # + jupyter={"outputs_hidden": true} combined_df.to_dict(orient="records") # - countries_df = df_df[["country_code","country_name","color"]].drop_duplicates() # + jupyter={"outputs_hidden": true} countries_df.set_index("country_code").to_dict(orient="index") # + engine = create_engine("postgresql://postgres:postgres@localhost:5432/economy_db") # - enginelite = create_engine("sqlite:///data/economy_db.sqlite") result2 =session.query(Inflation.country_name,Inflation.country_code,Inflation.the_year,Inflation.inflation,Inflation.population,Inflation.color).all() # + jupyter={"outputs_hidden": true} result2 # - # + jupyter={"outputs_hidden": true} result3 =session.query(Gdp2.name,Gdp2.country_code,Gdp2.year,Gdp2.value,Gdp2.lastvalue).all() result3 # -
Dashboard/road2flask2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] id="vfch6fQlnG05" # # Guide to module validators and fixers # + [markdown] id="RKnAlq0AnQCd" # Opacus strives to enable private training of pytorch models with minimal code changes on the user side. As you might have learnt by following the README and the introductory tutorials, Opacus does this by consuming your model, dataloader, and optimizer and returning wrapped counterparts that can perform privacy related functions. # # ## Why do I need a Module Validator? # While most of the common models work with Opacus, not all of them do. # 1. Right off the bat, all non-trainable modules (such as `nn.ReLU`, `nn.Tanh`, etc.) and frozen modules (with parameters whose `requires_grad` is set to `False`) are compatible. # 2. Furthermore, modules should also be able to capture per-sample gradients in order to work under DP setting. `GradSampleModule`'s and implementations offered by `opacus.layers` have this property. # 3. Some modules such as `BatchNorm` are not DP friendly as a sample's normalized value depends on other samples, and hence are incompatible with Opacus. # 4. Some other modules such as `InstanceNorm` are DP friendly, except under certain configurations (eg, when `track_running_stats` is On). # # It is unreasonable to expect you to remember all of this and take care of it. This is why Opacus provides a `ModuleValidator` to take care of this. # # + [markdown] id="DW4DumLD0DWW" # ## `ModuleValidator` internals # The `ModuleValidator` class has two primary class methods `validate()` and `fix()`. # # As the name suggests, `validate()` validates a given module's compatibility with Opacus by ensuring it is in training mode and is of type `GradSampleModule` (i.e, the module can capture per sample gradients). 
More importantly, this method also checks the sub-modules and their configurations for compatibility issues (more on this in the next section). # # The `fix()` method attempts to make the module compatible with Opacus. # # In Opacus 0.x, the specific checks for each of the supported modules and the necessary replacements were done centrally in the validator with a series of `if` checks. Adding new validation checks and fixes would have necessitated modifying the core Opacus code. In Opacus 1.0, this has been modularised by allowing you to register your own custom validator and fixer. # # In the rest of the tutorial, we will consider `nn.BatchNorm` as an example and show exactly how to do that. # + [markdown] id="RcrjckqXFNSG" # ### Registering validator # We know that `BatchNorm` module is not privacy friendly and hence the validator should throw an error, say like this # + id="7pRLzbW2YgtX" def validate_bathcnorm(module): return [Exception("BatchNorm is not supported")] # + [markdown] id="dHCBtWUYLdOz" # In order to register the above, all you need to do is decorate the above method as follows. # + id="Fgnf6V6Fm730" from opacus.validators import register_module_validator @register_module_validator( [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm] ) def validate_bathcnorm(module): return [Exception("BatchNorm is not supported")] # + [markdown] id="mm6x75oIMcU8" # That's it! The above will register `validate_bathcnorm()` for all of these modules: `[nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm]`, and this method will be automatically called along with other validators when you do `privacy_engine.make_private()`. # # The decorator essentially adds your method to `ModuleValidator`'s register for it to be cycled through during the validation phase. # # Just one nit bit: it is recommended that you make your validation exceptions as clear as possible. 
Opacus's validation for the above looks as follows: # + id="l3wJw5GYOo-W" @register_module_validator( [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm] ) def validate(module) -> None: return [ ShouldReplaceModuleError( "BatchNorm cannot support training with differential privacy. " "The reason for it is that BatchNorm makes each sample's normalized value " "depend on its peers in a batch, ie the same sample x will get normalized to " "a different value depending on who else is in its batch. " "Privacy-wise, this means that we would have to put a privacy mechanism there too. " "While it can in principle be done, there are now multiple normalization layers that " "do not have this issue: LayerNorm, InstanceNorm and their generalization GroupNorm " "are all privacy-safe since they don't have this property." "We offer utilities to automatically replace BatchNorms to GroupNorms and we will " "release pretrained models to help transition, such as GN-ResNet ie a ResNet using " "GroupNorm, pretrained on ImageNet" ) ]. # quite a mouthful, but is super clear! ;) # + [markdown] id="5HTyhM6WO3DY" # ### Registering fixer # # Validating is good, but can we fix the issue when possible? The answer, of course, is yes. And the syntax is pretty much the same as that of validator. # # `BatchNorm`, for example, can be replaced with `GroupNorm` without any meaningful loss of performance and still being privacy friendly. In Opacus, we do it as follows: # + id="7aXEZspDO0pD" def _batchnorm_to_groupnorm(module) -> nn.GroupNorm: """ Converts a BatchNorm ``module`` to GroupNorm module. This is a helper function. 
Args: module: BatchNorm module to be replaced Returns: GroupNorm module that can replace the BatchNorm module provided Notes: A default value of 32 is chosen for the number of groups based on the paper *Group Normalization* https://arxiv.org/abs/1803.08494 """ return nn.GroupNorm( min(32, module.num_features), module.num_features, affine=module.affine ) from opacus.validators.utils import register_module_fixer @register_module_fixer( [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm] ) def fix(module) -> nn.GroupNorm: logger.info( "The default batch_norm fixer replaces BatchNorm with GroupNorm." " The batch_norm validator module also offers implementations to replace" " it with InstanceNorm or Identity. Please check them out and override the" " fixer if those are more suitable for your needs." ) return _batchnorm_to_groupnorm(module) # + [markdown] id="0-MDSXZDP6A-" # Opacus does NOT automatically fix the module for you when you call `privacy_engine.make_private()`; it expects the module to be compliant before it is passed in. However, this can be easily be done as follows: # + id="mA2kvLGSP5pf" import torch from opacus.validators import ModuleValidator model = torch.nn.Linear(2,1) if not ModuleValidator.is_valid(model): model = ModuleValidator.fix(model) # + [markdown] id="uQw1D8vbe0yz" # If you want to use a custom fixer in place of the one provided, you can simply decorate your function using this same decorator. Note that the order of registration matters and the last function to be registered will be the one used. 
# #
# Eg: to only replace `BatchNorm2d` with `InstanceNorm` (while using the default replacement for `BatchNorm1d` and `BatchNorm3d` with `GroupNorm`), you can do:

# + id="AmaalNfzROWz"
import torch.nn as nn

from opacus.validators import register_module_fixer

# FIX: the decorator was `@register_module_validator([nn.BatchNorm2d])`, but
# this cell registers a *fixer* — that is what is imported above and what the
# surrounding text ("decorate your function using this same decorator")
# describes — so the matching decorator is `register_module_fixer`.
@register_module_fixer([nn.BatchNorm2d])
def fix_batchnorm2d(module):
    return nn.InstanceNorm2d(module.num_features)

# + [markdown] id="BPSNEozakx-i"
# Hope this tutorial was helpful! We welcome you to peek into the code under `opacus/validators/` for details. If you have any questions or comments, please don't hesitate to post them on our [forum](https://discuss.pytorch.org/c/opacus/29).
tutorials/guide_to_module_validator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Think Bayes # # This notebook presents example code and exercise solutions for Think Bayes. # # Copyright 2018 <NAME> # # MIT License: https://opensource.org/licenses/MIT # + # Configure Jupyter so figures appear in the notebook # %matplotlib inline # Configure Jupyter to display the assigned value after an assignment # %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' import pandas as pd import numpy as np # + from utils import read_gss gss = read_gss('data/gss_bayes') gss.head() # + def replace_invalid(series, bad_vals, replacement=np.nan): series.replace(bad_vals, replacement, inplace=True) replace_invalid(gss.feminist, [0, 8, 9]) replace_invalid(gss.polviews, [0, 8, 9]) replace_invalid(gss.partyid, [8, 9]) replace_invalid(gss.indus10, [0]) replace_invalid(gss.occ10, [0]) # - def values(series): return series.value_counts().sort_index() # https://gssdataexplorer.norc.org/projects/52787/variables/1698/vshow values(gss.feminist) # https://gssdataexplorer.norc.org/projects/52787/variables/178/vshow values(gss.polviews) # https://gssdataexplorer.norc.org/projects/52787/variables/141/vshow values(gss.partyid) # https://gssdataexplorer.norc.org/projects/52787/variables/82/vshow values(gss.race) # https://gssdataexplorer.norc.org/projects/52787/variables/81/vshow values(gss.sex) # https://gssdataexplorer.norc.org/projects/52787/variables/17/vshow # # 6870 Banking and related activities values(gss.indus10).head() np.mean(gss.indus10 == 6870) (gss.indus10 == 6870).mean() subset = gss.dropna(subset=['sex', 'polviews', 'partyid', 'indus10']) subset.shape # globals().update(subset) # female = sex == 2 values(female) liberal = polviews <= 2 values(liberal) democrat = partyid <= 1 values(democrat) banker = indus10 == 6870 
values(banker)

# +
# Count the bankers by hand.
# FIX: the test was `if x is True`. `banker` is a pandas boolean Series, so
# iterating it yields numpy.bool_ scalars, and `np.bool_(True) is True` is
# False (different objects) — the identity test never counted anything.
# Plain truthiness counts correctly.
total = 0
for x in banker:
    if x:
        total += 1
total
# -

total / len(banker)

def prob(A):
    """Probability of A"""
    return A.mean()

def count(A):
    """Number of instances of A"""
    return A.sum()

prob(female)

prob(liberal)

prob(democrat)

prob(banker)

prob(democrat & liberal)

count(banker[female])

prob(banker[female])

prob(female & banker)

prob(banker & female) / prob(female)

def conditional(A, B):
    """Conditional probability of A given B"""
    return prob(A[B])

conditional(banker, female)

conditional(liberal, democrat)

conditional(democrat, liberal)

conditional(democrat, female)

def conjunction(A, B):
    """Probability of both A and B"""
    return prob(A) * conditional(B, A)

prob(liberal & democrat)

conjunction(liberal, democrat)

prob(liberal) * prob(democrat)

conjunction(democrat, liberal)

prob(banker) * conditional(female, banker) / prob(female)

def bayes_theorem(A, B):
    """Conditional probability of A given B, using Bayes's theorem"""
    return prob(A) * conditional(B, A) / prob(B)

bayes_theorem(democrat, liberal)

conditional(banker, female)

conditional(banker, female & liberal)

conditional(banker & democrat, female & liberal)
code/chap01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 問題1 数学(1) # ### (1)$xyz$直交座標系において円柱面$x^2+y^2=1$,$xy$平面,平面$z=x$により囲まれた部分の体積を求めなさい. # # - 方針 四角形を積み重ねた形に見立てて積分する。 # - もしくは三角形を連ねた形に見立てて積分する。(1-x^2)みたいな形が出てきたはず # # ### (2)関数 $f(x,y)$ は, $x$ および $y$ について偏微分可能で $x\tfrac{\partial f}{\partial x}+y\tfrac{\partial f}{\partial y}=0$ なる関係を満足する. 関数 $f(x,y)$ を $x=r\cos \theta$,$y=r\sin \theta (r>0,0\leq \theta < 2\pi)$ で変数変換したときの $f(r\cos \theta,r\sin \theta)$ は, 変数 $r$ を含まない関数となることを証明しなさい. # # 連鎖律より # $$ # \begin{eqnarray} # \tfrac{\partial f}{\partial r} &=& \tfrac{\partial f}{\partial x} \tfrac{\partial x}{\partial r} + \tfrac{\partial f}{\partial y} \tfrac{\partial y}{\partial r} \\ &=& \tfrac{\partial f}{\partial x} \cos \theta + \tfrac{\partial f}{\partial y} \sin \theta \\ &=& \frac{1}{r} \{ r \cos \theta \tfrac{\partial f}{\partial x} + r \sin \theta \tfrac{\partial f}{\partial y} \} \cdots (*) # \end{eqnarray} # $$ # $x = r \cos x , y= r \sin x$より # $$ # \begin{eqnarray} # (*) &=& \frac{1}{r} \{ x\tfrac{\partial f}{\partial x}+y\tfrac{\partial f}{\partial y} \} = 0 \\ # \therefore \tfrac{\partial f}{\partial r} &=& 0 # \end{eqnarray} # $$ # これは$r$が定数であることを示している。 # したがって変数変換後の$f(r\cos \theta,r\sin \theta)$は$r$を含まない。 # # 問題2 数学(2) # ## 二つのメーカー $X$ および $Y$ からなる市場において, 各メーカーのユーザー数を調査したい. 毎年メーカー $X$ のユーザーのうち $\frac{1}{10}$ がメーカー $Y$ のユーザーとなり, 一方で, メーカー $Y$ のユーザーのうち $\frac{1}{5}$ がメーカー $X$ のユーザーとなる. それ以外は同じメーカーのユーザーのままでいるものとし, ユーザーの総数は変化しない. このとき以下の問いに答えなさい. # ### (1) ある年におけるメーカー $X, Y$ のユーザー数をそれぞれ $x_n, y_n$ で表す. この時翌年におけるそれぞれのメーカーのユーザー数 $x_{n+1}, y_{n+1}$ を二次正方行列 $\mathbf{A}$ を使って以下の形で表す. 行列 $\mathbf{A}$ を具体的に示しなさい. # $$ # \begin{pmatrix} x_{n+1} \\ y_{n+1} \end{pmatrix} = \mathbf{A}\begin{pmatrix} x_{n} \\ y_{n} \end{pmatrix} # $$ # - 方針 連立方程式を立てて行列の積に置き換える # ### (2) $\mathbf{A}$ の固有値および固有ベクトルを求めなさい. 
# - 方針 特になし # ### (3) $\mathbf{P^{-1}AP}$ が対角行列となるような行列 $\mathbf{P}$ を一つ求めるとともに, $P$ の逆行列を求めなさい. # - 方針 特になし # ### (4) 行列 $A^n$ を求めなさい. # - 方針 ここまでの誘導を利用する. # (${P^{-1}AP}^n$はすぐに求まるはずだからそれを利用する) # ### (4)の結果を使って, $n\rightarrow \infty$ としたときのメーカー $X$ および $Y$ のユーザー数の比率を求めなさい. # - 方針 誘導の通りにnの極限をとると...
T-30joho.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.display import Image Image('mat04.png') import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np mpl.rcParams['font.family'] = 'serif' # - Here is the reference link to learn more: # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html from IPython.display import Image Image('12_03_legend.png') # + fig, axes = plt.subplots(1, 1, figsize=(5, 5)) x = np.linspace(-1, 1, 100) y = x**2 plt.plot(x, y, 'r',label="y(x) = x**2") plt.axvline(x=0, color='k') plt.axhline(y=0, color='k') plt.legend(loc=1,fontsize=12) plt.title("legend(loc=%d) = 1") fig.tight_layout() #fig.savefig("matplotlib_legend.pdf", dpi=300) # + fig, axes = plt.subplots(2, 4, figsize=(10, 4)) x = np.linspace(-1, 1, 100) y = x**2 k = 0 for i in range(2): for j in range(4): axes[i,j].plot(x, y, 'r',label="y(x) = x**2") axes[i,j].legend(loc=k+1,fontsize=8) axes[i,j].axvline(x=0, color='k') axes[i,j].axhline(y=0, color='k') axes[i,j].set_title("legend(loc=%d)" % (k+1)) k+=1 fig.tight_layout() #fig.savefig("matplotlib_legend.pdf", dpi=300) # + fig, axes = plt.subplots(2, 4, figsize=(10, 4)) x = np.linspace(-1, 1, 100) y = x**2 k = np.array([[9,8,7,6],[5,4,3,2]]) for i in range(2): for j in range(4): axes[i,j].plot(x, y, 'r',label="y(x) = x**2") axes[i,j].legend(loc=k[i,j],fontsize=8) axes[i,j].axvline(x=0, color='k') axes[i,j].axhline(y=0, color='k') axes[i,j].set_title("legend(loc=%d)" % (k[i,j])) fig.tight_layout() #fig.savefig("matplotlib_legend.pdf", dpi=300) # + fig, ax = plt.subplots(1, 1, figsize=(10, 4)) x = np.linspace(-1, 1, 100) y = x**2 for n in range(3, 11): ax.plot(x, n * y, label="y(x) = %d*x^2" % n) ax.legend(ncol=4, loc=3, bbox_to_anchor=(0, 1), fontsize=12) fig.subplots_adjust(top=.75); # - # ## All done !!! 
# - Please feel free to let me know if there is any questions # - Please subscribe my youtube channel too # - Thank you very much
notebook/12.5 Matplotlib04 Legend.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Masking grid cells # # This tutorial will demonstrate the following: # # 1. Basics of grid masking # 1. Reading boundary, river, and island data from shapefiles # 1. Generating a focused grid # 1. Masking land cells from the shapefiles # 1. Writing grid data to shapefiles # + # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import pandas import seaborn clear_bkgd = {'axes.facecolor':'none', 'figure.facecolor':'none'} palette = seaborn.color_palette(palette='deep') seaborn.set(style='ticks', context='notebook', rc=clear_bkgd) import pygridgen as pgg import pygridtools as pgt def show_the_grid(g, colors=None): fig, (ax1, ax2) = plt.subplots(figsize=(12, 7.5), ncols=2, sharex=True, sharey=True) _ = g.plot_cells(ax=ax1, cell_kws=dict(cmap='Blues', colors=colors)) _ = g.plot_cells( ax=ax2, cell_kws=dict(cmap='Blues'), domain_kws=dict(domain_x='x', domain_y='y', beta='beta'), extent_kws=dict(extent_x='x', extent_y='y'), island_kws=dict(islands_x='x', islands_y='y', islands_name='name') ) _ = ax1.set_title('just the grid') _ = ax2.set_title('the grid + all the fixins') return fig def make_fake_bathy(grid): j_cells, i_cells = grid.cell_shape y, x = np.mgrid[:j_cells, :i_cells] z = (y - (j_cells // 2))** 2 - x return z # - # ## Masking basics # Let's consider a simple, orthogonal $5\times5$ unit grid and a basic rectangle that we will use to mask some elements of the grid: # + y, x = np.mgrid[:5, 1:6] mg = pgt.ModelGrid(x, y) mask_coords = [ (0.50, 3.25), (1.50, 3.25), (1.50, 2.75), (3.25, 2.75), (2.25, 0.75), (0.50, 0.75), ] fig, ax = plt.subplots() fig, cells = mg.plot_cells(ax=ax) mask_patch = plt.Polygon(mask_coords, color='forestgreen', alpha=0.5) ax.add_patch(mask_patch) # - # ### Applying the masks options # # You have few 
options when applying a mask to a grid # # 1. `inside=True` - by default, elements inside the polygon are masked. Setting this parameter to `False` will mask everything outside the polygon. # 1. `min_nodes=3` - This parameter configures how many nodes of a cell must be inside a polygon to flag the whole cell as inside that polygon. # 1. `use_existing=True` - When this is `True` the new mask determined from the passed polygons will be unioned (`np.bitwise_or`) with an existing mask that may be present. When this is `False` the old mask is completely overwritten with the new mask. # ### Masking inside vs outside a polygon # + fig, (ax1, ax2) = plt.subplots(figsize=(6, 3), ncols=2, sharex=True, sharey=True) common_opts = dict(use_existing=False) # mask inside _ = ( mg.mask_centroids(mask_coords, inside=True, **common_opts) .plot_cells(ax=ax1) ) ax1.add_patch(plt.Polygon(mask_coords, color='forestgreen', alpha=0.5)) ax1.set_title('Mask inside') # mask outside _ = ( mg.mask_centroids(mask_coords, inside=False, **common_opts) .plot_cells(ax=ax2) ) ax2.add_patch(plt.Polygon(mask_coords, color='cornflowerblue', alpha=0.5)) _ = ax2.set_title("Mask outside") # - # ### Masking with nodes instead of centroids # This time, we'll mask with the nodes of the cells instead of the centroids. We'll show four different masks, each generated with a different minimum number of nodes required to classify a cell as inside the polygon. 
# + fig, axes = plt.subplots(figsize=(13, 3),ncols=4, sharex=True, sharey=True) common_opts = dict(use_existing=False, inside=True) for ax, min_nodes in zip(axes.flat, [4, 3, 2, 1]): # mask inside _ = ( mg.mask_nodes(mask_coords, min_nodes=min_nodes, **common_opts) .plot_cells(ax=ax) ) ax.add_patch(plt.Polygon(mask_coords, color='forestgreen', alpha=0.5)) ax.set_title("min_nodes = {:d}".format(min_nodes)) # - # ## Loading data from shapefiles via `pygridtools.iotools` # # + boundaryfile = "masking_data/input/GridBoundary.shp" gridbounds = pgt.iotools.read_boundary( boundaryfile, sortcol='sort_order', upperleftcol='upper_left' ) riverfile = "masking_data/input/River.shp" river = pgt.iotools.read_polygons(riverfile) islandfile = "masking_data/input/Islands.shp" island_arrays = pgt.iotools.read_polygons(islandfile) islands = pandas.concat([ pandas.DataFrame({'x': arr[:, 0], 'y': arr[:, 1], 'name': n}) for n, arr in enumerate(island_arrays) ]) fig, ax = plt.subplots(figsize=(7.5, 7.5), subplot_kw={'aspect': 'equal'}) fig = pgt.viz.plot_domain(data=gridbounds, domain_x='x', domain_y='y', ax=ax) fig = pgt.viz.plot_boundaries(ax=ax, engine='mpl', extent_x=river[:, 0], extent_y=river[:, 1], islands=islands, islands_x='x', islands_y='y', islands_name='name') # - # ## Creating a `Gridgen` objects # + # number of nodes in each dimension i_nodes = 100 j_nodes = 20 # grid focus focus = pgg.Focus() # tighten the grid in the channels around the big island focus.add_focus(5. / j_nodes, 'y', 4., extent=8./j_nodes) focus.add_focus(14.5 / j_nodes, 'y', 4., extent=4./j_nodes) # coarsen the grid upstream focus.add_focus(98. / i_nodes, 'x', 0.25, extent=4./i_nodes) # tighten the grid around the big island's bend focus.add_focus(52. 
/ i_nodes, 'x', 4., extent=20./i_nodes) # generate the main grid grid = pgt.make_grid( domain=gridbounds, ny=j_nodes, nx=i_nodes, ul_idx=17, focus=focus, rawgrid=False ) # - # ### Show the raw (unmasked) grid # + grid.domain = gridbounds grid.extent = pandas.DataFrame(river, columns=['x', 'y', 'z']) grid.islands = islands fig = show_the_grid(grid) # - # ### Mask out everything beyond the river banks # + masked_river = grid.mask_centroids(river[:, :2], inside=False) fig = show_the_grid(masked_river) # - # ### Loop through and mask out the islands # + # inside the multiple islands masked_river_islands = masked_river.copy() for island in island_arrays: masked_river_islands = masked_river_islands.mask_centroids(island[:, :2], inside=True) fig = show_the_grid(masked_river_islands) # - # ## Plotting with e.g., bathymetry data # # The key here is that you need an array that is the same shape as the centroids of your grid # + fake_bathy = make_fake_bathy(masked_river_islands) fig = show_the_grid(masked_river_islands, colors=fake_bathy) # - # ## Exporting the masked cells to a shapefile masked_river_islands.to_gis('masking_data/output/ModelCells.shp', usemask=True, which='grid', geom='Polygon') # ## View the final input and output in the QGIS file in `examples/masking_data/Grid.qgs`
docs/tutorial/02_ShapefilesAndCellMasks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a id='top'></a> # + [markdown] hideCode=false hidePrompt=false # # Db2 Regular Expressions # Updated: 2019-10-03 # + [markdown] hideCode=false hidePrompt=false # Db2 11.1 introduced extended support for regular # expressions. Regular expressions allow you to do very complex # pattern matching in character strings. Normal SQL LIKE # searches are limited to very specific patterns, but Regular # expression have a rich syntax that gives you much more # flexibility in searching. # # Set up the connection to the database and Db2 command extensions. # + hideCode=false hideOutput=false hidePrompt=false # %run db2.ipynb # %run connection.ipynb # - # # Table of Contents # # * [Sample Table Setup](#sample) # * [Regular Expression Commands](#regular) # * [Regular Expression Flags](#flags) # * [Regular Expression Search Patterns](#patterns) # * [Anchoring Patterns in a Search](#anchor) # * [Matching Patterns Across Lines](#multiple) # * [Logical OR Operator](#or) # * [Combining Patterns](#combine) # * [Matching Character Types](#chars) # * [Special Patterns](#special) # * [Negating Patterns](#negate) # * [Capturing Parenthesis](#capture) # * [Performance Considerations](#performance) # [Back to Top](#top) # <a id='sample'></a> # + [markdown] hideCode=false hidePrompt=false # ## Sample Table Setup # # The following SQL will create a new table in the SAMPLE # database (or whatever database you are currently connected # to). This table represents all of the stations of the London # Underground Central line (existing stations only, not # historical ones!). This table will be used for all of the # examples within this section. 
# + hideCode=false hidePrompt=false magic_args="-q" language="sql" # DROP TABLE CENTRAL_LINE; # # CREATE TABLE CENTRAL_LINE # ( # STATION_NO INTEGER GENERATED ALWAYS AS IDENTITY, # STATION VARCHAR(31), # UPPER_STATION VARCHAR(31) GENERATED ALWAYS AS (UCASE(STATION)) # ) # ; # # INSERT INTO CENTRAL_LINE(STATION) # VALUES 'West Ruislip','Ruislip Gardens','South Ruislip','Northolt','Greenford', # 'Perivale','Hanger Lane','Ealing Broadway','West Acton','North Acton', # 'East Acton','White City','Shepherd''s Bush','Holland Park','Notting Hill Gate', # 'Queensway','Lancaster Gate','Marble Arch','Bond Street','Oxford Circus', # 'Tottenham Court Road','Holborn','Chancery Lane','St. Paul''s','Bank', # 'Liverpool Street','Bethnal Green','Mile End','Stratford','Leyton', # 'Leytonstone','Wanstead','Redbridge','Gants Hill','Newbury Park', # 'Barkingside','Fairlop','Hainault','Grange Hill','Chigwell', # 'Roding Valley','Snaresbrook','South Woodford','Woodford','Buckhurst Hill', # 'Loughton','Debden','Theydon Bois','Epping' # ; # - # [Back to Top](#top) # <a id='regular'></a> # + [markdown] hideCode=false hidePrompt=false # # Regular Expression Commands # # # There are six regular expression functions within DB2 # including: # # - **REGEXP_COUNT** - Returns a count of the number of times that a regular expression pattern is matched in a string. # - **REGEXP_EXTRACT** - Returns one occurrence of a substring of a string that matches the regular expression pattern. # - **REGEXP_INSTR** - Returns the starting or ending position of the matched substring, depending on the value of the return option argument. # - **REGEXP_LIKE** - Returns a Boolean value indicating if the regular expression pattern is found in a string. The function can be used only where a predicate is supported. # - **REGEXP_MATCH_COUNT** - Returns a count of the number of times that a regular expression pattern is matched in a string. 
# - **REGEXP_REPLACE** - Returns a modified version of the source string where occurrences of the regular expression pattern found in the source string are replaced with the specified replacement string. # - **REGEXP_SUBSTR** - Returns one occurrence of a substring of a string that matches the regular expression pattern. # # Each one of these functions follows a similar calling # sequence: # <pre> # REGEXP_FUNCTION(source, pattern, flags, start_pos, codeunits) # </pre> # # The arguments to the function are: # # - **Source** - string to be searched # - **Pattern** - the regular expression that contains what we are searching for # - **Flag** - settings that control how matching is done # - **Start_pos** - where to start in the string # - **Codeunits** - which type unit of measurement start_pos refers to (for Unicode) # # The source can be any valid Db2 string including CHAR, # VARCHAR, CLOB, etc. Start_pos is the location in the source # string that you want to start searching from, and codeunits # tells Db2 whether to treat the start_pos as an absolute # location (think byte location) or a character location which # takes into account the unicode size of the character string. # # Codeunits can be specified as CODEUNITS16, CODEUNITS32, or # OCTETS. CODEUNITS16 specifies that start is expressed in # 16-bit UTF-16 code units. CODEUNITS32 specifies that start # is expressed in 32-bit UTF-32 code units. OCTETS specifies # that start is expressed in bytes. # # Pattern and flag values are complex and so are discussed in # the following sections. # - # [Back to Top](#top) # <a id='flags'></a> # + [markdown] hideCode=false hidePrompt=false # ## Regular Express Flag Values # # Regular expression functions have a flag specification that # can be used to change the behavior of the search. 
There are # six possible flags that can be specified as part of the # REGEXP command: # # |Flag |Purpose # |:----:|:-------------------- # |**c** | Specifies that matching is case-sensitive (the default value) # |**i** | Specifies that matching is case insensitive # |**m** | Specifies that the input data can contain more than one line. By default, the '^' in a pattern matches only the start of the input string; the '\$' in a pattern matches only the end of the input string. If this flag is set, "^" and "\$" also matches at the start and end of each line within the input string. # |**n** | Specifies that the '.' character in a pattern matches a line terminator in the input string. By default, the '.' character in a pattern does not match a line terminator. A carriage-return and line-feed pair in the input string behaves as a single-line terminator, and matches a single "." in a pattern. # |**s** | Specifies that the '.' character in a pattern matches a line terminator in the input string. This value is a synonym for the 'n' value. # |**x** | Specifies that white space characters in a pattern are ignored, unless escaped. # - # [Back to Top](#top) # <a id='patterns'></a> # + [markdown] hideCode=false hidePrompt=false # ## Regular Expression Search Patterns # # Regular expressions use certain characters to represent what # is matched in a string. The simplest pattern is a string by # itself. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'Ruislip') # + [markdown] hideCode=false hidePrompt=false # The pattern 'Ruislip' will look for a match of Ruislip # within the STATION column. Note that this pattern will also # match 'West Ruislip' or 'Ruislip Gardens' since we placed no # restriction on where the pattern can be found in the string. # The match will also be exact (case matters). 
This type of # search would be equivalent to using the SQL LIKE statement: # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE STATION LIKE '%Ruislip%' # + [markdown] hideCode=false hidePrompt=false # If you didn't place the `%` at the beginning of the LIKE # string, only the stations that start with Ruislip would be # found. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE STATION LIKE 'Ruislip%' # + [markdown] hideCode=false hidePrompt=false # If you want to match Ruislip with upper or lower case being # ignored, you would add the `'i'` flag as part of the # REGEXP_LIKE (or any REGEXP function). # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'RUISLIP','i') # - # [Back to Top](#top) # <a id='anchor'></a> # + [markdown] hideCode=false hidePrompt=false # ## Anchoring Patterns in a Search # # By default a pattern will be matched anywhere in a string. # Our previous example showed how Ruislip could be found # anywhere in a string. To force a match to start at the # beginning of a string, the carat symbol `^` can be used to # force the match to occur at the beginning of a string. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'^Ruislip') # + [markdown] hideCode=false hidePrompt=false # To match a pattern at the end of the string, the dollar sign # `$` can be used. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'Ruislip$') # + [markdown] hideCode=false hidePrompt=false # To force an exact match with a string you would use both the # beginning and end anchors. 
# + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'^Leyton$') # + [markdown] hideCode=false hidePrompt=false # Note that if we didn't use the end anchor, we are going to # get more than one result. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'^Leyton'); # - # [Back to Top](#top) # <a id='multiple'></a> # + [markdown] hideCode=false hidePrompt=false # ## Matching patterns across multiple lines # # So far the examples have dealt with strings that do not # contain newline characters (or carriage feeds). In some # applications, data from an input panel may include multiple # lines which may contain hard line feeds. What this means is # that there are actually multiple lines in the data, but from # a database perspective, there is only one line in the # VARCHAR field. You can modify the behavior of the Regular # Expression search by instructing it to honor the CRLF # characters as line delimiters. # # The following SQL will insert a single line with multiple # CRLF characters in it to simulate a multi-line text string. # + hideCode=false hidePrompt=false magic_args="-q" language="sql" # DROP TABLE LONGLINE; # # CREATE TABLE LONGLINE (NAME VARCHAR(255)); # # INSERT INTO LONGLINE # VALUES 'George' || CHR(10) || 'Katrina'; # + [markdown] hideCode=false hidePrompt=false # Searching for Katrina at the beginning and end of string # doesn't work. # + hideCode=false hidePrompt=false language="sql" # SELECT COUNT(*) FROM LONGLINE # WHERE REGEXP_LIKE(NAME,'^Katrina$') # + [markdown] hideCode=false hidePrompt=false # We can override the regular expression search by telling it # to treat each NL/CRLF as the end of a string within a # string. 
# + hideCode=false hidePrompt=false language="sql" # SELECT COUNT(*) FROM LONGLINE # WHERE REGEXP_LIKE(NAME,'^Katrina$','m') # - # [Back to Top](#top) # <a id='or'></a> # + [markdown] hideCode=false hidePrompt=false # ## Logical OR Operator # # Regular expressions can match more than one pattern. The OR # operator `(|)` is used to define alternative patterns that can # match in a string. The following example searches for # stations that have "ing" in their name as well as "hill". # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'way|ing') # + [markdown] hideCode=false hidePrompt=false # Some things to be aware of when creating the search pattern. # Spaces in the patterns themselves are significant. If the # previous search pattern had a space in one of the words, it # would not find it (unless of course there was a space in the # station name). # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'way| ing') # + [markdown] hideCode=false hidePrompt=false # Using the `"x"` flag will ignore blanks in your pattern, so # this would fix issues that we have in the previous example. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'way| ing','x') # + [markdown] hideCode=false hidePrompt=false # Brackets can be used to make it clear what the pattern is # that you are searching for and avoid the problem of having # blanks in the expression. Brackets do have a specific usage # in regular expressions, but here we are using it only to # separate the two search strings. 
# + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'(way)|(ing)') # - # [Back to Top](#top) # <a id='combine'></a> # + [markdown] hideCode=false hidePrompt=false # ## Combining Patterns # # As we found out in the previous section, there is an OR # operator that you can use to select between two patterns. # How do you request that multiple patterns be present? First # we must understand how matching occurs when we have multiple # strings that need to be matched that have an unknown number # of characters between them. # # For instance, how do we create a pattern that looks for # "ing" followed by "way" somewhere in the string? Regular # expressions recognize the `"."` (period) character as matching # anything. Following the pattern you can add a modifier that # specifies how many times you want the pattern matched: # # - **\*** - Match zero or more times # - **?** - Match zero or one times # - **+** - Match one or more times # - **{m}** - Match exactly m times # - **{m,}** - Match at least a minimum of m times # - **{m,n}** - Match at least a minimum of m times and no more than n times # # The following regular expression searches for a pattern with # "ing" followed by any characters and then "way". # # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'(ing)*.(way)') # + [markdown] hideCode=false hidePrompt=false # The previous answer gave you two results (Ealing Broadway # and Queensway). Why two? The reason is that we used the `*` in # the wrong place (a single character in a wrong place can # result in very different results!). What we really needed to # do was place a `.*` after the (ing) to match "ing" and then # any characters, before matching "way". What our query did # above was match 0 or more occurrences of "ing", followed by # any character, and then match "way". Here is the correct # query. 
# + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'(ing).*(way)') # + [markdown] hideCode=false hidePrompt=false # Finding at least one occurrence of a pattern requires the # use of the `+` operator, or the bracket operators. This # example locates at least one occurrence of the "an" string # in station names. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'(an)+') # + [markdown] hideCode=false hidePrompt=false # If we want to find an exact number of occurrences, we need # to use the {} notation to tell the regular expression # matcher how many we want to find. The syntax of the `{}` # match is: # # - **{m}** - Match exactly m times # - **{m,}** - Match as least a minimum of m times # - **{m,n}** - Match at least a minimum of m times and no more than n times # # So the `"+"` symbol is equivalent to the following regular # expression using the `{}` syntax. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'(an){1,}') # + [markdown] hideCode=false hidePrompt=false # If we want to match exactly 2 'an' patterns in a string, we # would think that changing the expression to `{2}` would work. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'(an){2}') # + [markdown] hideCode=false hidePrompt=false # Sadly, we get no results! This would appear to be the wrong # result, but it's because we got lucky with our first search! # The best way to figure out what we matched in the original # query is to use the `REGEXP_EXTRACT` and `REGEXP_INSTR` # functions. # # - **REGEXP_EXTRACT** - Returns one occurrence of a substring of a string that matches the regular expression pattern. # - **REGEXP_INSTR** - Returns the starting or ending position of the matched substring, depending on the value of the return option argument. 
# # The following SQL gives us a clue to what was found with the # `(an)` pattern. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION, # REGEXP_INSTR(STATION,'(an)') AS LOCATION, # REGEXP_EXTRACT(STATION,'(an)') AS EXTRACT # FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'(an)') # + [markdown] hideCode=false hidePrompt=false # What you should see in the previous result is the location # where the `"an"` pattern was found in the string. Note that # all we matched was the `"an"` pattern, nothing else. So why # can't I find two `"an"` patterns in the string? The reason is # that `(an){2}` means "an" followed by another `"an"`! We didn't # tell the pattern to match anything else! What we need to do # is modify the pattern to say that it can match `"an"` followed # by anything else. The pattern needs to be modifed to `(an).*` # where the `".*"` means any character following the "an". # # In order to tell the regular expression function to use this # entire pattern `(an).\*` twice, we need to place brackets # around it as well. The final pattern is `((an).\*){2}`. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'((an).*){2}') # + [markdown] hideCode=false hidePrompt=false # You should find that two stations match the pattern. The # following SQL shows which pattern is matched first in the # STATIONS names. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION, # REGEXP_INSTR(STATION,'((an).*){2}') AS LOCATION, # REGEXP_EXTRACT(STATION,'((an).*){2}') AS EXTRACT # FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'((an).*){2}') # - # [Back to Top](#top) # <a id='chars'></a> # + [markdown] hideCode=false hidePrompt=false # ## Matching character types # # Aside from matching entire strings, you can also use regular # expression to look for patterns of characters. The simplest # matching pattern is the period `(.)` which matches any # character. 
Matching a string of arbitrary length is the # pattern `".*"`. The `"+"` and `"?"` characters can also be used to # modify how many characters you want matched. # # What about situations where you want to check for certain # patterns or characters in a string? A good example would be # a social security number, or credit card number. There are # certain patterns that you would find for these objects. # Assume we have a social security number in the format # xxx-xx-xxxx. It is possible to create a regular expression # that would return true if the SSN matched the pattern above # (it doesn't tell you if the SSN itself is valid, only that # it has the proper format). # # Regular expressions allow you to create a list of characters # that need to be matched in something called a bracket # expression. A bracket expression has the format: # # <pre> # [a-z] [A-Z] [0-9] [a-zA-z] # </pre> # # The examples above represent the following search patterns: # # - **[a-z]** - match any series of lowercase characters between a and z # - **[A-Z]** - match any series of uppercase characters between A and Z # - **[0-9]** - match any valid digits # - **[a-zA-Z]** - match any lower- or uppercase letters # # You can also enumerate all of the characters you want to # match by listing them between the brackets like # `[abcdefghikjlmnopqrstuvwxyz]`. The short form `a-z` is easier # to read and less prone to typing errors! # # The following example checks for station names that start # with the letter `P-R`. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'^[P-R]') # + [markdown] hideCode=false hidePrompt=false # If you wanted to include all stations that have the letter # `P-R` or `p-e`, you could add the condition within the brackets. 
# + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'[p-rP-R]') # + [markdown] hideCode=false hidePrompt=false # Back to our SSN question. Can a regular expression pattern # be used to determine whether or not the string is in the # correct format? The data will be in the format XXX-XX-XXXX # so the regular expression needs to find the three numeric # values separated by dashes. # # The number pattern can be represented with the bracket # expression `[0-9]`. To specify the number of characters that # need to be found, we use the braces `{}` to specify the exact # number required. # # For the three numbers in the pattern we can use `[0-9]{3}`, # `[0-9]{2}`, and `[0-9]{4}`. Adding in the dashes gives us the # final pattern. The SQL below checks to see if a SSN is # correct. # + hideCode=false hidePrompt=false language="sql" # VALUES # CASE # WHEN REGEXP_LIKE('123-34-1422','[0-9]{3}-[0-9]{2}-[0-9]{4}') THEN 'Valid' # ELSE 'Invalid' # END # + [markdown] hideCode=false hidePrompt=false # The SSN is valid in the example above. Here are some other # examples to show whether or not the regular expression picks # up all of the errors. # + hideCode=false hidePrompt=false language="sql" # WITH SSNS(SSN) AS ( # VALUES # '123-34-1322', # 'ABC-34-9999', # 'X123-44-0001', # '123X-Y44-Z0001', # '111-222-111' # ) # SELECT SSN, # CASE # WHEN REGEXP_LIKE(SSN,'[0-9]{3}-[0-9]{2}-[0-9]{4}') THEN 'Valid' # ELSE 'Invalid' # END # FROM SSNS # + [markdown] hideCode=false hidePrompt=false # If you check closely, one of the strings was marked as # valid, although it is not correct `(X123-44-0001)`. The reason # this occurred is that the pattern was found after the "X" # and it was correct. To prevent this from happening, we need # to anchor the pattern at the beginning to avoid this # situation. 
A better pattern would be to anchor both ends of # the pattern so there is no possibility of other characters # being at the beginning or end of the pattern. # + hideCode=false hidePrompt=false language="sql" # WITH SSNS(SSN) AS ( # VALUES # '123-34-1322', # 'ABC-34-9999', # 'X123-44-0001', # '123X-Y44-Z0001', # '111-222-111' # ) # SELECT SSN, # CASE # WHEN REGEXP_LIKE(SSN,'^[0-9]{3}-[0-9]{2}-[0-9]{4}$') THEN 'Valid' # ELSE 'Invalid' # END # FROM SSNS # - # [Back to Top](#top) # <a id='special'></a> # + [markdown] hideCode=false hidePrompt=false # ## Special Patterns # # The previous example used the [0-9] syntax to request that # only numbers be found in the pattern. There are some # predefined patterns that define these common patterns. The # first argument is Posix format (if it exists), the second is # the escape character equivalent, and the final one is the # raw pattern it represents. # # |Posix | Escape | Pattern | Meaning # |:----------| :--------| :---------------|:------------------------ # |[:alnum:] | | [A-Za-z0-9] | Alphanumeric characters # | | \w | [A-Za-z0-9\_] | Alphanumeric characters plus "\_" # | | \W | [^A-Za-z0-9\_] | Non-word characters # |[:alpha:] | \a | [A-Za-z] | Alphabetic characters # |[:blank:] | \s, \t | | Space and tab # | | \b | | Word boundaries # |[:cntrl:] | | [\x00-\x1F\x7F] | Control characters # |[:digit:] | \d | [0-9] | Digits # | | \D | [^0-9] | Non-digits # |[:graph:] | | [\x21-\x7E] | Visible characters # |[:lower:] | \l | [a-z] | Lowercase letters # |[:print:] | \p | [\x20-\x7E] | Visible characters and the space character # |[:punct:] | | [][!"#\$%&'()\*+,./:;<=>?@\^\_`{<code>&#124;</code>}~-] | Punctuation characters # |[:space:] | \s | [ \t\r\n\v\f] | Whitespace characters # | | \S | [^ \t\r\n\v\f] | Non-whitespace characters # |[:upper:] | \u | [A-Z] | Uppercase letters # |[:xdigit:] | \x | [A-Fa-f0-9] | Hexadecimal digits # # For instance, the following three statements will produce # the same result. 
# + hideCode=false hidePrompt=false magic_args="-a" language="sql" # WITH SSNS(SSN) AS ( # VALUES # '123-34-1322', # 'ABC-34-9999', # 'X123-44-0001', # '123X-Y44-Z0001', # '111-222-111' # ) # SELECT 'Original', SSN, # CASE # WHEN REGEXP_LIKE(SSN,'^[0-9]{3}-[0-9]{2}-[0-9]{4}$') THEN 'Valid' # ELSE 'Invalid' # END # FROM SSNS # UNION ALL # SELECT 'Posix', SSN, # CASE # WHEN REGEXP_LIKE(SSN,'^[:digit:]{3}-[:digit:]{2}-[:digit:]{4}$') THEN 'Valid' # ELSE 'Invalid' # END # FROM SSNS # UNION ALL # SELECT 'Escape', SSN, # CASE # WHEN REGEXP_LIKE(SSN,'^\d{3}-\d{2}-\d{4}$') THEN 'Valid' # ELSE 'Invalid' # END # FROM SSNS # - # [Back to Top](#top) # <a id='negate'></a> # + [markdown] hideCode=false hidePrompt=false # ## Negating Patterns # # Up to this point in time, the patterns that have been used # are looking for a positive match. In some cases you may want # to find values that do not match. The easiest way is to # negate the actual REGEXP_LIKE expression. The following # expression finds all of the stations that start with "West". # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'^West') # + [markdown] hideCode=false hidePrompt=false # Adding the NOT modifier in front of the REGEXP function # gives us the stations that do not begin with West. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE NOT REGEXP_LIKE(STATION,'^West') # + [markdown] hideCode=false hidePrompt=false # You can also negate some of the searches in a pattern by # using the `[^...]` syntax where the `^` tells the regular # expression not to match the following characters. The # expression `[^0-9]` would mean match any characters which are # not numeric. # # However, regular expressions have something called negative # lookarounds which basically mean find the pattern which does # not match. You create this pattern by adding the `(?!..)` at # the beginning of the string. 
The same query (finding # stations that don't start with West) would be written with # this lookaround logic found in the SQL below. # + hideCode=false hidePrompt=false language="sql" # SELECT STATION FROM CENTRAL_LINE # WHERE REGEXP_LIKE(STATION,'^(?!West)') # - # [Back to Top](#top) # <a id='capture'></a> # + [markdown] hideCode=false hidePrompt=false # ## Capturing Parenthesis # # The previous example used something called a negative # lookaround with capturing parenthesis. When you place a # pattern within a set of brackets `(...)` the string that # matches this pattern is "remembered". The strings that are # matched can be used in subsequent parts of your regular # expression. This allows a form of programming within your # regular expression! # # Each set of parentheses that are matched are associated with # a number, starting at one and incrementing for each # subsequent pattern match. For instance, the following # pattern will have three matches: # <pre> # ^([0-9]{3})-([0-9]{3})-([0-9]{3})$ # </pre> # This is similar to the SSN example used earlier on in this # section. The difference in this example is that each block # of numbers is exactly the same (3 digits). This pattern will # match any sequence of numbers in the format 123-456-789. # + hideCode=false hidePrompt=false language="sql" # WITH SSNS(SSN) AS ( # VALUES # '123-456-789', # '123-555-123', # '890-533-098', # '123-456-456' # ) # SELECT SSN, # CASE # WHEN REGEXP_LIKE(SSN,'^([0-9]{3})-([0-9]{3})-([0-9]{3})$') THEN 'Valid' # ELSE 'Invalid' # END # FROM SSNS # + [markdown] hideCode=false hidePrompt=false # All of these numbers fit the pattern and should be valid. # When one of the capturing parenthesis matches, it will # remember the string that it matched. For instance, in the # first example (123-456-789), the first match will find the # string '123'. The second match will find '456' and so on. 
We # can refer to these matched strings with the special control # characters `\n` where n represents the capturing parenthesis # location. So `\1` would refer to the '123' that was found. `\2` # would be for '456'. # # The regular expression will be updated so that the last part # of the pattern needs to be numeric (0-9) but can't be same # as the first match. # + hideCode=false hidePrompt=false language="sql" # WITH SSNS(SSN) AS ( # VALUES # '123-456-789', # '123-555-123', # '890-533-098', # '123-456-456' # ) # SELECT SSN, # CASE # WHEN REGEXP_LIKE(SSN,'^([0-9]{3})-([0-9]{3})-(?!\1)([0-9]{3})$') THEN 'Valid' # ELSE 'Invalid' # END # FROM SSNS # + [markdown] hideCode=false hidePrompt=false # In many cases it may be easier to find the patterns that # match and then negate the REGEXP statement! The `(?...)` # syntax is used for a variety of purposes in regular # expressions: # # | Pattern | Result # |:----------|:------------ # | (?: ... ) | Non-capturing parentheses. Groups the included pattern, but does not provide capturing of matching text. More efficient than capturing parentheses. # | (?> ... ) | Atomic-match parentheses. First match of the parenthesized subexpression is the only one tried. If it does not lead to an overall pattern match, back up the search for a match to a position before the "(?>" # | (?# ... ) | Free-format comment (?# comment ) # | (?= ... ) | Look-ahead assertion. True if the parenthesized pattern matches at the current input position, but does not advance the input position. # | (?! ... ) | Negative look-ahead assertion. True if the parenthesized pattern does not match at the current input position. Does not advance the input position. # | (?<= ... )| Look-behind assertion. True if the parenthesized pattern matches text that precedes the current input position. The last character of the match is the input character just before the current position. Does not alter the input position. 
The length of possible strings that is matched by the look-behind pattern must not be unbounded (no * or + operators.)
# | (?<!... ) | Negative Look-behind assertion. True if the parenthesized pattern does not match text that precedes the current input position. The last character of the match is the input character just before the current position. Does not alter the input position. The length of possible strings that is matched by the look-behind pattern must not be unbounded (no * or + operators.)
#
#
# For efficiency in matching, the best approach is to place
# strings that you are searching for in non-capturing
# parentheses `(?:...)` rather than the generic `(...)` parentheses.
# The following example finds all stations with "West" in the
# name.

# + hideCode=false hidePrompt=false language="sql"
# SELECT STATION FROM CENTRAL_LINE
# WHERE REGEXP_LIKE(STATION,'(West)')

# + [markdown] hideCode=false hidePrompt=false
# The following SQL is equivalent, except that the matched
# pattern is not kept for future use in matching.

# + hideCode=false hidePrompt=false language="sql"
# SELECT STATION FROM CENTRAL_LINE
# WHERE REGEXP_LIKE(STATION,'(?:West)')
# -

# [Back to Top](#top)

# <a id='performance'></a>

# + [markdown] hideCode=false hidePrompt=false
# ## Performance Considerations of Regular Expressions
#
# We know that there are better ways to write Regular
# Expressions (like the use of non-capturing parentheses). How
# much of a difference does this make? In order to find out,
# the following SQL will take the existing table and insert
# into a temporary table 1000 times (for close to 50000
# records).
#
# This SQL will generate the new table and fill it with the
# base CENTRAL_LINE data.
# + hideCode=false hidePrompt=false magic_args="-d -q" language="sql" # DROP TABLE TEMP_LINE # @ # # CREATE TABLE TEMP_LINE AS (SELECT * FROM CENTRAL_LINE) WITH DATA # @ # # BEGIN # DECLARE I INTEGER DEFAULT 0; # WHILE I <= 1000 DO # INSERT INTO TEMP_LINE SELECT * FROM CENTRAL_LINE; # SET I = I + 1; # END WHILE; # END # @ # # SELECT COUNT(*) FROM TEMP_LINE # @ # + [markdown] hideCode=false hidePrompt=false # We will run four queries in the following SQL. # # - Search for West using the LIKE statement # - Search for West using a regular expression with no matching () # - Search for West using a regular expression with matching () # - Search for West using a regular expression with non-capturing parenthesis (?:) # # The queries are run as many times as possible per second to get consistent results. Odds are that there will be variations in the results on your system! # + hideCode=false hideOutput=true hidePrompt=false # results_like = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE STATION LIKE '%West%' # results_string = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'West') # results_capturing = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'(West)') # results_noncapturing = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'(?:West)') # - # The results are placed into a temporary table for easier formatting. # %sql -q DROP TABLE RESULTS # %sql CREATE TABLE RESULTS(TYPE VARCHAR(16), RESULT DEC(9,2)) # %sql INSERT INTO RESULTS VALUES ('LIKE', {results_like} ), \ # ('REGX STRING', {results_string} ), \ # ('REGX CAPTURE', {results_capturing} ), \ # ('REGX NONCAPTURE', {results_noncapturing} ) # %sql SELECT * FROM RESULTS # The results are clearer when we plot them all on one graph! # %sql -pb SELECT * FROM RESULTS # + [markdown] hideCode=false hidePrompt=false # Every system will have different performance # characteristics, but the odds are that the LIKE statement # was faster than the regular expression. 
The performance is # probably ranked this way (with tx/sec being the measurement): # # <pre> # LIKE > REGEXP(Non-capturing) > REGEXP(Capturing) > REGEXP(String) # </pre> # # However, your system will probably get different results. So # while Regular expressions are very powerful, there is a # performance penalty when using them. In addition, you may # need to consider the use of indexes from a performance # perspective. # # The following SQL will add an index to the STATION name. # + hideCode=false hidePrompt=false magic_args="-q" language="sql" # DROP INDEX TEMP_STATION_INDEX; # CREATE INDEX TEMP_STATION_INDEX ON TEMP_LINE(STATION); # - # We will try the 4 queries again and plot the result. # + hideCode=false hidePrompt=false # results_like = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE STATION LIKE '%West%' # results_string = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'West') # results_capturing = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'(West)') # results_noncapturing = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'(?:West)') # %sql -q DROP TABLE RESULTS # %sql -q CREATE TABLE RESULTS(TYPE VARCHAR(16), RESULT DEC(9,2)) # %sql -q INSERT INTO RESULTS VALUES ('LIKE', {results_like} ), \ # ('REGX STRING', {results_string} ), \ # ('REGX CAPTURE', {results_capturing} ), \ # ('REGX NONCAPTURE', {results_noncapturing} ) # %sql SELECT * FROM RESULTS # %sql -pb SELECT * FROM RESULTS # + [markdown] hideCode=false hidePrompt=false # The index can help speed up processing of the LIKE statement and the regular expression calls. The results may or may not favor LIKE over regular expressions. In summary, if you are going to use regular expressions, you may get a performance advantage by using indexes if possible. # - # Close the connection to avoid running out of connection handles to Db2 on Cloud. # %sql CONNECT RESET # [Back to Top](#top) # #### Credits: IBM 2019, <NAME> [<EMAIL>]
Db2_11.1_Regular_Expressions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SINDy for neural inference # ## Examples in Lorenz Systems # # ### Introduction # In this notebook we'll demonstrate the utility of the SINDy autoencoder ([Champion et al]()) in estimating dynamics. # We'll focus on the Lorenz System (Eq 1) in this notebook just to demonstrate what SINDy is before moving to a more neuroscience-congruent dynamics. # # ### Model # We start with the general form of the Lorenz system (code built on [ipywidgets Lorenz example](https://github.com/jupyter-widgets/ipywidgets/blob/80921ac9f9a18c43b02918ce3913818c188ae34c/docs/source/examples/Lorenz%20Differential%20Equations.ipynb)). # # $$ # \begin{aligned} # \dot{x} & = \sigma(y-x) \\ # \dot{y} & = \rho x - y - xz \\ # \dot{z} & = -\beta z + xy # \end{aligned} # $$ # # Our parameters of interest are: (\\(\sigma\\), \\(\beta\\), \\(\rho\\)) # #### Imports # + jupyter={"source_hidden": true} import sys sys.path.append("../../src") import os import numpy as np import pickle import pandas as pd from scipy import integrate # Import our basic plotting libraries import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.colors import cnames from matplotlib import animation # Import our jupyter widgets from ipywidgets import interact, interactive, fixed from IPython.display import clear_output, display, HTML import pysindy as ps from scipy.integrate import odeint from sklearn.linear_model import Lasso # %matplotlib inline # + jupyter={"source_hidden": true} '''Solve method just for the ipywidget display''' def solve_lorenz(N=10, angle=0.0, max_time=4.0, sigma=10.0, beta=8./3, rho=28.0): def lorenz_deriv(x_y_z, t0, sigma=sigma, beta=beta, rho=rho): """Compute the time-derivative of a Lorenz system.""" x, y, z = x_y_z return [sigma * (y - x), x * 
(rho - z) - y, x * y - beta * z]

    # Choose random starting points, uniformly distributed from -15 to 15
    np.random.seed(1)
    x0 = -15 + 30 * np.random.random((N, 3))

    # Solve for the trajectories
    t = np.linspace(0, max_time, int(250*max_time))
    x_t = np.asarray([integrate.odeint(lorenz_deriv, x0i, t)
                      for x0i in x0])

    return t, x_t

'''Plot widget for the ipywidget display'''
def plot_lorenz(N=10, angle=0.0, max_time=4.0, sigma=10.0, beta=8./3, rho=28.0):
    """Solve and render N Lorenz trajectories on a single 3D axis.

    Parameters mirror solve_lorenz: N trajectories, viewing angle in
    degrees, integration time, and the Lorenz coefficients (sigma,
    beta, rho). Returns the (t, x_t) pair from solve_lorenz so later
    cells can reuse the solved trajectories via the widget's .result.
    """
    fig = plt.figure()
    ax = fig.add_axes([0, 0, 1, 1], projection='3d')
    ax.axis('off')
    t,x_t = solve_lorenz(N,angle,max_time,sigma,beta,rho)
    # prepare the axes limits
    ax.set_xlim((-25, 25))
    ax.set_ylim((-35, 35))
    ax.set_zlim((5, 55))
    # choose a different color for each trajectory
    colors = plt.cm.viridis(np.linspace(0, 1, N))
    for i in range(N):
        x, y, z = x_t[i,:,:].T
        lines = ax.plot(x, y, z, '-', c=colors[i])
        plt.setp(lines, linewidth=2)
    ax.view_init(30, angle)
    plt.show()
    return t,x_t
# -

# <a id='choose_coeffs'></a>
# ## Choosing our ground-truth model
#
# We're going to use an interactive widget to set up a Lorenz system with coefficients that we choose.
# This will then generate a dataset that we'll then use SINDy to try to 'reverse-engineer' the dynamics that generated it.

w = interactive(plot_lorenz, angle=(0.,360.), max_time=fixed(4.0), N=fixed(10), sigma=(-2.0,50.0), rho=(0.0,50.0))
display(w)

# Pull the chosen coefficients back out of the widget's sliders.
# NOTE(review): assumes w.children orders the auto-built sliders as
# [angle, sigma, beta, rho, output] — beta gets a slider generated from
# its numeric default even though no abbreviation is passed above.
# Confirm this ordering against the installed ipywidgets version.
sigma = w.children[1].value
beta = w.children[2].value
rho = w.children[3].value

# So we've got a Lorenz system with the parameters we've decided to set in the sliders.
# Let's see what individual dimensions look like:

# + jupyter={"source_hidden": true}
t = w.result[0]
x_t = w.result[1]

plt.figure()
plt.plot(t,x_t[0,:,0],label='x')
plt.plot(t,x_t[0,:,1],label='y')
plt.plot(t,x_t[0,:,2],label='z')
plt.legend()
# -

# # Enter SINDy-based inference
# What we ended with in the previous section was a *timeseries*, or a signal that changes over time.
# Think of this like an EEG, an LFP, or even a spike train.
# Our goal is to go from these squigles to a deeper understanding of what's happening in the brain, like 'action potentials' or 'reduced synaptic input'. # # This is where SINDy is going to be a powerful tool. # What SINDy does is it takes the sets of 'what's happening in our brain' and tries to see how much each of those things make sense given the timeseries we see. # It then tells us "how much" each of the 'what's happening in our brain' seems to be in our data and builds a model that tries to match it. # + jupyter={"source_hidden": true} def lorenz(z, t): return [ sigma * (z[1] - z[0]), z[0] * (rho - z[2]) - z[1], z[0] * z[1] - beta * z[2] ] # + dt = .002 t_train = np.arange(0, 10, dt) x0_train = [-8, 8, 27] x_train = odeint(lorenz, x0_train, t_train) # + plt.figure() plt.plot(x_train,color='blue') model = ps.SINDy() model.fit(x_train, t=dt) model.print() # - # ## Effects of noise # Finally, we'll explore the impact that noise can have on SINDy's performance. # + def noise_exercise(noise_level): x_train_noise = x_train + np.random.normal(0,noise_level,size=x_train.shape) plt.figure() plt.plot(x_train_noise,alpha=0.7,color='red') plt.plot(x_train,color='blue',alpha=0.3) model = ps.SINDy() model.fit(x_train_noise, t=dt) model.print() noise_widg = interactive(noise_exercise,noise_level=(0.0,1,0.001)) display(noise_widg) # - # Unfortunately, it looks like it's very, very sensitive to noise. # But hey, still cool.
notebooks/.ipynb_checkpoints/interactive_SINDy_lorenz-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="GoCXzNvN8g-8" # # Case Study 5 - Firewall Traffic # + [markdown] id="YBy24RcB8g-9" # __Team Members:__ <NAME>, <NAME>, <NAME>, <NAME> # + [markdown] id="O4O0up-U8g-9" # # Content # * [Business Understanding](#business-understanding) # - [Introduction](#introduction) # - [Methods](#methods) # - [Results](#results) # * [Data Engineering](#data-evaluation) # - [Data Summary](#data-summary) # - [Loading Data](#loading-data) # - [Missing Values](#missing-values) # - [Exploratory Data Analysis (EDA)](#eda) # - [Assumptions](#assumptions) # * [Model Preparations](#model-preparations) # * [Model Building & Evaluations](#model-building) # * [Conclusion](#conclusion) # - [Final Model Proposal](#final-model-proposal) # - [Future Considerations and Model Enhancements](#model-enhancements) # # + [markdown] id="TRedT-FB8g_A" # # Business Understanding & Executive Summary <a id='business-understanding'/> # + [markdown] id="F-4BiuuQOEh4" # ### Objective <a id='scope'/> # # - # ### Introduction <a id='introduction'/> # # ### Methods <a id='methods'/> # #### Data Wrangling # # # #### Modeling # # # #### Metrics # # # #### Modeling Objective # # The team's objective was to maximize __accuracy__ and __F1__ while minimizing the __processing time__ to make predictions in real time. 
# ### Results <a id='results'/> # # + [markdown] id="PVtcYu5j8g_B" # # Data Engineering <a id='data-evaluation'> # # - # ## Data Summary # # + id="qcBcP8Jy8g_C" outputId="a6a85b4a-a106-48bb-bc2a-e6a5fb8ca494" # standard libraries import pandas as pd import numpy as np #import re import os #from IPython.display import Image #import sklearn #import time # email # visualization import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline from IPython.display import clear_output #from tabulate import tabulate #from collections import defaultdict #from collections import Counter # data pre-processing from sklearn.model_selection import train_test_split from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing import StandardScaler # prediction models from sklearn.svm import SVC #metrics from sklearn.metrics import f1_score from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score # import warnings filter '''import warnings warnings.filterwarnings('ignore') from warnings import simplefilter simplefilter(action='ignore', category=FutureWarning)''' # + [markdown] id="4WcvKI3y8g_C" # ### Loading Data and Cleanup <a id='loading-data'> # + [markdown] id="ZDhb7boF8g_D" # As part of data cleanup, team removed HTML tags, stop words, and non-alphanumeric characters. 
# - fp = os.path.join(os.getcwd()+'/'+'log2.csv') fp df = pd.read_csv(fp) df.shape df.head() df.info() df['Action'].value_counts() # #### Recode 'Action' as binary response df.loc[ df['Action']!='allow', 'Action' ]='not allow' df['Action'].value_counts() df.describe() # ### Recode "Port" variables as object/string in df1 port_vars = ['Source Port', 'Destination Port', 'NAT Source Port', 'NAT Destination Port'] cont_vars = list( set(df.columns) - set(port_vars) - set(['Action']) ) cont_vars # + [markdown] id="Aws5HAx98g_E" # ## Missing Values <a id='missing-values'> # No missing values in the dataset # - df.isna().values.sum() # + [markdown] id="CbAmkozvN5Dz" # ## Exploratory Data Analysis (EDA) <a id='eda'> # - count_Class=pd.value_counts(df['Action'], sort= True) count_Class.plot(kind= 'bar', color= ["blue", "orange"]) plt.show() df['Action'].value_counts(normalize=True) # + rows = 2 cols = 2 fig, axes = plt.subplots(rows, cols, figsize=(12, 5)) for i,j in zip( port_vars, range(len(port_vars)) ): sns.histplot(ax=axes[int(j/cols),j%cols], data = df, x=df[i], hue = 'Action', bins=25, log_scale=[False,False], multiple='dodge') axes[int(j/cols),j%cols].set_xlabel(i) #axes[int(j/cols),j%cols].set_ylabel('Count (log scale)') #axes[int(j/cols),j%cols].set_ylim([0, 35000]) plt.suptitle('All Ports') fig.tight_layout() plt.show() # + rows = 2 cols = 2 fig, axes = plt.subplots(rows, cols, figsize=(12, 5)) for i,j in zip( port_vars, range(len(port_vars)) ): sns.histplot(ax=axes[int(j/cols),j%cols], data = df, x=df[i], hue = 'Action', bins=25, log_scale=[False,False], multiple='dodge') axes[int(j/cols),j%cols].set_xlabel(i) #axes[int(j/cols),j%cols].set_ylabel('Count (log scale)') axes[int(j/cols),j%cols].set_ylim([0, 35000]) plt.suptitle('All Ports') fig.tight_layout() plt.show() # + rows = 1 cols = 4 fig, axes = plt.subplots(rows, cols, figsize=(12, 5)) for i,j in zip( port_vars, range(len(port_vars)) ): sns.histplot(ax=axes[j%cols], data = df, x=df[df[i]==0][i], hue = 
'Action', log_scale=[False,False], multiple='stack') axes[j%cols].set_xlabel(i) axes[j%cols].set_xlim([0, 2]) axes[j%cols].set_ylim([0, 30000]) #axes[j%cols].set_ylabel('Count (log scale)') plt.suptitle('Ports = 0') fig.tight_layout() plt.show() # - # ### Most of 'not allow' occurs near continuous features minimum values # The firewall is working - little to no traffic occurs for 'not allow' # + rows = 2 cols = 4 fig, axes = plt.subplots(rows, cols, figsize=(12, 6)) for i,j in zip(cont_vars,range(len(cont_vars))): sns.histplot(ax=axes[int(j/cols),j%cols], data = df, x=np.log(df[i]+1), hue = 'Action', bins=25) axes[int(j/cols),j%cols].set_xlabel('Log('+i+'+1)') axes[int(j/cols),j%cols].set_ylabel('Count') fig.tight_layout() plt.show() # + [markdown] id="z6qUPkzRN5D4" # ## Assumptions <a id='assumptions'> # + [markdown] id="SaEb4apWN5D4" # # + [markdown] id="zmuI_mep8g_b" # # Model Preparations <a id='model-preparations'/> # - # ### Methods # #### Models # # # #### Metrics # # + [markdown] id="UuRjMsjg8g_d" # # Model Building & Evaluations <a id='model-building'/> # - # # ## Split into Training and Test # Data was split into training and test sets with a 70/30 ratio, respectively. 
def split_dependant_and_independant_variables(df: pd.DataFrame, y_var: str):
    """Split *df* into features X (every column except *y_var*) and target y.

    The frame is copied first, so the caller's DataFrame is never mutated.
    (Function name — original spelling included — is kept unchanged for the
    call sites below.)
    """
    X = df.copy()
    y = X[y_var]
    X = X.drop([y_var], axis=1)
    return X, y

def shuffle_split(X, y, test_size, random_state):
    """Single stratified shuffle split of (X, y) into train/test sets.

    Stratifies on y so both splits preserve the class balance.
    Returns X_train, X_test, y_train, y_test.
    """
    stratified_shuffle_split = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=random_state)
    for train_index, test_index in stratified_shuffle_split.split(X, y):
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        # BUGFIX: the splitter returns *positional* indices, so index y
        # with .iloc as well — plain y[...] is label-based for a Series
        # and only happened to work while y carried a default RangeIndex.
        y_train, y_test = y.iloc[train_index], y.iloc[test_index]
    return X_train, X_test, y_train, y_test

# #### Positive class = 'allow'

X, y = split_dependant_and_independant_variables(df, 'Action')
y = (y=='allow').astype(int)
X_train, X_test, y_train, y_test = shuffle_split(X, y, test_size=0.3, random_state=123432)

y_train.value_counts(normalize=True)

y_test.value_counts(normalize=True)

# Standardize features; the scaler is fit on the training split only so
# no test-set statistics leak into training.
sc = StandardScaler()
sc.fit(X_train)
X_train_sc = pd.DataFrame( sc.transform(X_train), columns = X_train.columns, index = X_train.index)
X_test_sc = pd.DataFrame( sc.transform(X_test), columns = X_test.columns, index = X_test.index)

# ## Feature Selection
#

# ## Modeling
# Model Performance is discussed in the conclusion.

# ### Baseline Accuracy
# If the model simply predicted everything as 'allow', it would be 57% accurate.
# Baseline accuracy of 57% with class imbalance
df['Action'].value_counts(normalize=True)

# ### Linear SVM

# Sweep the regularization parameter C and record train/test accuracy.
# Rows are collected in a plain list and converted once at the end:
# DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0,
# and growing a DataFrame row-by-row was quadratic anyway.
svm_linear_rows = []
for i in [0.005, 0.01, 0.05, 0.1, 0.2, 0.5, 1, 2, 10, 100]:
    svm_linear = SVC(kernel='linear', C=i)
    svm_linear.fit(X_train_sc, y_train)
    y_hat_train = svm_linear.predict(X_train_sc)
    y_hat_test = svm_linear.predict(X_test_sc)
    svm_linear_rows.append({'C': i,
                            '# support vectors': len(svm_linear.support_vectors_),
                            'accuracy_train': accuracy_score(y_train, y_hat_train),
                            'accuracy_test': accuracy_score(y_test, y_hat_test)})
    print("C =", i, "complete")
svm_linear_acc = pd.DataFrame(svm_linear_rows)
clear_output()
svm_linear_acc

# Train vs. test accuracy across the C sweep; the dashed line marks the
# C = 0.2 value used for the models below.
sns.lineplot(data = svm_linear_acc, x='C', y='accuracy_train', color='blue')
sns.lineplot(data = svm_linear_acc, x='C', y='accuracy_test', color='red')
plt.legend(['Train','Test'], loc='center right')
plt.axvline(0.2, color='black', ls='--')
plt.xscale('log')
plt.show()

# #### Linear SVM Coefficients

# +
svm_linear = SVC(kernel='linear', C=0.2)
svm_linear.fit(X_train_sc, y_train)
y_hat_train_full = svm_linear.predict(X_train_sc)
y_hat_test_full = svm_linear.predict(X_test_sc)

# Per-feature weights of the linear decision function, largest first.
weights = pd.DataFrame(svm_linear.coef_[0], X_train_sc.columns, columns=['coef']).sort_values('coef', ascending=False)
weights.plot(kind='bar')
plt.show()
# -

# look at the linear support vectors
print("Total # of support vectors =", len(svm_linear.support_vectors_))
print("# of support vectors for each class=", svm_linear.n_support_)

# #### Make Simplified Linear Model with Two Terms that have Highest Coefficients

top2_terms = [weights.index[0], weights.index[1]]

svm_linear2 = SVC(kernel='linear', C=0.2)
svm_linear2.fit(X_train_sc[top2_terms], y_train)
y_hat_train2 = svm_linear2.predict(X_train_sc[top2_terms])
y_hat_test2 = svm_linear2.predict(X_test_sc[top2_terms])
print('# support vectors', len(svm_linear2.support_vectors_))
print('accuracy_train', accuracy_score(y_train, y_hat_train2))
print('accuracy_test', accuracy_score(y_test, y_hat_test2))

# 
#### Plot the support vectors and decision boundary def plot_svc_linear_vectors(model, X, y): weights = pd.DataFrame(model.coef_[0], X.columns, columns=['coef']).sort_values('coef', ascending=False) # plot X data for Allow pos_class_index = np.where(y==1) plt.scatter(X.iloc[pos_class_index][weights.index[0]], X.iloc[pos_class_index][weights.index[1]], color='red', marker='x', label='Allow') # plot X data for Not Allow neg_class_index = np.where(y==0) plt.scatter(X.iloc[neg_class_index][weights.index[0]], X.iloc[neg_class_index][weights.index[1]], color='blue', marker='o', label='Not Allow') # add labels plt.xlabel('Scaled: ' + weights.index[0]) plt.ylabel('Scaled: ' + weights.index[1]) plt.legend(loc='upper left') # plot support vectors ax = plt.gca() ax.scatter(model.support_vectors_[:, X.columns.get_loc(weights.index[0])], model.support_vectors_[:, X.columns.get_loc(weights.index[1])], s=100, linewidth=0.2, facecolors='none', edgecolor='black') def plot_svc_decision_boundary(model, ax=None, plot_support=True): """Plot the decision function for a 2D SVC""" if ax is None: ax = plt.gca() xlim = ax.get_xlim() ylim = ax.get_ylim() # create grid to evaluate model x = np.linspace(xlim[0], xlim[1], 30) y = np.linspace(ylim[0], ylim[1], 30) Y, X = np.meshgrid(y, x) xy = np.vstack([X.ravel(), Y.ravel()]).T P = model.decision_function(xy).reshape(X.shape) # plot decision boundary and margins ax.contour(X, Y, P, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) # plot support vectors if plot_support: ax.scatter(model.support_vectors_[:, 0], model.support_vectors_[:, 1], s=100, linewidth=1, facecolors='none', edgecolor='black'); ax.set_xlim(xlim) ax.set_ylim(ylim) plt.figure(figsize=(15,4)) plot_svc_linear_vectors(svm_linear2, X_train_sc[top2_terms], y_train) plot_svc_decision_boundary(svm_linear2) plt.suptitle('Plot: 2-Term Scaled Linear SVM Model') plt.title('(Scaled: Elapsed Time < 0)') plt.xlim([-0.25, 0]) plt.show() # #### Shift scaled values by +1 
so they're all >0 for log scaling svm_linear2_pos = SVC(kernel='linear', C=0.2) svm_linear2_pos.fit(X_train_sc[top2_terms]+1, y_train) y_hat_train2_pos = svm_linear2_pos.predict(X_train_sc[top2_terms]+1) y_hat_test2_pos = svm_linear2_pos.predict(X_test_sc[top2_terms]+1) print('# support vectors', len(svm_linear2_pos.support_vectors_)) print('accuracy_train', accuracy_score(y_train, y_hat_train2_pos)) print('accuracy_test', accuracy_score(y_test, y_hat_test2_pos)) plt.figure(figsize=(15,4)) plot_svc_linear_vectors(svm_linear2_pos, X_train_sc[top2_terms]+1, y_train) plot_svc_decision_boundary(svm_linear2_pos) plt.suptitle('Log-Linear Plot: 2-Term Positive-Scaled Linear SVM Model') plt.title('(Positive-Scaled: Elapsed Time < 0.95)') plt.xlim([0.75, 0.95]) plt.ylim([0.1, 4]) plt.yscale('log') plt.show() plt.figure(figsize=(15,4)) plot_svc_linear_vectors(svm_linear2_pos, X_train_sc[top2_terms]+1, y_train) plot_svc_decision_boundary(svm_linear2_pos) plt.suptitle('Log-Log Plot: 2-Term Positive-Scaled Linear SVM Model') plt.title('(All Data Plotted)') plt.legend(loc='lower right') plt.xlim([0.7, 40]) plt.ylim([0.1, 4]) plt.xscale('log') plt.yscale('log') plt.show() # ### Gaussian SVM Model svm_rbf2_acc = pd.DataFrame() for i in [0.1, 0.5, 1, 5, 10, 100, 200, 300, 500, 700]: svm_rbf2 = SVC(kernel='rbf', C=i) svm_rbf2.fit(X_train_sc[top2_terms], y_train) y_hat_train = svm_rbf2.predict(X_train_sc[top2_terms]) y_hat_test = svm_rbf2.predict(X_test_sc[top2_terms]) svm_rbf2_acc = svm_rbf2_acc.append({'C': i, '# support vectors': len(svm_rbf2.support_vectors_), 'accuracy_train': accuracy_score(y_train, y_hat_train), 'accuracy_test': accuracy_score(y_test, y_hat_test)}, ignore_index=True) print("C =", i, "complete") clear_output() svm_rbf2_acc sns.lineplot(data = svm_rbf2_acc, x='C', y='accuracy_train', color='blue') sns.lineplot(data = svm_rbf2_acc, x='C', y='accuracy_test', color='red') plt.legend(['Train','Test'], loc='center right') plt.axvline(300, color='black', ls='--') 
plt.xscale('log') plt.show() svm_rbf2 = SVC(kernel='rbf', C=300) svm_rbf2.fit(X_train_sc[top2_terms], y_train) y_hat_train_rbf2 = svm_rbf2.predict(X_train_sc[top2_terms]) y_hat_test_rbf2 = svm_rbf2.predict(X_test_sc[top2_terms]) def plot_svc_vectors(model, X, y, top2_terms): # no weights with non-linear SVM, need to manually provide top 2 terms # plot X data for Allow pos_class_index = np.where(y==1) plt.scatter(X.iloc[pos_class_index][top2_terms[0]], X.iloc[pos_class_index][top2_terms[1]], color='red', marker='x', label='Allow') # plot X data for Not Allow neg_class_index = np.where(y==0) plt.scatter(X.iloc[neg_class_index][top2_terms[0]], X.iloc[neg_class_index][top2_terms[1]], color='blue', marker='o', label='Not Allow') # add labels plt.xlabel('Scaled: ' + top2_terms[0]) plt.ylabel('Scaled: ' + top2_terms[1]) plt.legend(loc='lower left') # plot support vectors ax = plt.gca() ax.scatter(model.support_vectors_[:, X.columns.get_loc(top2_terms[0])], model.support_vectors_[:, X.columns.get_loc(top2_terms[1])], s=100, linewidth=0.2, facecolors='none', edgecolor='black') plt.figure(figsize=(15,4)) plot_svc_vectors(svm_rbf2, X_train_sc[top2_terms], y_train, top2_terms) plot_svc_decision_boundary(svm_rbf2) plt.suptitle('Plot: 2-Term Scaled Gaussian SVM Model') plt.title('(Scaled: Elapsed Time < 0.2)') plt.xlim([-0.3, 0.3]) plt.show() # #### Shift scaled values by +1 so they're all >0 for log scaling svm_rbf2_pos = SVC(kernel='rbf', C=300) svm_rbf2_pos.fit(X_train_sc[top2_terms]+1, y_train) y_hat_train_rbf2_pos = svm_rbf2_pos.predict(X_train_sc[top2_terms]+1) y_hat_test_rbf2_pos = svm_rbf2_pos.predict(X_test_sc[top2_terms]+1) print('accuracy_train', accuracy_score(y_train, y_hat_train_rbf2_pos)) print('accuracy_test', accuracy_score(y_test, y_hat_test_rbf2_pos)) plt.figure(figsize=(15,4)) plot_svc_vectors(svm_rbf2_pos, X_train_sc[top2_terms]+1, y_train, top2_terms) plot_svc_decision_boundary(svm_rbf2_pos) plt.suptitle('Log-Linear Plot: 2-Term Positive-Scaled Gaussian SVM 
Model') plt.title('(Positive-Scaled: Elapsed Time < 0.95)') plt.xlim([0.7, 1.3]) plt.ylim([0.1, 4]) plt.yscale('log') plt.show() plt.figure(figsize=(15,4)) plot_svc_vectors(svm_rbf2_pos, X_train_sc[top2_terms]+1, y_train, top2_terms) plot_svc_decision_boundary(svm_rbf2_pos) plt.suptitle('Log-Linear Plot: 2-Term Positive-Scaled Gaussian SVM Model') plt.title('(All Data Plotted)') plt.legend(loc='lower right') plt.xlim([0.6, 40]) plt.ylim([0.1, 4]) plt.xscale('log') plt.yscale('log') plt.show() # #### Full Gaussian Model (All Terms) svm_rbf_acc = pd.DataFrame() for i in [0.1, 0.5, 1, 10, 100, 200, 300, 500, 800]: svm_rbf = SVC(kernel='rbf', gamma='auto', C=i) svm_rbf.fit(X_train_sc, y_train) y_hat_train = svm_rbf.predict(X_train_sc) y_hat_test = svm_rbf.predict(X_test_sc) svm_rbf_acc = svm_rbf_acc.append({'C': i, '# support vectors': len(svm_rbf.support_vectors_), 'accuracy_train': accuracy_score(y_train, y_hat_train), 'accuracy_test': accuracy_score(y_test, y_hat_test)}, ignore_index=True) print("C =", i, "complete") clear_output() svm_rbf_acc sns.lineplot(data = svm_rbf_acc, x='C', y='accuracy_train', color='blue') sns.lineplot(data = svm_rbf_acc, x='C', y='accuracy_test', color='red') plt.legend(['Train','Test'], loc='center right') plt.axvline(200, color='black', ls='--') plt.xscale('log') plt.show() svm_rbf = SVC(kernel='rbf', gamma='auto', C=200) svm_rbf.fit(X_train_sc, y_train) y_hat_train_rbf_full = svm_rbf.predict(X_train_sc) y_hat_test_rbf_full = svm_rbf.predict(X_test_sc) print('# support vectors', len(svm_rbf.support_vectors_)) print('accuracy_train', accuracy_score(y_train, y_hat_train_rbf_full)) print('accuracy_test', accuracy_score(y_test, y_hat_test_rbf_full)) print("2-Factor Linear Conf Matrix:\n", confusion_matrix(y_test, y_hat_test2)) print("Full Linear Conf Matrix:\n", confusion_matrix(y_test, y_hat_test_full)) print("2-Factor Gaussian Conf Matrix:\n", confusion_matrix(y_test, y_hat_test_rbf2)) print("Full Gaussian Conf Matrix:\n", 
confusion_matrix(y_test, y_hat_test_rbf_full)) # + [markdown] id="JbAhMB1x8g_e" # # Conclusion <a id='conclusion'> # - # # + [markdown] id="xnsadV7M8g_e" # ### Final Model Proposal <a id='final-model-proposal'/> # - # # + [markdown] id="oX8fXYczN5D-" # ### Future Considerations and Model Enhancements <a id='model-enhancements'/> # + [markdown] id="xzeTkIEWN5D-" # # - # # References <a id='references'> # [1]
ds7333_case_study_5/case_study5_AL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="H7LoMj4GA4n_" # # aitextgen — Train a GPT-2 (or GPT Neo) Text-Generating Model w/ GPU # # by [<NAME>](https://minimaxir.com) # # *Last updated: May 16th, 2021 (aitextgen v0.5.2)* # # Retrain an advanced text generating neural network on any text dataset **for free on a GPU using Colaboratory** using `aitextgen`! # # For more about `aitextgen`, you can visit [this GitHub repository](https://github.com/minimaxir/aitextgen) or [read the documentation](https://docs.aitextgen.io/). # # # To get started: # # 1. Copy this notebook to your Google Drive to keep it and save your changes. (File -> Save a Copy in Drive) # 2. Run the cells below: # # + id="KBkpRgBCBS2_" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1649686677847, "user_tz": -120, "elapsed": 38429, "user": {"displayName": "<NAME>\u0107", "userId": "09411250063536912562"}} outputId="62da7aec-1fd5-4240-9d68-0035fc6f2e94" # !pip install -q aitextgen import logging logging.basicConfig( format='%(asctime)s — %(levelname)s — %(name)s — %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) from aitextgen import aitextgen from aitextgen.colab import mount_gdrive, copy_file_from_gdrive # + [markdown] id="Bj2IJLHP3KwE" # ## GPU # # Colaboratory uses a Nvidia P4, an Nvidia T4, an Nvidia P100, or an Nvidia V100. For finetuning GPT-2 124M, any of these GPUs will be fine, but for text generation, a T4 or a P100 is ideal since they have more VRAM. **If you receive a T4 or a V100 GPU, you can enable `fp16=True` during training for faster/more memory efficient training.** # # You can verify which GPU is active by running the cell below. If you want to try for a different GPU, go to **Runtime -> Factory Reset Runtime**. 
# + id="sUmTooTW3osf" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1649686677848, "user_tz": -120, "elapsed": 23, "user": {"displayName": "<NAME>\u0107", "userId": "09411250063536912562"}} outputId="d2397217-4e5b-4952-b62e-c287dac96f31" # !nvidia-smi # + [markdown] id="trRhgNvsH4Wn" # ## Loading GPT-2 or GPT Neo # # If you're retraining a model on new text, you need to download and load the GPT-2 model into the GPU. # # There are several sizes of GPT-2: # # * `124M` (default): the "small" model, 500MB on disk. # * `355M` (default): the "medium" model, 1.5GB on disk. # * `774M` (default): the "large" model, 3GB on disk. # # You can also finetune a GPT Neo model instead, which is more suitable for longer texts and the base model has more recent data: # # * `125M`: Analogous to the GPT-2 124M model. # * `350M`: Analogous to the GPT-2 355M model # # The next cell downloads the model and saves it in the Colaboratory VM. If the model has already been downloaded, running this cell will reload it. 
# + id="5R0DI2QeOg9f" #model='124M' #model='355M' #model='774M' model='gpt-neo-125M' #model='gpt-neo-350M' # + id="flqSlHjMIeIw" colab={"base_uri": "https://localhost:8080/", "height": 812, "referenced_widgets": ["5b6b48718e4a4fffa7f9cc39eccc87ea", "2c04afebc7414372bcf69794d6e47423", "5621ba20221844548717bdcfc9beabc7", "19c9b11e896b4f71962af7c826224d4c", "0a89f37047f946748f50e82de58c01e4", "05c8339c35b74d3a95b03c026ffddc1b", "91e4dc36271942f09f45826e97be4381", "7aebfcdc63bc444b898b398181e7265e", "856189818b1940689a2049dfed71802d", "91f148fd51f049e09567a49a2d09aad3", "<KEY>", "918623201e8a4ed7990ab3f9f400354a", "fd90f20feb214055b96ab27aba27c718", "86225f11a92a42be9c6fc04459efe0cf", "e1d22f2cb8ab41a2bdab880e3684265c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "32ebccafc3aa420dba741646c727507b", "<KEY>", "<KEY>", "38f60cab65194a18839432d42044eeb4", "<KEY>", "64f49d812fe445949e0693302ced8d60", "<KEY>", "<KEY>", "<KEY>", "45777c02adc440849e3539228c908d46", "a1dea1597dd04d6585e08d364b4a14a3", "b78a1d5548314b8eaea4ed831012de9e", "3581ed4ee0ef40b3af3761a8ee987010", "46ea71722ceb407db1c37f3671e5e4b3", "cab145a6eae748b9830311ed52751da1", "b94ed870a225472a8676a1b8e62f40d4", "363dbc24abe8448580e33572d1a3a0bb", "<KEY>", "8222d62c7adf4fe7ba5d33e8b5e4df04", "<KEY>", "<KEY>", "7c4c2ababf0e43f683808f85a46d93a8", "<KEY>", "416866fc028f411099ac6417d9ef46e7", "<KEY>", "be9bfce232d645118c8300c73ca73e0f", "<KEY>", "de2277df2f204cab8c71af010bca7848", "<KEY>", "<KEY>", "decea8a0e43d414986b453aa1e29f81b", "e62a625ec21e427a8d0c7159ca006827", "c802a93a87cd4006b623a4221116f53f", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "f0a9c78825e3481cba9957af9ff3cdb8", "<KEY>", "<KEY>", "fa3658b03a514f6abe8385830679c485", "<KEY>", "<KEY>", "eb275ca61792414a97df751c520cec77", "2a936901f24941328188e8ade25fcc4d", "<KEY>", "890b0fa4823d4e6b995a31c6dd6ce44c", "8cb9635ad2824dbd9e1f263ed5b6f287", "ecc7c669c6a345bd9e249daa084fbbec", "b138d8f2063744b9a8cb0e68d11878bc", 
"798f4ed39b1443f0b3e5bef252146504", "<KEY>", "52f119e559d94a828a7d84d6a1329216", "bec1f78a2089408eb969d3418b2a07b7"]} executionInfo={"status": "ok", "timestamp": 1649686715461, "user_tz": -120, "elapsed": 37627, "user": {"displayName": "<NAME>\u0107", "userId": "09411250063536912562"}} outputId="6cf06a64-651d-480f-932e-95dc02d85779" if model == '124M' or model == '355M' or model == '774M': ai = aitextgen(tf_gpt2=model, to_gpu=True) else: ai = aitextgen(model='EleutherAI/' + model, to_gpu=True) # + id="Z7IeveW7Odze" folder = 'trained_model_' + model # + [markdown] id="N8KXuKWzQSsN" # ## Mounting Google Drive # # The best way to get input text to-be-trained into the Colaboratory VM, and to get the trained model *out* of Colaboratory, is to route it through Google Drive *first*. # # Running this cell (which will only work in Colaboratory) will mount your personal Google Drive in the VM, which later cells can use to get data in/out. (it will ask for an auth code; that auth is not saved anywhere) # + id="puq4iC6vUAHc" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1649686847456, "user_tz": -120, "elapsed": 132018, "user": {"displayName": "<NAME>\u0107", "userId": "09411250063536912562"}} outputId="b51e347e-d758-4638-a3ae-d02dc5a3b6ad" mount_gdrive() # + [markdown] id="BT__brhBCvJu" # ## Uploading a Text File to be Trained to Colaboratory # # In the Colaboratory Notebook sidebar on the left of the screen, select *Files*. From there you can upload files: # # ![alt text](https://i.imgur.com/w3wvHhR.png) # # Upload **any smaller text file** (for example, [a text file of Shakespeare plays](https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt)) and update the file name in the cell below, then run the cell. 
# + id="6OFnPCLADfll" file_name = 'content2.txt' # + [markdown] id="HeeSKtNWUedE" # If your text file is large (>10MB), it is recommended to upload that file to Google Drive first, then copy that file from Google Drive to the Colaboratory VM. # # Additionally, you may want to consider [compressing the dataset to a cache first](https://docs.aitextgen.io/dataset/) on your local computer, then uploading the resulting `dataset_cache.tar.gz` and setting the `file_name`in the previous cell to that. # + id="-Z6okFD8VKtS" copy_file_from_gdrive(file_name) # + [markdown] id="LdpZQXknFNY3" # ## Finetune GPT-2 # # The next cell will start the actual finetuning of GPT-2 in aitextgen. It runs for `num_steps`, and a progress bar will appear to show training progress, current loss (the lower the better the model), and average loss (to give a sense on loss trajectory). # # The model will be saved every `save_every` steps in `trained_model` by default, and when training completes. If you mounted your Google Drive, the model will _also_ be saved there in a unique folder. # # The training might time out after 4ish hours; if you did not mount to Google Drive, make sure you end training and save the results so you don't lose them! (if this happens frequently, you may want to consider using [Colab Pro](https://colab.research.google.com/signup)) # # Important parameters for `train()`: # # - **`line_by_line`**: Set this to `True` if the input text file is a single-column CSV, with one record per row. aitextgen will automatically process it optimally. # - **`from_cache`**: If you compressed your dataset locally (as noted in the previous section) and are using that cache file, set this to `True`. # - **`num_steps`**: Number of steps to train the model for. # - **`generate_every`**: Interval of steps to generate example text from the model; good for qualitatively validating training. 
# - **`save_every`**: Interval of steps to save the model: the model will be saved in the VM to `/trained_model`. # - **`save_gdrive`**: Set this to `True` to copy the model to a unique folder in your Google Drive, if you have mounted it in the earlier cells # - **`fp16`**: Enables half-precision training for faster/more memory-efficient training. Only works on a T4 or V100 GPU. # # Here are other important parameters for `train()` that are useful but you likely do not need to change. # # - **`learning_rate`**: Learning rate of the model training. # - **`batch_size`**: Batch size of the model training; setting it too high will cause the GPU to go OOM. (if using `fp16`, you can increase the batch size more safely) # + id="aeXshJM-Cuaf" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["e8b7e18481de4451b65e9e6ae0eabe4a", "fa2be76cc8fe407796435c4ba617a4de", "a93ca88f61944746bca742868de5d763", "ea928d7ad1c04b19a666f14df425a6f0", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "87abe7c5177e419f9866b73cecf7d68b", "922ea0a93f894095b26600eec65d7978", "9979cfb6a36f427ea3b71f40b701ed74", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "e40472976b5b493bb1e1cd69eadd3624", "e1441c00785e4f1da68ca32da0a2ca3e", "ad8d0d5ce0e44236898d1e449773852a", "<KEY>", "<KEY>", "<KEY>"]} executionInfo={"status": "ok", "timestamp": 1649690068975, "user_tz": -120, "elapsed": 3220327, "user": {"displayName": "<NAME>\u0107", "userId": "09411250063536912562"}} outputId="05b3b4cc-9cc5-43f5-a78f-77c08834c0f0" ai.train(file_name, line_by_line=False, from_cache=False, num_steps=2000, generate_every=400, save_every=200, save_gdrive=True, output_dir=folder, learning_rate=1e-3, fp16=False, batch_size=1) # + [markdown] id="qQJgV_b4bmzd" # You're done! Feel free to go to the **Generate Text From The Trained Model** section to generate text based on your retrained model. 
# + [markdown] id="pel-uBULXO2L" # # ## Load a Trained Model # # If you already had a trained model from this notebook, running the next cell will copy the `pytorch_model.bin` and the `config.json`file from the specified folder in Google Drive into the Colaboratory VM. (If no `from_folder` is specified, it assumes the two files are located at the root level of your Google Drive) # + id="DCcx5u7sbPTD" #copy_file_from_gdrive('pytorch_model.bin', folder) #copy_file_from_gdrive('config.json', folder) # + [markdown] id="RTa6zf3e_9gV" # The next cell will allow you to load the retrained model + metadata necessary to generate text. # + [markdown] id="ClJwpF_ACONp" # ## Generate Text From The Trained Model # # After you've trained the model or loaded a retrained model from checkpoint, you can now generate text. # # **If you just trained a model**, you'll get much faster training performance if you reload the model; the next cell will reload the model you just trained from the `trained_model` folder. # + id="mSvHhTuHJc-Q" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1649690071673, "user_tz": -120, "elapsed": 2704, "user": {"displayName": "<NAME>\u0107", "userId": "09411250063536912562"}} outputId="ea22df5c-e18f-48d8-d520-31ef6402ef55" ai = aitextgen(model_folder=folder, to_gpu=True) # + [markdown] id="3cd0RGDbJiDp" # `generate()` without any parameters generates a single text from the loaded model to the console. 
# + id="4RNY6RBI9LmL" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1649690076273, "user_tz": -120, "elapsed": 4607, "user": {"displayName": "<NAME>\u0107", "userId": "09411250063536912562"}} outputId="9a962f13-011f-4220-a7a9-09ee494da904" ai.generate() # + [markdown] id="oF4-PqF0Fl7R" # If you're creating an API based on your model and need to pass the generated text elsewhere, you can do `text = ai.generate_one()` # # You can also pass in a `prompt` to the generate function to force the text to start with a given character sequence and generate text from there (good if you add an indicator when the text starts). # # You can also generate multiple texts at a time by specifing `n`. You can pass a `batch_size` to generate multiple samples in parallel, giving a massive speedup (in Colaboratory, set a maximum of 50 for `batch_size` to avoid going OOM). # # Other optional-but-helpful parameters for `ai.generate()` and friends: # # * **`min length`**: The minimum length of the generated text: if the text is shorter than this value after cleanup, aitextgen will generate another one. # * **`max_length`**: Number of tokens to generate (default 256, you can generate up to 1024 tokens with GPT-2 and 2048 with GPT Neo) # * **`temperature`**: The higher the temperature, the crazier the text (default 0.7, recommended to keep between 0.7 and 1.0) # * **`top_k`**: Limits the generated guesses to the top *k* guesses (default 0 which disables the behavior; if the generated output is super crazy, you may want to set `top_k=40`) # * **`top_p`**: Nucleus sampling: limits the generated guesses to a cumulative probability. 
(gets good results on a dataset with `top_p=0.9`) # + id="CxZfHlURP6Yx" prompts = ['Digital Forensics Analysis Report\n', 'This report is ', 'The contents of ', 'Conclusion\n', 'It is recommended that ', 'In the opinion of the expert, '] # + id="8DKMc0fiej4N" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1649690259843, "user_tz": -120, "elapsed": 183585, "user": {"displayName": "<NAME>\u0107", "userId": "09411250063536912562"}} outputId="d4f0f959-ba0c-44a5-dcb5-5f3dbdf7411b" for prompt in prompts: ai.generate(n=5, batch_size=1, prompt=prompt, max_length=1000, temperature=1.0, top_p=0.9) # + [markdown] id="zjjEN2Tafhl2" # For bulk generation, you can generate a large amount of texts to a file and sort out the samples locally on your computer. The next cell will generate `num_files` files, each with `n` texts and whatever other parameters you would pass to `generate()`. The files can then be downloaded from the Files sidebar! # # You can rerun the cells as many times as you want for even more generated texts! # + id="Fa6p6arifSL0" num_files = 0 for prompt in prompts: for _ in range(num_files): ai.generate_to_file(n=200, batch_size=1, prompt=prompt, max_length=2000, temperature=1.0, top_p=0.9) # + [markdown] id="wmTXWNUygS5E" # # LICENSE # # MIT License # # Copyright (c) 2020-2021 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.
notebooks/old/4/aitextgen-CCS-gpt-neo-125M-2000-a.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # First Order Equations

import numpy as np
import matplotlib.pyplot as plt

# ## Definitions
#
# A [differential equation](https://en.wikipedia.org/wiki/Differential_equation) is an equation involving an unknown function $y(t)$ (with independent variable $t$) and its derivatives $y'$, $y''$, $y'''$, etc. The [order](https://en.wikipedia.org/wiki/Differential_equation#Equation_order) of a differential equation refers to the highest order derivative of the unknown function $y(t)$ appearing in the equation. A differential equation is [linear](https://en.wikipedia.org/wiki/Linear_differential_equation) if it is of the form
#
# $$
# a_n(t) y^{(n)} + a_{n-1}(t) y^{(n-1)} + \cdots + a_1(t) y' + a_0(t) y = f(t)
# $$
#
# where $a_n, \dots, a_0, f$ are functions of the independent variable $t$ only. For example, the equation
#
# $$
# y'' + ty' + y^2 = t
# $$
#
# is second order non-linear, and the equation
#
# $$
# y' + ty = t^2
# $$
#
# is first order linear. Most differential equations are *impossible to solve* explicitly; however, we can always use [numerical methods](https://en.wikipedia.org/wiki/Numerical_methods_for_ordinary_differential_equations) to approximate solutions.

# ## Euler's Method
#
# The simplest numerical method for approximating solutions of differential equations is [Euler's method](https://en.wikipedia.org/wiki/Euler_method). Consider a first order differential equation with an initial condition:
#
# $$
# y' = f(y,t) \ , \ \ y(t_0)=y_0
# $$
#
# The procedure for Euler's method is as follows:
#
# 1. Construct the equation of the tangent line to the unknown function $y(t)$ at $t=t_0$:
#
# $$
# y = y(t_0) + f(y_0,t_0)(t - t_0)
# $$
#
# where $y'(t_0) = f(y_0,t_0)$ is the slope of $y(t)$ at $t=t_0$.
#
# 2.
# Use the tangent line to approximate $y(t)$ at a small time step $t_1 = t_0 + h$:
#
# $$
# y_1 = y_0 + f(y_0,t_0)(t_1 - t_0)
# $$
#
# where $y_1 \approx y(t_1)$.
#
# 3. Construct the tangent line at the point $(t_1,y_1)$ and repeat.
#
# The formula for Euler's method defines a recursive sequence:
#
# $$
# y_{n+1} = y_n + f(y_n,t_n)(t_{n+1} - t_n) \ , \ \ y_0 = y(t_0)
# $$
#
# where $y_n \approx y(t_n)$ for each $n$. If we choose equally spaced $t$ values then the formula becomes
#
# $$
# y_{n+1} = y_n + f(y_n,t_n)h \ \ , \ \ y_0 = y(t_0) \ , \ \ t_n = t_0 + nh
# $$
#
# with time step $h = t_{n+1} - t_n$.
#
# Note two very important things about Euler's method and numerical methods in general:
#
# * A smaller time step $h$ reduces the error in the approximation.
# * A smaller time step $h$ requires more computations!

# ## Implementation
#
# Let's write a function called `odeEuler` which takes 3 input parameters `f`, `y0` and `t` where:
#
# * `f` is a function of 2 variables which represents the right side of a first order differential equation `y' = f(y,t)`
# * `t` is a 1D NumPy array of $t$ values where we are approximating $y$ values
# * `y0` is an initial value $y(t_0)=y_0$ where $t_0$ is the entry at index 0 of the array `t`
#
# The function `odeEuler` returns a 1D NumPy array of $y$ values which approximate the solution $y(t)$ of the differential equation
#
# $$
# y' = f(y,t) \ , \ \ y(t_0)=y_0
# $$
#
# by Euler's method. Notice that we don't specify a time step value $h$. Instead, the function `odeEuler` takes an array of $t$ values and returns $y$ values approximating the solution $y(t)$ by the formula
#
# $$
# y_{n+1} = y_n + f(y_n,t_n)(t_{n+1} - t_n)
# $$

def odeEuler(f,y0,t):
    '''Approximate the solution of y'=f(y,t) by Euler's method.

    Parameters
    ----------
    f : function
        Right-hand side of the differential equation y'=f(y,t), y(t_0)=y_0
    y0 : number
        Initial value y(t0)=y0 where t0 is the entry at index 0 in the array t
    t : array
        1D NumPy array of t values where we approximate y values. Time step
        at each iteration is given by t[n+1] - t[n].

    Returns
    -------
    y : 1D NumPy array
        Approximation y[n] of the solution y(t_n) computed by Euler's method.
    '''
    y = np.zeros(len(t))
    y[0] = y0
    # One tangent-line step per interval: y[n+1] = y[n] + f(y[n], t[n]) * (t[n+1] - t[n])
    for n in range(0,len(t)-1):
        y[n+1] = y[n] + f(y[n],t[n])*(t[n+1] - t[n])
    return y

# ## Examples

# ### Exponential Equation
#
# Let's implement Euler's method to approximate solutions of $y' = y$ for $t \in [0,2]$. We know the solution is $y(t) = e^t$ in this case and so we can compare the approximation by Euler's method to the true solution.

t = np.linspace(0,2,21)
y0 = 1
f = lambda y,t: y
y = odeEuler(f,y0,t)
y_true = np.exp(t)
plt.plot(t,y,'b.-',t,y_true,'r-')
plt.legend(['Euler','True'])
plt.axis([0,2,0,9])
plt.grid(True)
plt.title("Solution of $y'=y , y(0)=1$")
plt.show()

# ### Non-Linear Equation
#
# Let's plot an approximation of $y'=y^2$ for $y(0)=-1$. We know the solution is
#
# $$
# y(t) = \frac{-1}{t+1}
# $$

t = np.linspace(0,5,16)
y0 = -1
f = lambda y,t: y**2
y = odeEuler(f,y0,t)
t_true = np.linspace(0,5,100)
y_true = -1/(t_true + 1)
plt.plot(t,y,'r.-',t_true,y_true)
plt.legend(['Euler','True'])
plt.grid(True)
plt.axis([0,5,-1,0])
# Fixed title: the initial condition used above is y(0) = -1, not y(0) = 1
plt.title("Solution of $y'=y^2 , y(0)=-1$")
plt.show()

# ### Autonomous Equation
#
# Let's do an example where we know that it would be impossible to find the true solution. Let's approximate the solution of $y' = \sin(y^2)$ for each initial condition $y(0)=-3,-2.75,-2.5,...,2.5,2.75$ and plot all the results together.
#
# Note that $y'=0$ when $y=\pm \sqrt{k \pi}$ for $k=0,1,2,3,4,...$. These points are called [equilibrium points](https://en.wikipedia.org/wiki/Equilibrium_point) of the equation and represent steady state (or constant) solutions.
# +
# Plot Euler approximations of y' = sin(y^2) for many initial conditions,
# with the equilibrium values y = ±sqrt(k*pi) overlaid as dashed red lines.
t0 = 0; tf = 3; h = 0.1;
f = lambda y,t: np.sin(y**2)
t = np.arange(t0,tf+h,h)

# One blue trajectory per initial condition y(0) = -3, -2.75, ..., 2.75
for y0 in np.arange(-3,3,0.25):
    y = odeEuler(f,y0,t)
    plt.plot(t,y,'b')

# Equilibrium (steady-state) solutions: sin(y^2) = 0 at y = ±sqrt(k*pi)
for k in range(0,3):
    y_eq = np.sqrt(k*np.pi)
    plt.plot([t0,tf],[y_eq,y_eq],'r--')
    plt.plot([t0,tf],[-y_eq,-y_eq],'r--')

plt.grid(True)
plt.axis([0,3,-3,3.5])
# Raw string: "\s" is an invalid escape sequence in a normal string literal
# (DeprecationWarning since Python 3.6); the rendered text is unchanged.
plt.title(r"Equilibrium solutions of $y'=\sin(y^2)$")
plt.show()
Python/3. Computational Sciences and Mathematics/Diff Eqs/First Order Equations ODEs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:miniconda3-geoml] # language: python # name: conda-env-miniconda3-geoml-py # --- # # Intro - NYC Taxi Example import geopandas as gpd import time, os gpd.options.use_pygeos=False # + # from geopandas_parallelise import paralellise # parallelise(gpd) # gpd.sjoin(left,right,op, workers=XX) # gdf.buffer_utm(XXX, workers=XX, unit='m') # - # set root path root = os.path.abspath(os.path.join(os.getcwd(),'..')) os.environ['PYTHONPATH'] = os.path.abspath(os.path.join(os.getcwd(),'..')) # + ### get some data # - # !wget -O $PYTHONPATH/data/zone_data.zip https://archive.nyu.edu/retrieve/77439/nyu_2451_36743.zip --no-check-certificate -q # !wget -O $PYTHONPATH/data/ride_data_apr.csv https://github.com/fivethirtyeight/uber-tlc-foil-response/blob/master/uber-trip-data/uber-raw-data-apr14.csv?raw=true -q # !wget -O $PYTHONPATH/data/ride_data_may.csv https://github.com/fivethirtyeight/uber-tlc-foil-response/blob/master/uber-trip-data/uber-raw-data-may14.csv?raw=true -q # !wget -O $PYTHONPATH/data/ride_data_jun.csv https://github.com/fivethirtyeight/uber-tlc-foil-response/blob/master/uber-trip-data/uber-raw-data-jun14.csv?raw=true -q # !wget -O $PYTHONPATH/data/ride_data_jul.csv https://github.com/fivethirtyeight/uber-tlc-foil-response/blob/master/uber-trip-data/uber-raw-data-jul14.csv?raw=true -q # !wget -O $PYTHONPATH/data/ride_data_aug.csv https://github.com/fivethirtyeight/uber-tlc-foil-response/blob/master/uber-trip-data/uber-raw-data-aug14.csv?raw=true -q # !wget -O $PYTHONPATH/data/ride_data_sep.csv https://github.com/fivethirtyeight/uber-tlc-foil-response/blob/master/uber-trip-data/uber-raw-data-sep14.csv?raw=true -q import pandas as pd rides = pd.concat([pd.read_csv(os.path.join(root,'data',f'ride_data_{month}.csv')) for month in ['apr','may','jun','jul','aug','sep']]) rides = 
gpd.GeoDataFrame(rides, geometry=gpd.points_from_xy(rides.Lon, rides.Lat)) zones = gpd.read_file('zip://'+os.path.join(root,'data','zone_data.zip')) # Spatial Join start_time = time.time() sjoined = gpd.sjoin(rides, zones, op="within") print("It takes %s seconds" % (time.time() - start_time))
tutorials/NYC_taxi_pygeos_trial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from pydantic import BaseModel
from typing import Callable


# +
class DummyDataSource(BaseModel):
    # A minimal "data source": just the name of the table to read.
    table: str


class DummyConnector(BaseModel):
    # A minimal "connector": where to connect, which secret to use, and a
    # callable that resolves a secrets id into the actual credentials dict.
    host: str
    secrets_id: str
    get_secrets: Callable[[str], dict]

    def get_df(self, data_source: DummyDataSource):
        """Pretend to fetch a dataframe for data_source, printing the table
        and the secrets that would be used to do so."""
        print('get_df for table', data_source.table)
        print('with secrets: ', self.get_secrets(self.secrets_id))


# +
def get_dummy_secrets(secrets_id):
    """Resolve a secrets id into a credentials dict (None when unknown)."""
    known = {
        'aaa': {'token': '<PASSWORD>'},
        'bbb': {'token': '<PASSWORD>'},
    }
    return known.get(secrets_id)


conn_config = {'host': '0.2.3.4', 'secrets_id': 'aaa'}
conn = DummyConnector(get_secrets=get_dummy_secrets, **conn_config)
ds = DummyDataSource(table='plop')
conn.get_df(ds)
# -

DummyConnector.schema()
doc/Method forwarding in connectors.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # default_exp core # - # # module name here # # > API details. #hide # %load_ext autoreload # %autoreload 2 #export from nbdev.showdoc import * import pandas as pd import logging import datetime import sys import dask.dataframe as dd import numpy as np import traceback from dask_ml import preprocessing as dask_preprocessing import dask.array as da, dask.dataframe as dd # !mkdir -p tmp # + #export def test_eq(a,b): assert a==b, f'{a}, {b}' from collections.abc import Sequence def _seq_but_not_str(obj): return isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)) def listify(obj): if _seq_but_not_str(obj): return obj return [obj] def test_in(items, target): items = listify(items) missing = [item for item in items if item not in target] assert len(missing) == 0, f'{missing} are not in {target}' def test_err(f, expected_message_part = None): try: f() except Exception as e: if not expected_message_part or expected_message_part in str(e): return else: raise ValueError(f"Expected different error to be thrown: {expected_message_part}") raise ValueError("Expected error to be thrown") # - test_in('a', ['a', 'b', 'c']) test_in(['b', 'c'], ['a', 'b', 'c']) #export def configure_logging(log_dir, log_name, log_lvl='DEBUG', con_log_lvl='INFO', date_format='%Y-%m-%d %H:%M:%S'): class IndentAdapter(logging.LoggerAdapter): def __init__(self, indent_start, indent_char, logger, extra): super().__init__(logger, extra) self.indent_start = indent_start self.indent_char = indent_char def indent(self): indentation_level = len(traceback.extract_stack()) return indentation_level-self.indent_start-3 # indent + process + adapter call def process(self, msg, kwargs): return '{i}{m}'.format(i=self.indent_char*self.indent(), m=msg), kwargs log = 
logging.getLogger('root') already_initialized = any(filter(lambda h: isinstance(h, logging.StreamHandler), log.handlers)) if already_initialized: print("Logging already initialized") return logging.getLogger('root') numeric_level = getattr(logging, log_lvl, None) log_format = '%(levelname)5s [%(asctime)s] %(name)s: %(message)s' logging.basicConfig( filename=f'{log_dir}/{log_name}_{datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")}.txt', level=numeric_level, format=log_format, datefmt=date_format) log = logging.getLogger('root') ch = logging.StreamHandler() ch.setLevel(getattr(logging, con_log_lvl, None)) ch.setFormatter(logging.Formatter(log_format, date_format)) curr_indent = len(traceback.extract_stack()) res = IndentAdapter(curr_indent, ' ', log, extra={}) log.addHandler(ch) return res log = configure_logging('./tmp', 'test_log', con_log_lvl='DEBUG', date_format='XXXX-XX-XX XX:XX:XX') # + def _test(): log.debug('in test') def _test2(): log.debug('in test2') _test2() _test() log.debug('Interactive cell log') # - #export def setup_dataframe_copy_logging(log, threshold_mb): if not '_original_copy' in dir(pd.DataFrame): log.debug('Patching up DataFrame.copy') pd.DataFrame._original_copy = pd.DataFrame.copy else: log.debug('Patching up DataFrame.copy :: already done - skipping.') def _loud_copy(self, deep=True): size_mb = sys.getsizeof(self) / 1024 / 1024 if size_mb >= threshold_mb: log.debug(f'Copying {size_mb:.1f} MiB (deep={deep})') return pd.DataFrame._original_copy(self, deep) pd.DataFrame.copy = _loud_copy setup_dataframe_copy_logging(log, threshold_mb=50) df = pd.DataFrame({'a':[1,2,3]}) df2 = df.copy() #export n_total_series = 30490 n_days_total = 1913 raw_dir = 'raw' #export def read_series_sample(log, n): df = dd.read_csv( f'{raw_dir}/sales_train_validation.csv' ).sample(frac = n / n_total_series) log.debug(f"Read {len(df)} series") return df sample = read_series_sample(log, 13) test_eq(13, len(sample)) #export def melt_sales_series(df_sales_train): 
id_columns = [col for col in df_sales_train.columns if 'id' in col] sales_columns = [col for col in df_sales_train.columns if 'd_' in col] cat_columns = [col for col in id_columns if col != 'id'] df_sales_train_melt = df_sales_train.melt( id_vars=id_columns, var_name='day_id', value_name='sales' ) df_sales_train_melt['sales'] = df_sales_train_melt['sales'].astype('int16') return df_sales_train_melt sample_melt = melt_sales_series(sample) test_eq(n_days_total * 13, len(sample_melt)) #export def extract_day_ids(df_sales_train_melt): sales_columns = [f'd_{col}' for col in range(1, n_days_total+1)] mapping = {col: int(col.split('_')[1]) for col in sales_columns} df_sales_train_melt['day_id'] = df_sales_train_melt['day_id'].map(mapping) import datetime d_1_date = pd.to_datetime('2011-01-29') mapping = {day:(d_1_date + datetime.timedelta(days=day-1)) for day in range(1, n_days_total+1)} df_sales_train_melt['day_date'] = df_sales_train_melt['day_id'].map(mapping) mapping = {day:str((d_1_date + datetime.timedelta(days=day-1)).date()) for day in range(1, n_days_total+1)} # gonna need it for joining with calendars & stuff df_sales_train_melt['day_date_str'] = df_sales_train_melt['day_id'].map(mapping) df_sales_train_melt['day_id'] = df_sales_train_melt['day_id'].astype('int16') df_sales_train_melt['month_id'] = df_sales_train_melt['day_date'].dt.month.astype('uint8') return df_sales_train_melt sample_melt = extract_day_ids(sample_melt) sample_melt.columns test_eq(n_days_total * 13, len(sample_melt)) first_row = sample_melt.head(1) first_row test_in(['day_date', 'day_date_str', 'day_id', 'month_id'], first_row.columns) test_eq('2011-01-29', first_row.loc[0, 'day_date_str']) test_eq(1, first_row.loc[0, 'day_id']) test_eq(1, first_row.loc[0, 'month_id']) #export def join_w_calendar(df_sales_train_melt, raw_dir): df_calendar = pd.read_csv(f'{raw_dir}/calendar.csv') df_calendar_melt = df_calendar.melt( id_vars=['date', 'wm_yr_wk', 'weekday', 'wday', 'year', 'd', 'event_name_1', 
'event_name_2', 'event_type_1', 'event_type_2'], value_name='snap_flag', var_name='state_id', value_vars=['snap_CA', 'snap_TX', 'snap_WI'] ) df_calendar_melt['snap_flag'] = df_calendar_melt['snap_flag'].astype('uint8') df_calendar_melt['state_id'] = df_calendar_melt['state_id'].str.split('_').str[1] df_sales_train_melt = df_sales_train_melt.merge( df_calendar_melt[['date', 'state_id', 'wm_yr_wk', 'snap_flag']], left_on=['day_date_str', 'state_id'], right_on=['date', 'state_id'], # TODO: dask does not seem to support these validate='many_to_one' ) df_sales_train_melt['wm_yr_wk'] = df_sales_train_melt['wm_yr_wk'].astype('int16') return df_sales_train_melt sample_melt = join_w_calendar(sample_melt, raw_dir) sample_melt.head(1) # TODO: test_not_in ('date') == dup of day_date_str test_in(['wm_yr_wk', 'snap_flag'], sample_melt.columns) #export def join_w_prices(partition, raw_dir): df_prices = pd.read_csv(f'{raw_dir}/sell_prices.csv') partition = partition.merge( df_prices, on=['store_id', 'item_id', 'wm_yr_wk'], how='left' ) partition['sell_price'] = partition['sell_price'].astype('float32') partition['sales_dollars'] = (partition['sales'] * partition['sell_price']).astype('float32') partition = partition.fillna({'sales_dollars': 0} # TODO: doesn't seem to be supported by dask, inplace=True ) return partition sample_melt = join_w_prices(sample_melt, raw_dir).persist() sample_melt.head(1) test_in(['sell_price', 'sales_dollars'], sample_melt.columns) # + def _reproduce_warn_generating_encoder(): c1 = da.concatenate([ da.from_array( np.array(['c', 'a', 'b']) ), da.from_array( np.array(['c', 'a', 'b']) ), ]) enc = dask_preprocessing.LabelEncoder() c2 = enc.fit_transform(c1) return enc enc = _reproduce_warn_generating_encoder() np.save('./tmp/encoder.npy', enc.classes_) # - # export def save_encoder(enc, path): classes_ = enc.classes_ if da.core.Array == type(enc.classes_): classes_ = classes_.compute() np.save(path, classes_) save_encoder(enc, path='./tmp/encoder.npy') 
#export
def to_parquet(sales_series, file_name, processed_dir, LOG):
    """Label-encode string/categorical columns and write the frame to parquet.

    Side effects: saves one {processed_dir}/<col>.npy encoder file per encoded
    column, and writes the parquet data set to {processed_dir}/{file_name}.
    Works on both dask and pandas data frames (the index kwarg differs).
    """
    LOG.debug('Setting index')
    sales_series = sales_series.set_index(sales_series['id'])
    LOG.debug('Setting index - done')
    encoders = {}
    # TODO: dask supposedly does this on its own with sensible defaults
    # sales_series['parquet_partition'] = np.random.randint(0, 100, sales_series.shape[0])
    # this one is a dup of day_date_str which is harder to squeeze through the rest of the pipeline (yay petastorm)
    if 'day_date' in sales_series.columns:
        LOG.debug(f"Dropping 'day_date' from {sales_series.columns}")
        sales_series = sales_series.drop(['day_date'], axis=1)
    for col in sales_series.columns:
        if col in encoders:
            LOG.debug(f'Skipping: {col} - already encoded')
            continue
        # petastorm can't read these
        if str(sales_series[col].dtype) == 'uint8':
            sales_series[col] = sales_series[col].astype('int')
        if str(sales_series[col].dtype) in ['category', 'object']:
            LOG.debug(f'Encoding: {col}')
            enc = dask_preprocessing.LabelEncoder()
            #enc = LabelEncoder()
            sales_series[col] = enc.fit_transform(sales_series[col])
            # TODO: update other transforms too!
            encoders[col] = enc
    for name, enc in encoders.items():
        LOG.debug(f"Saving encoder: {name}")
        save_encoder(enc, f'{processed_dir}/{name}.npy')
    # TODO: uint -> int, category/object -> int, day_date -> drop
    # TODO: this is being called both on dask and pandas data frames and args are rather not compatible :/
    parquet_file = f'{processed_dir}/{file_name}'
    LOG.debug(f"Saving {type(sales_series)} to {parquet_file}")
    kwargs = {}
    # NOTE(review): exact type check is deliberate here — a dask DataFrame is
    # not a pd.DataFrame subclass and needs 'write_index' instead of 'index'.
    is_pandas_df = type(sales_series) == pd.DataFrame
    index_kwarg_name = 'index' if is_pandas_df else 'write_index'
    kwargs[index_kwarg_name] = False
    sales_series.to_parquet(
        parquet_file,
        **kwargs
        # partition_cols=['parquet_partition']
    )

to_parquet(sample_melt, 'sample', './tmp', log)

pd_df = sample_melt.compute()
test_eq(pd.DataFrame, type(pd_df))
# NOTE(review): this second call passes sample_melt again — presumably pd_df
# (the pandas copy) was intended given the 'sample_pandas' name; confirm.
to_parquet(sample_melt, 'sample_pandas', './tmp', log)

# TODO: check if these can be read back well with a sibling func
[f'{col}:{sample_melt[col].dtype}' for col in sample_melt.columns]

sample_melt['id'].value_counts().compute()


# export
def extract_id_columns(t):
    """Split the M5 composite id (e.g. 'HOBBIES_1_001_CA_1_validation') into
    cat_id, dept_id, item_id, state_id and store_id columns, in place.

    The pattern is now a raw string: the original mixed '\\\\d' with a bare
    '\\d' escape in a non-raw literal, which is an invalid escape sequence
    (SyntaxWarning from Python 3.12 onward). The regex itself is unchanged.
    """
    extracted = t['id'].str.extract(r'([A-Z]+)_(\d)_(\d{3})_([A-Z]{2})_(\d)')
    t['cat_id'] = extracted[0]
    t['dept_id'] = t['cat_id'] + '_' + extracted[1]
    t['item_id'] = t['cat_id'] + '_' + extracted[2]
    t['state_id'] = extracted[3]
    t['store_id'] = t['state_id'] + '_' + extracted[4]
    return t


# export
from datetime import timedelta

def get_submission_template_melt(raw, d_1_date=pd.to_datetime('2016-06-20')):
    """Build a long-format scoring template from {raw}/sample_submission.csv.

    F1..F28 columns are mapped to real dates anchored at *d_1_date*, melted to
    one row per (id, date), joined with the latest week's sell prices, and
    enriched with the id-derived columns from extract_id_columns.
    """
    df_sample_submission = pd.read_csv(f'{raw}/sample_submission.csv')
    mapping = {f'F{day}':(d_1_date + timedelta(days=day-1)).date() for day in range(1,29)}
    mapping['id'] = 'id'
    df_sample_submission.columns = df_sample_submission.columns.map(mapping)
    df_sample_submission_melt = df_sample_submission.melt(id_vars='id', var_name='day', value_name='sales')
    # Use the most recent week of prices as "last known" prices.
    last_prices = pd.read_csv(f'{raw}/sell_prices.csv')
    max_week = last_prices['wm_yr_wk'].max()
    last_prices = last_prices.query('wm_yr_wk == @max_week').copy()
    last_prices['id'] = last_prices['item_id'] + '_' + last_prices['store_id']
    # _v is a real copy; _e aliases last_prices itself — safe only because the
    # copy is taken BEFORE the '_evaluation' suffix mutation below.
    last_prices_v = last_prices.copy()
    last_prices_e = last_prices
    last_prices_e['id'] = last_prices_e['id'] + '_evaluation'
    last_prices_v['id'] = last_prices_v['id'] + '_validation'
    last_prices = pd.concat([last_prices_e, last_prices_v], axis=0)[['id', 'sell_price']]
    df_sample_submission_melt = df_sample_submission_melt.merge(
        last_prices, on='id', how='left', validate='many_to_one')
    df_sample_submission_melt = extract_id_columns(df_sample_submission_melt)
    df_sample_submission_melt.drop(['sales'], axis=1, inplace=True)
    df_sample_submission_melt.rename({'day': 'date'}, axis=1, inplace=True)
    return df_sample_submission_melt

submission_template = get_submission_template_melt(raw='raw')
test_eq(pd.to_datetime('2016-06-20').date(), submission_template['date'].min())
# 30490 ids each appear twice: once _validation, once _evaluation.
test_eq(2*30490, len(submission_template['id'].unique()))
00_core.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Training Data # + # Lab 5 Logistic Regression Classifier import tensorflow as tf tf.set_random_seed(777) # for reproducibility x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]] y_data = [[0], [0], [0], [1], [1], [1]] # - # setting placeholder, variables X = tf.placeholder(tf.float32, shape=[None, 2]) Y = tf.placeholder(tf.float32, shape=[None, 1]) W = tf.Variable(tf.random_normal([2, 1]), name='weight') b = tf.Variable(tf.random_normal([1]), name='bias') # build a model # + # Hypothesis using sigmoid: tf.div(1., 1. + tf.exp(tf.matmul(X, W))) hypothesis = tf.sigmoid(tf.matmul(X, W) + b) cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis)) train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost) predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32) accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32)) # - # train a model with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for step in range(10001): cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data}) if step % 200 == 0: print(step, cost_val) # accuracy report h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict={X: x_data, Y: y_data}) print("\nhypothesis: ", h, "\nCorrect(Y): ", c, "\nAccuracy: ", a)
tf/gdgkr/lab5-1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# CLUE-style land-use allocation for the White Lake project: iteratively bumps
# the probability surface for class 1 ("fields") inside each dacha (estate)
# until the allocated share of field pixels matches that dacha's demand.
# (Comments translated from Russian.)

# import software libraries
from osgeo import gdal
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np

# load the probability surfaces, the 1860 land use, and an empty raster for the
# new land use
dataset = gdal.Open('Probability/lu1860.asc')
dataset0 = gdal.Open('Probability/P0.asc')
dataset1 = gdal.Open('Probability/P1.asc')
dataset2 = gdal.Open('Probability/P2.asc')
dataset3 = gdal.Open('Probability/P3.asc')
dataset4 = gdal.Open('Probability/Empty.asc')
dachi_id = gdal.Open('Probability/Drivers/dachi_id.asc').ReadAsArray()

#determining raster size
X=dataset0.RasterXSize
Y=dataset0.RasterYSize

# matrix of values
dataset.ReadAsArray()
LU1860=dataset.ReadAsArray()

# open the rasters as arrays (-99999 is the nodata value throughout)
P0=dataset0.ReadAsArray()
P1=dataset1.ReadAsArray()
P2=dataset2.ReadAsArray()
P3=dataset3.ReadAsArray()
LU=dataset4.ReadAsArray()

# initialise the iteration offsets iter0..iter3
iter0=0
iter1=0
iter2=0
iter3=0
count=0

# set the elasticities
E0=0.2177815772600983
E1=0.43029512473503995
E2=0.24595155709342562
E3=3.6788990825688074

# add the elasticities to pixels that already carry the corresponding LU class
# (encourages persistence within the old boundaries)
for i in range(0, Y):
    for j in range(0, X):
        if P0[i][j]!=-99999:
            if LU[i][j]==0:
                P0[i][j]+=E0
            if LU[i][j]==1:
                P1[i][j]+=E1
            if LU[i][j]==2:
                P2[i][j]+=E2
            if LU[i][j]==3:
                P3[i][j]+=E3
        else:
            continue

# process each dacha in turn with its own demand (demands are the target share
# of "field" pixels per dacha id)
dachas=[869, 856, 859, 860, 861, 862, 865, 866, 867, 868, 870, 872, 873, 874, 875, 877, 880, 881, 882]
demands=[0, 0, 0, 0, 0, 0.145, 0.1335, 0, 0, 0.033, 0.1054, 0, 0, 0, 0.078, 0.133, 0, 0.0413, 0]
L=len(dachas)
for N in range(0, L):
    breaker=0
    while breaker==0:
        dachasN=dachas[N]
        for i in range(0, Y):
            for j in range(0, X):
                if dachi_id[i][j]==dachasN:
                    # add the iteration adjustment
                    if P0[i][j]!=-99999:
                        P0[i][j]+=iter0
                        P1[i][j]+=iter1
                        P2[i][j]+=iter2
                        P3[i][j]+=iter3
                    # initial land-use allocation: class 1 wins ties, everything
                    # else is lumped into class 4
                    if P0[i][j]!=-99999:
                        PG=max(P0[i][j],P1[i][j],P2[i][j],P3[i][j])
                        if P1[i][j]>=PG:
                            LU[i][j]=1
                        else:
                            LU[i][j]=4
                    else:
                        LU[i][j]=-99999
                    # class 5 (from the 1860 map) is kept as-is
                    if LU1860[i][j]==5:
                        LU[i][j]=5
        # count the share of pixels relative to the total usable LU
        uses=0
        others=0
        fields=0
        for i in range(0, Y):
            for j in range(0, X):
                if dachi_id[i][j]==dachasN:
                    if P0[i][j]!=-99999:
                        uses+=1
                        if LU[i][j]==1:
                            fields+=1
                        elif LU[i][j]==4:
                            others+=1
        print ('Fields =', fields)
        print ('Others =', others)
        # compute the difference from the demand
        # set demand0, demand1, demand2, demand3
        demand1=demands[N]
        diff1=demand1-fields/uses
        # print the differences
        print ('diff1 =', diff1)
        check1=0
        if diff1>0.01 or diff1<-0.01:
            # hand-picked tolerance (1%)
            if diff1>0.01:
                iter1=diff1*0.5
            else:
                iter1=diff1
            # asymmetric step sizes so the search does not keep making the same
            # "jumps" back and forth
        else:
            check1=1
        # a suitable allocation has been found
        if check1==1:
            print ('success for region', dachasN)
            breaker=1
        else:
            print ('one more')

# Re-run for anything that still needs fixing (a small region with a
# homogeneous probability field); same loop as above but with smaller,
# damped step sizes.
dachas=[871]
demands=[0.2085]
L=len(dachas)
for N in range(0, L):
    breaker=0
    while breaker==0:
        dachasN=dachas[N]
        for i in range(0, Y):
            for j in range(0, X):
                if dachi_id[i][j]==dachasN:
                    # add the iteration adjustment
                    if P0[i][j]!=-99999:
                        P0[i][j]+=iter0
                        P1[i][j]+=iter1
                        P2[i][j]+=iter2
                        P3[i][j]+=iter3
                    # initial land-use allocation
                    if P0[i][j]!=-99999:
                        PG=max(P0[i][j],P1[i][j],P2[i][j],P3[i][j])
                        if P1[i][j]>=PG:
                            LU[i][j]=1
                        else:
                            LU[i][j]=4
                    else:
                        LU[i][j]=-99999
                    if LU1860[i][j]==5:
                        LU[i][j]=5
        # count the share of pixels relative to the total usable LU
        uses=0
        others=0
        fields=0
        for i in range(0, Y):
            for j in range(0, X):
                if dachi_id[i][j]==dachasN:
                    if P0[i][j]!=-99999:
                        uses+=1
                        if LU[i][j]==1:
                            fields+=1
                        elif LU[i][j]==4:
                            others+=1
        print ('Fields =', fields)
        print ('Others =', others)
        # compute the difference from the demand
        # set demand0, demand1, demand2, demand3
        demand1=demands[N]
        diff1=demand1-fields/uses
        # print the differences
        print ('diff1 =', diff1)
        check1=0
        if diff1>0.01 or diff1<-0.01:
            # hand-picked tolerance (1%)
            if diff1>0.01:
                iter1=diff1*0.02
            else:
                iter1=diff1*0.2
            # asymmetric step sizes so the search does not keep making the same
            # "jumps" back and forth
        else:
            check1=1
        # a suitable allocation has been found
        if check1==1:
            print ('success for region', dachasN)
            breaker=1
        else:
            print ('one more')

# export the generated grid (NOT georeferenced!)
# the header coordinates were added manually
np.savetxt('C:/Users/olegz/Desktop/CLUE/White Lake project/Predicted/Predicted_by_dacha.asc', LU, fmt='%4.1f', comments='', header='NCOLS %s \nNROWS %s \nXLLCORNER 569875.0000000000 \nYLLCORNER 6120445.0000000000 \nCELLSIZE 30.000000 \nNODATA_VALUE -99999.0'%(X, Y))

# prepare for a quick visualisation in Python
# replace nodata with NaN so matplotlib leaves those pixels blank
Dachi = gdal.Open('C:/Users/olegz/Desktop/CLUE/White Lake project/Predicted/Predicted_by_dacha.asc').ReadAsArray()
Dachi[Dachi == -99999.] = np.nan

dataset18 = gdal.Open('Probability/Predicted16.asc')
General=dataset18.ReadAsArray()
Full_time = gdal.Open('C:/Users/olegz/Desktop/CLUE/White Lake project/Predicted/Predicted_full-time_arable.asc').ReadAsArray()
Full_time[Full_time == -99999.] = np.nan
General[General == -99999.] = np.nan

# +
# side-by-side comparison of the three predicted maps with a shared colormap
C=[[0.95, 0.95, 0.65],[0.65, 0.9, 0.3],[0.3, 0.55, 0.2], [0.3, 0.55, 0.2], [0.7, 0.82, 0.8]]
cm = mpl.colors.ListedColormap(C)
mpl.rcParams['figure.figsize'] = (30, 30)
plt.subplot(231)
plt.imshow(General, cmap=cm)
plt.axis('off')
plt.subplot(232)
plt.imshow(Dachi, cmap=cm)
plt.axis('off')
plt.subplot(233)
plt.imshow(Full_time, cmap=cm)
plt.axis('off')
# -
Whitelake by dachas for Khitrov.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Regression with DLT

# In this notebook, we want to demonstrate how to use different arguments in **DLT** to train a model with various regression settings. We continue to use *iclaims* data for the demo purpose:
#
# 1. regular regression
# 2. regression with specific signs and priors for regression coefficients
#
# Finally, we will also use a simulated dataset to illustrate different types of regression penalties:
#
# 1. `fixed-ridge`
# 2. `auto-ridge`
# 3. `lasso`
#
# Generally speaking, regression coefficients are more robust under full Bayesian sampling and estimation. Hence, we will use `estimator=stan-mcmc` (the default) in the session.

# +
# %matplotlib inline
import matplotlib.pyplot as plt

import orbit
from orbit.utils.dataset import load_iclaims
from orbit.models import DLT
from orbit.diagnostics.plot import plot_predicted_data
from orbit.constants.palette import OrbitPalette
# -

print(orbit.__version__)

# + [markdown] toc-hr-collapsed=false
# ## US Weekly Initial Claims
# -

# Recall the *iclaims* dataset from the previous section. In order to use this data to nowcast the US unemployment claims during the COVID-19 period, we extended the dataset to Jan 2021 and added the [S&P 500 (^GSPC)](https://finance.yahoo.com/quote/%5EGSPC/history?period1=1264032000&period2=1611187200&interval=1wk&filter=history&frequency=1wk&includeAdjustedClose=true) and [VIX](https://finance.yahoo.com/quote/%5EVIX/history?p=%5EVIX) Index historical data for the same period.
#
# The data is standardized and log-transformed for the model fitting purpose.
# load data df = load_iclaims(end_date='2021-01-03') date_col = 'week' response_col = 'claims' df.dtypes df.head(5) # We can see form the plot below, there are seasonality, trend, and as well as a huge changpoint due the impact of COVID-19. fig, axs = plt.subplots(2, 2,figsize=(20,8)) axs[0, 0].plot(df['week'], df['claims']) axs[0, 0].set_title('Unemployment Claims') axs[0, 1].plot(df['week'], df['trend.unemploy'], 'tab:orange') axs[0, 1].set_title('Google trend - unemploy') axs[1, 0].plot(df['week'], df['vix'], 'tab:green') axs[1, 0].set_title('VIX') axs[1, 1].plot(df['week'], df['sp500'], 'tab:red') axs[1, 1].set_title('S&P500') # using relatively updated data df = df[df['week'] > '2018-01-01'].reset_index(drop=True) test_size = 26 train_df = df[:-test_size] test_df = df[-test_size:] # ### Naive Model # Here we will use DLT models to compare the model performance with vs. without regression. # %%time dlt = DLT( response_col=response_col, date_col=date_col, seasonality=52, seed=8888, num_warmup=4000, ) dlt.fit(df=train_df) predicted_df = dlt.predict(df=test_df) # + [markdown] toc-hr-collapsed=false # ### DLT With Regression # - # The regressor columns can be supplied via argument `regressor_col`. Recall the regression formula in **DLT**: # # $$ # \hat{y}_t =\mu_t + s_t + r_t \\ # r_t = \sum_{j}\beta_j x_{jt} \\ # \beta_j ~\sim \mathcal{N}(\mu_j, \sigma_j^2) # $$ # # Let's use the default where $\mu_j = 0$ and $\sigma_j = 1$. In addition, we can set a *sign* constraint for each coefficient $\beta_j$. This is can be done by supplying the `regressor_sign` as a list where elements are in one of followings: # # * '=': $\beta_j ~\sim \mathcal{N}(0, \sigma_j^2)$ i.e. $\beta_j \in (-\inf, \inf)$ # * '+': $\beta_j ~\sim \mathcal{N}^+(0, \sigma_j^2)$ i.e. $\beta_j \in [0, \inf)$ # * '-': $\beta_j ~\sim \mathcal{N}^-(0, \sigma_j^2)$ i.e. 
$\beta_j \in (-\inf, 0]$ # # Based on some intuition, it's reasonable to assume search terms such as "unemployment", "filling" and **VIX** index to be positively correlated and stock index such as **SP500** to be negatively correlated to the outcome. Then we will leave whatever unsured as a regular regressor. # %%time dlt_reg = DLT( response_col=response_col, date_col=date_col, regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'], regressor_sign=["+", '+', '=', '-', '+'], seasonality=52, seed=8888, num_warmup=4000, ) dlt_reg.fit(df=train_df) predicted_df_reg = dlt_reg.predict(test_df) # The estimated regressor coefficients can be retrieved via `.get_regression_coefs()`. dlt_reg.get_regression_coefs() # ### DLT with Regression and Informative Priors # Assuming users obtain further knowledge on some of the regressors, they could use informative priors ($\mu$, $\sigma$) by replacing the defaults. This can be done via the arguments `regressor_beta_prior` and `regressor_sigma_prior`. These two lists should be of the same lenght as `regressor_col`. dlt_reg_adjust = DLT( response_col=response_col, date_col=date_col, regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'], regressor_sign=["+", '+', '=', '-', '+'], regressor_beta_prior=[0.5, 0.25, 0.07, -0.3, 0.03], regressor_sigma_prior=[0.1] * 5, seasonality=52, seed=8888, num_warmup=4000, ) dlt_reg_adjust.fit(df=train_df) predicted_df_reg_adjust = dlt_reg_adjust.predict(test_df) dlt_reg_adjust.get_regression_coefs() # Let's compare the holdout performance by using the built-in function `smape()` . 
# + import numpy as np from orbit.diagnostics.metrics import smape # to reverse the log-transformation def smape_adjusted(x, y): x = np.exp(x) y = np.exp(y) return smape(x, y) naive_smape = smape_adjusted(predicted_df['prediction'].values, test_df['claims'].values) reg_smape = smape_adjusted(predicted_df_reg['prediction'].values, test_df['claims'].values) reg_adjust_smape = smape_adjusted(predicted_df_reg_adjust['prediction'].values, test_df['claims'].values) print('Naive Model: {:.3f}\nRegression Model: {:.3f}\nRefined Regression Model: {:.3f}'.format( naive_smape, reg_smape, reg_adjust_smape )) # - # ## Regression on Simulated Dataset # # Let's use a simulated dateset to demonstrate sparse regression. import pandas as pd from orbit.utils.simulation import make_trend, make_regression from orbit.diagnostics.metrics import mse # We have developed a few utilites to generate simulated data. For details, please refer to our API doc. In brief, we are generating observations $y$ such that # # $$y_t = l_t + r_t $$ # # where # # $$r_t = \sum_p^{P} \beta_p x_{p,t}$$ # ### Regular Regression # # Let's start with a small number of regressors with $P=10$ and $T=100$. NUM_OF_REGRESSORS = 10 SERIES_LEN = 50 SEED = 20210101 # sample some coefficients COEFS = np.random.default_rng(SEED).uniform(-1, 1, NUM_OF_REGRESSORS) trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1) x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS) print(regression.shape, x.shape) # combine trend and the regression y = trend + regression x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)] response_col = "y" dt_col = "date" obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1) # make a data frame for orbit inputs df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols) # make some dummy date stamp dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W") df['date'] = dt df.shape # Let's take a peek on the coefficients. 
coefs # Now, let's run a regression with the defaults where we have constant `regressor_sigma_prior` and `regression_penalty` set as `fixed-ridge`. # #### Fixed Ridge Penalty # %%time dlt_fridge = DLT( response_col=response_col, date_col=dt_col, regressor_col=x_cols, seed=SEED, # this is default regression_penalty='fixed_ridge', # fixing the smoothing parameters to learn regression coefficients more effectively level_sm_input=0.01, slope_sm_input=0.01, num_warmup=4000, ) dlt_fridge.fit(df=df) coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 ) lw=3 idx = np.arange(NUM_OF_REGRESSORS) plt.figure(figsize=(20, 8)) plt.title("Weights of the model", fontsize=20) plt.plot(idx, coef_fridge[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label='Fixed-Ridge', alpha=0.5, linestyle='--') plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value) plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth") plt.ylim(1, -1) plt.legend(prop={'size': 20}) plt.grid() # We can also set the `regression_penalty` to be `auto-ridge` in case we are sure what to set for the `regressor_sigma_prior`. # #### Auto-Ridge Penalty # Instead of using fixed scale in the coefficients prior, a hyperprior can be assigned to them, i.e. # # $$ \sigma_j \sim \text{Cauchy}^{+} {(0, \alpha)} $$ # # This can be done by setting `regression_penalty="auto_ridge"` with the argument `auto_ridge_scale` (default of `0.5`) set the hyperprior $\alpha$. We can also supply stan config such as `adapt_delta` to reduce divergence. Check the [here](https://mc-stan.org/rstanarm/reference/adapt_delta.html) for details of `adapt_delta`. 
# %%time dlt_auto_ridge = DLT( response_col=response_col, date_col=dt_col, regressor_col=x_cols, seed=SEED, # this is default regression_penalty='auto_ridge', # fixing the smoothing parameters to learn regression coefficients more effectively level_sm_input=0.01, slope_sm_input=0.01, num_warmup=4000, # reduce divergence stan_mcmc_control={'adapt_delta':0.9}, ) dlt_auto_ridge.fit(df=df) coef_auto_ridge = np.quantile(dlt_auto_ridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 ) lw=3 idx = np.arange(NUM_OF_REGRESSORS) plt.figure(figsize=(20, 8)) plt.title("Weights of the model", fontsize=24) plt.plot(idx, coef_auto_ridge[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label='Auto-Ridge', alpha=0.5, linestyle='--') plt.fill_between(idx, coef_auto_ridge[0], coef_auto_ridge[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value) plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth") plt.ylim(1, -1) plt.legend(prop={'size': 20}) plt.grid(); print('Fixed Ridge MSE:{:.3f}\nAuto Ridge MSE:{:.3f}'.format( mse(coef_fridge[1], coefs), mse(coef_auto_ridge[1], coefs) )) # ### Sparse Regrssion # # Now, let's move to a challenging problem with a much higher $P$ to $N$ ratio with a sparsity specified by the parameter `relevance=0.5` under the simulation process. 
NUM_OF_REGRESSORS = 50 SERIES_LEN = 50 SEED = 20210101 COEFS = np.random.default_rng(SEED).uniform(0.3, 0.5, NUM_OF_REGRESSORS) SIGNS = np.random.default_rng(SEED).choice([1, -1], NUM_OF_REGRESSORS) # to mimic a either zero or relative observable coefficients COEFS = COEFS * SIGNS trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1) x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS, relevance=0.5) print(regression.shape, x.shape) # generated sparsed coefficients coefs # combine trend and the regression y = trend + regression x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)] response_col = "y" dt_col = "date" obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1) # make a data frame for orbit inputs df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols) # make some dummy date stamp dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W") df['date'] = dt df.shape # ### Fixed Ridge Penalty dlt_fridge = DLT( response_col=response_col, date_col=dt_col, regressor_col=x_cols, seed=SEED, level_sm_input=0.01, slope_sm_input=0.01, num_warmup=8000, ) dlt_fridge.fit(df=df) coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 ) lw=3 idx = np.arange(NUM_OF_REGRESSORS) plt.figure(figsize=(20, 8)) plt.title("Weights of the model", fontsize=24) plt.plot(coef_fridge[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label="Ridge", alpha=0.5, linestyle='--') plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value) plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth") plt.legend(prop={'size': 20}) plt.grid(); # #### LASSO Penalty # # In high $P$ to $N$ problems, *LASS0* penalty usually shines compared to *Ridge* penalty. 
dlt_lasso = DLT( response_col=response_col, date_col=dt_col, regressor_col=x_cols, seed=SEED, regression_penalty='lasso', level_sm_input=0.01, slope_sm_input=0.01, num_warmup=8000, ) dlt_lasso.fit(df=df) coef_lasso = np.quantile(dlt_lasso._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 ) lw=3 idx = np.arange(NUM_OF_REGRESSORS) plt.figure(figsize=(20, 8)) plt.title("Weights of the model", fontsize=24) plt.plot(coef_lasso[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label="Lasso", alpha=0.5, linestyle='--') plt.fill_between(idx, coef_lasso[0], coef_lasso[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value) plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth") plt.legend(prop={'size': 20}) plt.grid(); print('Fixed Ridge MSE:{:.3f}\nLASSO MSE:{:.3f}'.format( mse(coef_fridge[1], coefs), mse(coef_lasso[1], coefs) ))
docs/tutorials/regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: .venv
#     language: python
#     name: .venv
# ---

# + tags=[]
import numpy as np

# X = Blocked, O = Available
grid = np.array([
    ['O','O','O','O','O'],
    ['X','O','O','O','X'],
    ['X','O','O','O','X'],
    ['O','O','X','O','O'],
    ['O','X','O','O','X']
])

# Grid width/height (assuming grid is equal width and height), used later to
# iterate through neighbours
grid_size = len(grid[0])
# -

# Start in bottom left, finish in top right
start = (grid_size-1, 0)
finish = (0, grid_size-1)
print(finish)

# Dijkstra's algorithm, in brief:
#
# 1. Mark all nodes unvisited; give the start node tentative distance 0 and
#    every other node infinity.
# 1. For the current node, relax the tentative distance of each unvisited
#    neighbour through the current node, keeping the smaller value.
# 1. Mark the current node visited (it will never be checked again), then pick
#    the unvisited node with the smallest tentative distance as the new
#    current node.
# 1. Stop once the destination node has been visited (or the smallest
#    remaining tentative distance is infinity, i.e. the rest is unreachable).

# +
def get_neighbours(index, size):
    """Return the 4-connected neighbours of *index* inside a size x size grid."""
    x, y = index
    results = []
    if x != 0:
        results.append((x-1, y))
    if x != size - 1:
        results.append((x+1, y))
    if y != 0:
        results.append((x, y-1))
    if y != size - 1:
        results.append((x, y+1))
    return results

# Whether node has been visited
visited = np.full((grid_size, grid_size), False)

# Initialize all distances to infinity.
# Fix: np.Infinity was an alias removed in NumPy 2.0 — use np.inf.
distances = np.full((grid_size, grid_size), np.inf)
distances[start] = 0

current_node = start
while not visited[finish]:
    for neighbour in get_neighbours(current_node, grid_size):
        if visited[neighbour]:
            continue
        # Skip blocked grid squares
        if grid[neighbour] == 'X':
            continue
        # Just assume all edges have the same cost of 1
        edge_cost = 1
        distances[neighbour] = min(distances[current_node] + edge_cost, distances[neighbour])
    visited[current_node] = True
    # select the unvisited node with the smallest tentative distance
    # (fix: compare to None with `is`, not `==`)
    min_index = None
    min_value = None
    for x in range(grid_size):
        for y in range(grid_size):
            if not visited[(x, y)]:
                if min_value is None or distances[(x, y)] < min_value:
                    min_value = distances[(x, y)]
                    min_index = (x, y)
    current_node = min_index

print(distances)
dijkstra.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Day 6 # ## Part 1 from aocd.models import Puzzle import numpy as np puzzle = Puzzle(year=2020, day=6) def count_yes(group_text): yes_set = set() for line in group_text.splitlines(): yes_set |= set(line) yes_set -= set('\n') return len(yes_set) # + group_text = "" total_yes = 0 for line in puzzle.input_data.splitlines(): if line == "": total_yes += count_yes(group_text) group_text = "" else: group_text += line group_text += '\n' total_yes += count_yes(group_text) total_yes # - puzzle.answer_a = total_yes # + def count_yes(group_text): yes_set = set('abcdefghijklmnopqrstuvwxyz') for line in group_text.splitlines(): yes_set &= set(line) yes_set -= set('\n') return len(yes_set) s = """mz mz mzch""" count_yes(s) # + group_text = "" total_yes = 0 for line in puzzle.input_data.splitlines(): if line == "": total_yes += count_yes(group_text) group_text = "" else: group_text += line group_text += '\n' total_yes += count_yes(group_text) total_yes # - puzzle.answer_b = total_yes
src/2020/day-six.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# NOTE(review): despite the Python kernel metadata above, the cells below are
# Scala (Spark + SageMaker Spark SDK): spam/ham classification with the
# SageMaker XGBoost built-in algorithm.

# +
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.feature.HashingTF
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils

import com.amazonaws.services.sagemaker.sparksdk.IAMRole
import com.amazonaws.services.sagemaker.sparksdk.algorithms.XGBoostSageMakerEstimator
import com.amazonaws.services.sagemaker.sparksdk.SageMakerResourceCleanup
# +
// Load 2 types of emails from text files: spam and ham (non-spam).
// Each line has text from one email.
// Convert to lower case, remove punctuation and numbers, trim whitespace
// This adds 0.6% accuracy!
val spam = sc.textFile("s3://sagemaker-eu-west-1-123456789012/spam").map(l => l.toLowerCase()).map(l => l.replaceAll("[^ a-z]", "")).map(l => l.trim())
val ham = sc.textFile("s3://sagemaker-eu-west-1-123456789012/ham").map(l => l.toLowerCase()).map(l => l.replaceAll("[^ a-z]", "")).map(l => l.trim())
spam.take(5)
# +
// Create a HashingTF instance to map email text to vectors of features.
val tf = new HashingTF(numFeatures = 200)
// Each email is split into words, and each word is mapped to one feature.
val spamFeatures = spam.map(email => tf.transform(email.split(" ")))
val hamFeatures = ham.map(email => tf.transform(email.split(" ")))

// Display features for a spam sample
spamFeatures.take(1)
// Display features for a ham sample
hamFeatures.take(1)
# +
// Create LabeledPoint datasets for positive (spam) and negative (ham) examples.
val positiveExamples = spamFeatures.map(features => LabeledPoint(1, features))
val negativeExamples = hamFeatures.map(features => LabeledPoint(0, features))

// Display label for a spam sample
positiveExamples.take(1)
// Display label for a ham sample
negativeExamples.take(1)
# -

// The XGBoost built-in algo requires a libsvm-formatted DataFrame
val data = positiveExamples.union(negativeExamples)
val data_libsvm = MLUtils.convertVectorColumnsToML(data.toDF)
data_libsvm.take(2)

// Split the data set 80/20
val Array(trainingData, testData) = data_libsvm.randomSplit(Array(0.8, 0.2))

# +
val roleArn = "arn:aws:iam::123456789012:role/service-role/AmazonSageMaker-ExecutionRole-20200501T145026"

// One m5.large training instance, deployed to a t2.medium endpoint.
val xgboost_estimator = new XGBoostSageMakerEstimator(
    trainingInstanceType="ml.m5.large",
    trainingInstanceCount=1,
    endpointInstanceType="ml.t2.medium",
    endpointInitialInstanceCount=1,
    sagemakerRole=IAMRole(roleArn))

// Binary classification with logistic output, 25 boosting rounds.
xgboost_estimator.setObjective("binary:logistic")
xgboost_estimator.setNumRound(25)
# -

val xgboost_model = xgboost_estimator.fit(trainingData)

val transformedData = xgboost_model.transform(testData)
transformedData.head(5)

// Threshold the predicted probability at 0.5 to get a hard 0/1 label.
val roundedData = transformedData.withColumn("prediction_rounded", when($"prediction" > 0.5 , 1.0).otherwise(0.0))

val accuracy = 1.0 * roundedData.filter($"label"=== $"prediction_rounded").count / roundedData.count()

xgboost_model.getCreatedResources

// Tear down the SageMaker endpoint/model resources created by fit().
val cleanup = new SageMakerResourceCleanup(xgboost_model.sagemakerClient)
cleanup.deleteResources(xgboost_model.getCreatedResources)
sdkv2/ch7/spark/Spam Detection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# Exploratory analysis of voter registration vs. votes cast:
# load the data, inspect it, then fit and diagnose a simple linear model.

# Read the data; assumes registration.csv is in the working directory
# with (at least) numeric columns `registered` and `voted` — TODO confirm.
df <- read.csv("registration.csv")

# Per-column summary statistics.
summary(df)

# Pairwise scatterplots of all columns.
plot(df)

# Linear correlation between turnout and registration.
cor(df$voted,df$registered)

# Ordinary least-squares fit: voted as a linear function of registered.
fit <- lm(df$voted ~ df$registered)
fit

# Standard 2x2 grid of regression diagnostic plots
# (residuals vs. fitted, Q-Q, scale-location, leverage).
par(mfrow=c(2,2))
plot(fit)

# Raw residuals and the full model summary (coefficients, R^2, etc.).
residuals(fit)
summary(fit)
Section 6/Voter Registration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: ML_venv
#     name: ml_venv
# ---

# # SLP Test
#
# Exploring the **Perceptron** (single-layer perceptron) classifier on the iris
# dataset, following the
# [tutorial](https://chrisalbon.com/machine_learning/basics/perceptron_in_scikit-learn/).
#
# ## Importing libraries

from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# FIX: plot_confusion_matrix was deprecated in scikit-learn 1.0 and removed in
# 1.2; ConfusionMatrixDisplay.from_estimator is the supported replacement.
from sklearn.metrics import classification_report, ConfusionMatrixDisplay

# ## Loading the dataset

# +
irisDS = load_iris()

X = irisDS.data    # feature matrix: 150 samples x 4 measurements
y = irisDS.target  # integer class labels (3 species)

irisDS.target_names
# -

# ## Splitting data for training and testing

# 70/30 split; random_state pinned for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# ## Preprocessing the training data

# +
# Fit the scaler on the TRAINING data only, so test-set statistics cannot leak
# into the standardization (each feature mapped to mean=0, unit variance).
sc = StandardScaler()
sc.fit(X_train)

# +
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
# -

# ## Instantiating, training, testing and displaying the classifier results

# +
classifier = Perceptron(max_iter=30, eta0=0.1, random_state=0)
classifier.fit(X_train_std, y_train)

y_pred = classifier.predict(X_test_std)

print('\n' + classification_report(y_test, y_pred, target_names=irisDS.target_names))
# Row-normalized confusion matrix, scored directly from the fitted estimator.
ConfusionMatrixDisplay.from_estimator(
    estimator=classifier,
    X=X_test_std,
    y=y_test,
    display_labels=irisDS.target_names,
    normalize='true'
)
ML/f00-sklearn_explore/02-slp/00-test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # 1D Unbalanced optimal transport
#
# Computes an unbalanced optimal-transport plan between two 1D histograms of
# different total mass, relaxing the marginal constraints with a
# Kullback-Leibler penalty.

# +
# Author: <NAME> <<EMAIL>>
#
# License: MIT License

import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
from ot.datasets import make_1D_gauss as gauss
# -

# ## Generate data

# +
n_bins = 100  # number of histogram bins

# positions of the bins on the real line
bin_positions = np.arange(n_bins, dtype=np.float64)

# two Gaussian histograms (m = mean, s = standard deviation)
src = gauss(n_bins, m=20, s=5)
tgt = gauss(n_bins, m=60, s=10)

# scale the target so the two histograms carry different total mass
tgt = tgt * 5.

# squared-Euclidean ground cost between bin positions, rescaled to [0, 1]
cost = ot.dist(bin_positions.reshape((n_bins, 1)), bin_positions.reshape((n_bins, 1)))
cost = cost / cost.max()
# -

# ## Plot distributions and loss matrix

# +
pl.figure(1, figsize=(6.4, 3))
pl.plot(bin_positions, src, 'b', label='Source distribution')
pl.plot(bin_positions, tgt, 'r', label='Target distribution')
pl.legend()

# show both histograms alongside the ground-cost matrix
pl.figure(2, figsize=(5, 5))
ot.plot.plot1D_mat(src, tgt, cost, 'Cost matrix M')
# -

# ## Solve Unbalanced Sinkhorn

# +
reg_entropy = 0.1  # entropic regularization strength
reg_kl = 1.        # KL marginal-relaxation strength

plan = ot.unbalanced.sinkhorn_unbalanced(src, tgt, cost, reg_entropy, reg_kl, verbose=True)

pl.figure(4, figsize=(5, 5))
ot.plot.plot1D_mat(src, tgt, plan, 'UOT matrix Sinkhorn')

pl.show()
master/_downloads/67d9f7b7540dd154bb2a380735385c70/plot_UOT_1D.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # Start-to-Finish Example: Setting up Exact Initial Data for Einstein's Equations, in Curvilinear Coordinates # ## Authors: <NAME>, <NAME>, and <NAME> # # ## This module sets up initial data for a specified exact solution written in terms of ADM variables, using the [*Exact* ADM Spherical to BSSN Curvilinear initial data module](../edit/BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py). # # **Module Status:** <font color='green'><b> Validated </b></font> # # **Validation Notes:** This module has been validated, confirming that all initial data sets exhibit convergence to zero of the Hamiltonian and momentum constraints at the expected rate or better. # # ### NRPy+ Source Code for this module: # * [BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py](../edit/BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb): *Exact* Spherical ADM$\to$Curvilinear BSSN converter function # * [BSSN/BSSN_constraints.py](../edit/BSSN/BSSN_constraints.py); [\[**tutorial**\]](Tutorial-BSSN_constraints.ipynb): Hamiltonian & momentum constraints in BSSN curvilinear basis/coordinates # # ## Introduction: # Here we use NRPy+ to generate a C code confirming that specified *exact* initial data satisfy Einstein's equations of general relativity. 
The following exact initial data types are supported: # # * Shifted Kerr-Schild spinning black hole initial data # * "Static" Trumpet black hole initial data # * Brill-Lindquist two black hole initial data # * UIUC black hole initial data # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This module is organized as follows # # 0. [Preliminaries](#prelim): The Choices for Initial Data # 1. [Choice 1](#sks): Shifted Kerr-Schild spinning black hole initial data # 1. [Choice 2](#st): "Static" Trumpet black hole initial data # 1. [Choice 3](#bl): Brill-Lindquist two black hole initial data # 1. [Choice 4](#uiuc): UIUC black hole initial data # 1. [Step 2](#initializenrpy): Set core NRPy+ parameters for numerical grids and reference metric # 1. [Step 3](#adm_id): Import Black Hole ADM initial data C function from NRPy+ module # 1. [Step 4](#validate): Validating that the black hole initial data satisfy the Hamiltonian constraint # 1. [Step 4.a](#ham_const_output): Output C code for evaluating the Hamiltonian and Momentum constraint violation # 1. [Step 4.b](#apply_bcs): Apply singular, curvilinear coordinate boundary conditions # 1. [Step 4.c](#enforce3metric): Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint # 1. [Step 5](#mainc): `Initial_Data.c`: The Main C Code # 1. [Step 6](#plot): Plotting the initial data # 1. [Step 7](#convergence): Validation: Convergence of numerical errors (Hamiltonian constraint violation) to zero # 1. [Step 8](#latex_pdf_output): Output this module to $\LaTeX$-formatted PDF file # <a id='prelim'></a> # # # Preliminaries: The Choices for Initial Data # $$\label{prelim}$$ # <a id='sks'></a> # # ## Shifted Kerr-Schild spinning black hole initial data \[Back to [top](#toc)\] # $$\label{sks}$$ # # Here we use NRPy+ to generate initial data for a spinning black hole. 
# # Shifted Kerr-Schild spinning black hole initial data has been <font color='green'><b> validated </b></font> to exhibit convergence to zero of both the Hamiltonian and momentum constraint violations at the expected order to the exact solution. # # **NRPy+ Source Code:** # * [BSSN/ShiftedKerrSchild.py](../edit/BSSN/ShiftedKerrSchild.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-ShiftedKerrSchild.ipynb) # # The [BSSN.ShiftedKerrSchild](../edit/BSSN/ShiftedKerrSchild.py) NRPy+ module does the following: # # 1. Set up shifted Kerr-Schild initial data, represented by [ADM](https://en.wikipedia.org/wiki/ADM_formalism) quantities in the **Spherical basis**, as [documented here](Tutorial-ADM_Initial_Data-ShiftedKerrSchild.ipynb). # 1. Convert the exact ADM **Spherical quantities** to **BSSN quantities in the desired Curvilinear basis** (set by `reference_metric::CoordSystem`), as [documented here](Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb). # 1. Sets up the standardized C function for setting all BSSN Curvilinear gridfunctions in a pointwise fashion, as [written here](../edit/BSSN/BSSN_ID_function_string.py), and returns the C function as a Python string. # <a id='st'></a> # # ## "Static" Trumpet black hole initial data \[Back to [top](#toc)\] # $$\label{st}$$ # # Here we use NRPy+ to generate initial data for a single trumpet black hole ([Dennison & Baumgarte (2014)](https://arxiv.org/abs/1403.5484)). # # "Static" Trumpet black hole initial data has been <font color='green'><b> validated </b></font> to exhibit convergence to zero of the Hamiltonian constraint violation at the expected order to the exact solution. It was carefully ported from the [original NRPy+ code](https://bitbucket.org/zach_etienne/nrpy). 
# # **NRPy+ Source Code:** # * [BSSN/StaticTrumpet.py](../edit/BSSN/StaticTrumpet.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-StaticTrumpet.ipynb) # # The [BSSN.StaticTrumpet](../edit/BSSN/StaticTrumpet.py) NRPy+ module does the following: # # 1. Set up static trumpet black hole initial data, represented by [ADM](https://en.wikipedia.org/wiki/ADM_formalism) quantities in the **Spherical basis**, as [documented here](Tutorial-ADM_Initial_Data-StaticTrumpet.ipynb). # 1. Convert the exact ADM **Spherical quantities** to **BSSN quantities in the desired Curvilinear basis** (set by `reference_metric::CoordSystem`), as [documented here](Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb). # 1. Sets up the standardized C function for setting all BSSN Curvilinear gridfunctions in a pointwise fashion, as [written here](../edit/BSSN/BSSN_ID_function_string.py), and returns the C function as a Python string. # <a id='bl'></a> # # ## Brill-Lindquist initial data \[Back to [top](#toc)\] # $$\label{bl}$$ # # Here we use NRPy+ to generate initial data for two black holes (Brill-Lindquist, [Brill & Lindquist, Phys. Rev. 131, 471, 1963](https://journals.aps.org/pr/abstract/10.1103/PhysRev.131.471); see also Eq. 1 of [<NAME>, arXiv:gr-qc/9711015v1](https://arxiv.org/pdf/gr-qc/9711015v1.pdf)). # # [//]: # " and then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4)." # # Brill-Lindquist initial data has been <font color='green'><b> validated </b></font> to exhibit convergence to zero of the Hamiltonian constraint violation at the expected order to the exact solution, and all quantities have been validated against the [original SENR code](https://bitbucket.org/zach_etienne/nrpy). 
# # **NRPy+ Source Code:** # * [BSSN/BrillLindquist.py](../edit/BSSN/BrillLindquist.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-Brill-Lindquist.ipynb) # * [BSSN/BSSN_ID_function_string.py](../edit/BSSN/BSSN_ID_function_string.py) # # The [BSSN.BrillLindquist](../edit/BSSN/BrillLindquist.py) NRPy+ module does the following: # # 1. Set up Brill-Lindquist initial data [ADM](https://en.wikipedia.org/wiki/ADM_formalism) quantities in the **Cartesian basis**, as [documented here](Tutorial-ADM_Initial_Data-Brill-Lindquist.ipynb). # 1. Convert the ADM **Cartesian quantities** to **BSSN quantities in the desired Curvilinear basis** (set by `reference_metric::CoordSystem`), as [documented here](Tutorial-ADM_Initial_Data-Converting_ADMCartesian_to_BSSNCurvilinear.ipynb). # 1. Sets up the standardized C function for setting all BSSN Curvilinear gridfunctions in a pointwise fashion, as [written here](../edit/BSSN/BSSN_ID_function_string.py), and returns the C function as a Python string. # <a id='uiuc'></a> # # ## UIUC black hole initial data \[Back to [top](#toc)\] # $$\label{uiuc}$$ # # UIUC black hole initial data has been <font color='green'><b> validated </b></font> to exhibit convergence to zero of the Hamiltonian constraint violation at the expected order to the exact solution, and all quantities have been validated against the [original SENR code](https://bitbucket.org/zach_etienne/nrpy). # # **NRPy+ Source Code:** # * [BSSN/UIUCBlackHole.py](../edit/BSSN/UIUCBlackHole.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-UIUCBlackHole.ipynb) # # The [BSSN.UIUCBlackHole](../edit/BSSN/UIUCBlackHole.py) NRPy+ module does the following: # # 1. Set up UIUC black hole initial data, represented by [ADM](https://en.wikipedia.org/wiki/ADM_formalism) quantities in the **Spherical basis**, as [documented here](Tutorial-ADM_Initial_Data-UIUCBlackHole.ipynb). # 1. 
Convert the numerical ADM **Spherical quantities** to **BSSN quantities in the desired Curvilinear basis** (set by `reference_metric::CoordSystem`), as [documented here](Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb). # 1. Sets up the standardized C function for setting all BSSN Curvilinear gridfunctions in a pointwise fashion, as [written here](../edit/BSSN/BSSN_ID_function_string.py), and returns the C function as a Python string. # <a id='-pickid'></a> # # # Step 1: Specify the Initial Data to Test \[Back to [top](#toc)\] # $$\label{pickid}$$ # # Here you have a choice for which initial data you would like to import and test for convergence. The following is a list of the currently compatible `initial_data_string` options for you to choose from. # # * `"Shifted KerrSchild"` # * `"Static Trumpet"` # * `"Brill-Lindquist"` # * `"UIUC"` # + import collections ################# # For the User: Choose initial data, default is Shifted KerrSchild. # You are also encouraged to adjust any of the # DestGridCoordSystem, freeparams, or EnableMomentum parameters! # NOTE: Only DestGridCoordSystem == Spherical or SinhSpherical # currently work out of the box; additional modifications # will likely be necessary for other CoordSystems. 
#################
# The user's initial-data choice, plus a lookup table mapping each choice to
# its NRPy+ module, setup function, destination coordinate system, C
# free-parameter definitions, and whether the momentum constraint should also
# be validated.
initial_data_string = "Shifted KerrSchild" # "UIUC"

dictID = {}
# One record per supported initial-data type.
IDmod_retfunc = collections.namedtuple('IDmod_retfunc', 'modulename functionname DestGridCoordSystem freeparams EnableMomentum')

dictID['Shifted KerrSchild'] = IDmod_retfunc(
    modulename = "BSSN.ShiftedKerrSchild", functionname = "ShiftedKerrSchild",
    DestGridCoordSystem = "Spherical",
    # M = mass, a = spin parameter, r0 = radial shift of the slice.
    freeparams = ["const REAL M = 1.0;", "const REAL a = 0.9;", "const REAL r0 = 1.0;"],
    EnableMomentum = True)
dictID['Static Trumpet'] = IDmod_retfunc(
    modulename = "BSSN.StaticTrumpet", functionname = "StaticTrumpet",
    DestGridCoordSystem = "Spherical",
    freeparams = ["const REAL M = 1.0;"],
    # Momentum-constraint validation disabled for this data set.
    EnableMomentum = False)
dictID['Brill-Lindquist'] = IDmod_retfunc(
    modulename = "BSSN.BrillLindquist", functionname = "BrillLindquist",
    DestGridCoordSystem = "Spherical",
    # Two equal-mass black holes on the x-axis at x = -1 and x = +1.
    freeparams = ["const REAL BH1_posn_x =-1.0,BH1_posn_y = 0.0,BH1_posn_z = 0.0;",
                  "const REAL BH2_posn_x = 1.0,BH2_posn_y = 0.0,BH2_posn_z = 0.0;",
                  "const REAL BH1_mass = 0.5,BH2_mass = 0.5;"],
    EnableMomentum = False)
dictID['UIUC'] = IDmod_retfunc(modulename = "BSSN.UIUCBlackHole", functionname = "UIUCBlackHole",
    DestGridCoordSystem = "SinhSpherical",
    # chi = dimensionless spin parameter.
    freeparams = ["const REAL M = 1.0;", "const REAL chi = 0.99;"],
    EnableMomentum = True)

# Output the free parameters associated with the initial data
with open("BSSN/freeparams.h", "w") as file:
    for p in dictID[initial_data_string].freeparams:
        file.write(p+"\n")
# -

# <a id='initializenrpy'></a>
#
# # Step 2: Set up the needed NRPy+ infrastructure and declare core gridfunctions \[Back to [top](#toc)\]
# $$\label{initializenrpy}$$
#
# We will import the core modules of NRPy that we will need and specify the main gridfunctions we will need.

# +
# We import needed core NRPy+ modules
from outputC import *
import NRPy_param_funcs as par
import grid as gri
import loop as lp
import indexedexp as ixp
import finite_difference as fin
import reference_metric as rfm
import importlib

# Set spatial dimension (must be 3 for BSSN)
DIM = 3
par.set_parval_from_str("grid::DIM",DIM)

# Then we set the coordinate system for the numerical grid
par.set_parval_from_str("reference_metric::CoordSystem", dictID[initial_data_string].DestGridCoordSystem)
rfm.reference_metric() # Create ReU, ReDD needed for rescaling B-L initial data, generating BSSN RHSs, etc.

#################
# Next output C headers related to the numerical grids we just set up:
#################

# First output the coordinate bounds xxmin[] and xxmax[]:
with open("BSSN/xxminmax.h", "w") as file:
    file.write("const REAL xxmin[3] = {"+str(rfm.xxmin[0])+","+str(rfm.xxmin[1])+","+str(rfm.xxmin[2])+"};\n")
    file.write("const REAL xxmax[3] = {"+str(rfm.xxmax[0])+","+str(rfm.xxmax[1])+","+str(rfm.xxmax[2])+"};\n")

# Generic coordinate NRPy+ file output, Part 2: output the conversion from (x0,x1,x2) to Cartesian (x,y,z)
outputC([rfm.xxCart[0],rfm.xxCart[1],rfm.xxCart[2]],["xCart[0]","xCart[1]","xCart[2]"],
        "BSSN/xxCart.h")

# Reset the gridfunction registry, then register the Hamiltonian constraint H
# and momentum constraint MU^i as auxiliary ("AUX") gridfunctions.
gri.glb_gridfcs_list = []
H = gri.register_gridfunctions("AUX","H")
# Register the Momentum Constraint as a gridfunction, to be used later
MU = ixp.register_gridfunctions_for_single_rank1("AUX", "MU")
# -

# <a id='adm_id'></a>
#
# # Step 3: Import Black Hole ADM initial data C function from NRPy+ module \[Back to [top](#toc)\]
# $$\label{adm_id}$$

# +
# Import Black Hole initial data
IDmodule = importlib.import_module(dictID[initial_data_string].modulename)
IDfunc = getattr(IDmodule, dictID[initial_data_string].functionname)
# NOTE(review): calling IDfunc() appears to populate the module-level
# `returnfunction` inside IDmodule (read back below); the local binding here
# is otherwise unused — confirm against the NRPy+ ID modules.
returnfunction = IDfunc()

# Now output the initial data to file:
#header_string = dictID[initial_data_string].modulename.replace(".", "/")+".h"
with open("BSSN/InitialData.h", "w") as file:
    file.write(IDmodule.returnfunction)
# -

# <a id='validate'></a>
#
# # Step 4: Validating that the black hole initial data satisfy the Hamiltonian constraint \[Back to [top](#toc)\]
# $$\label{validate}$$
#
# We will validate that the black hole initial data satisfy the Hamiltonian constraint, modulo numerical finite differencing error.

# <a id='ham_const_output'></a>
#
# ## Step 4.a: Output C code for evaluating the Hamiltonian and Momentum constraint violation \[Back to [top](#toc)\]
# $$\label{ham_const_output}$$
#
# First output C code for evaluating the Hamiltonian constraint violation. For the initial data where `EnableMomentum = True` we must also output C code for evaluating the Momentum constraint violation.

# Vacuum spacetime: no T^{mu nu} stress-energy source terms are added.
import BSSN.BSSN_constraints as bssncon
bssncon.output_C__Hamiltonian_h(add_T4UUmunu_source_terms=False)
bssncon.output_C__MomentumConstraint_h(add_T4UUmunu_source_terms=False)

# <a id='apply_bcs'></a>
#
# ## Step 4.b: Apply singular, curvilinear coordinate boundary conditions \[Back to [top](#toc)\]
# $$\label{apply_bcs}$$
#
# Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial module](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)

import CurviBoundaryConditions.CurviBoundaryConditions as cbcs
cbcs.Set_up_CurviBoundaryConditions()

# <a id='enforce3metric'></a>
#
# ## Step 4.c: Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint
# $$\label{enforce3metric}$$
#
# Then enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint (Eq. 53 of [Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)), as [documented in the corresponding NRPy+ tutorial module](Tutorial-BSSN-Enforcing_Determinant_gammabar_equals_gammahat_Constraint.ipynb)
#
# Applying curvilinear boundary conditions should affect the initial data at the outer boundary, and will in general cause the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint to be violated there.
Thus after we apply these boundary conditions, we must always call the routine for enforcing the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint: import BSSN.Enforce_Detgammabar_Constraint as EGC EGC.output_Enforce_Detgammabar_Constraint_Ccode() # <a id='mainc'></a> # # # Step 5: `Initial_Data_Playground.c`: The Main C Code \[Back to [top](#toc)\] # $$\label{mainc}$$ # + # Part P0: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER # set REAL=double, so that all floating point numbers are stored to at least ~16 significant digits. with open("BSSN/Initial_Data_Playground_REAL__NGHOSTS.h", "w") as file: file.write(""" // Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER #define NGHOSTS """+str(int(par.parval_from_str("finite_difference::FD_CENTDERIVS_ORDER")/2)+1)+"""\n // Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point // numbers are stored to at least ~16 significant digits #define REAL double\n""") # + # %%writefile BSSN/Initial_Data_Playground.c // Part P0: define NGHOSTS and REAL double #include "Initial_Data_Playground_REAL__NGHOSTS.h" // Part P1: Import needed header files #include "stdio.h" #include "stdlib.h" #include "math.h" #include "stdint.h" // Needed for Windows GCC 6.x compatibility // Part P2: Add needed #define's to set data type, the IDX4() macro, and the gridfunctions // Part P2a: set REAL=double, so that all floating point numbers are stored to at least ~16 significant digits. // Step P3: Set free parameters for the numerical grid // Spherical coordinates parameter: // Set RMAX, the scale of the numerical domain, // and the max radius in Spherical coordinates. 
#define RMAX 3.0 // SinhSpherical coordinates parameters: const REAL AMPL = RMAX; const REAL SINHW = 0.2; // Cylindrical coordinates parameters: const REAL ZMIN = -RMAX; const REAL ZMAX = RMAX; const REAL RHOMAX = RMAX; // Cartesian coordinates parameters: const REAL xmin = -RMAX, xmax = RMAX; const REAL ymin = -RMAX, ymax = RMAX; const REAL zmin = -RMAX, zmax = RMAX; // SymTP coordinates parameters: const REAL bScale = 0.5; const REAL AMAX = RMAX; // Step P4: Set free parameters for the initial data #include "freeparams.h" // Step P4a: Set the needed ID_inputs struct. // As this struct only provides necessary // information for *numerical* // initial data, we populate the // struct with a dummy variable. typedef struct __ID_inputs { int dummy; } ID_inputs; // Part P4b: Declare the IDX4(gf,i,j,k) macro, which enables us to store 4-dimensions of // data in a 1D array. In this case, consecutive values of "i" // (all other indices held to a fixed value) are consecutive in memory, where // consecutive values of "j" (fixing all other indices) are separated by // Nxx_plus_2NGHOSTS[0] elements in memory. Similarly, consecutive values of // "k" are separated by Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1] in memory, etc. #define IDX4(g,i,j,k) \ ( (i) + Nxx_plus_2NGHOSTS[0] * ( (j) + Nxx_plus_2NGHOSTS[1] * ( (k) + Nxx_plus_2NGHOSTS[2] * (g) ) ) ) #define IDX3(i,j,k) ( (i) + Nxx_plus_2NGHOSTS[0] * ( (j) + Nxx_plus_2NGHOSTS[1] * (k) ) ) // Assuming idx = IDX3(i,j,k). Much faster if idx can be reused over and over: #define IDX4pt(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1]*Nxx_plus_2NGHOSTS[2]) * (g) ) // Part P4c: Set #define's for BSSN gridfunctions. 
C code generated above #include "../CurviBoundaryConditions/gridfunction_defines.h" #define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \ for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) // Step P5: Function for converting uniform grid coord // (xx[0][i0],xx[1][i1],xx[2][i2]) to // corresponding Cartesian coordinate. void xxCart(REAL *xx[3],const int i0,const int i1,const int i2, REAL xCart[3]) { REAL xx0 = xx[0][i0]; REAL xx1 = xx[1][i1]; REAL xx2 = xx[2][i2]; #include "xxCart.h" } // Step P6: Include basic functions needed to impose curvilinear // parity and boundary conditions. #include "../CurviBoundaryConditions/curvilinear_parity_and_outer_boundary_conditions.h" // Step P7: Function for enforcing the gammabar=gammahat constraint: #include "enforce_detgammabar_constraint.h" // Step P8: Set BSSN_ID() for BrillLindquist initial data #include "InitialData.h" // Part P9: Declare the function for setting up initial data. void initial_data(const int Nxx_plus_2NGHOSTS[3],REAL *xx[3], REAL *in_gfs) { #pragma omp parallel for LOOP_REGION(0,Nxx_plus_2NGHOSTS[0], 0,Nxx_plus_2NGHOSTS[1], 0,Nxx_plus_2NGHOSTS[2]) { const int idx = IDX3(i0,i1,i2); BSSN_ID(xx[0][i0],xx[1][i1],xx[2][i2], &in_gfs[IDX4pt(HDD00GF,idx)],&in_gfs[IDX4pt(HDD01GF,idx)],&in_gfs[IDX4pt(HDD02GF,idx)], &in_gfs[IDX4pt(HDD11GF,idx)],&in_gfs[IDX4pt(HDD12GF,idx)],&in_gfs[IDX4pt(HDD22GF,idx)], &in_gfs[IDX4pt(ADD00GF,idx)],&in_gfs[IDX4pt(ADD01GF,idx)],&in_gfs[IDX4pt(ADD02GF,idx)], &in_gfs[IDX4pt(ADD11GF,idx)],&in_gfs[IDX4pt(ADD12GF,idx)],&in_gfs[IDX4pt(ADD22GF,idx)], &in_gfs[IDX4pt(TRKGF,idx)], &in_gfs[IDX4pt(LAMBDAU0GF,idx)],&in_gfs[IDX4pt(LAMBDAU1GF,idx)],&in_gfs[IDX4pt(LAMBDAU2GF,idx)], &in_gfs[IDX4pt(VETU0GF,idx)],&in_gfs[IDX4pt(VETU1GF,idx)],&in_gfs[IDX4pt(VETU2GF,idx)], &in_gfs[IDX4pt(BETU0GF,idx)],&in_gfs[IDX4pt(BETU1GF,idx)],&in_gfs[IDX4pt(BETU2GF,idx)], &in_gfs[IDX4pt(ALPHAGF,idx)],&in_gfs[IDX4pt(CFGF,idx)]); } } // Part P10: Declare function for 
computing the Hamiltonian // constraint violation, which should converge to // zero with increasing numerical resolution. void Hamiltonian_constraint(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3], REAL *xx[3], REAL *in_gfs, REAL *aux_gfs) { #include "Hamiltonian.h" } // Part P11: Declare function for computing the Momentum // constraint violation, which should converge to // zero with increasing numerical resolution. void Momentum_constraint(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3], REAL *xx[3], REAL *in_gfs, REAL *aux_gfs) { #include "MomentumConstraint.h" } // main() function: // Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates // Step 1: Set up chosen initial data // Step 2: Apply parity boundary conditions & quadratic extrapolation boundary conditions on outer boundary/ies. // Step 3: Evaluate Hamiltonian & momentum constraint violations. // Step 4: Free all allocated memory int main(int argc, const char *argv[]) { // Step 0a: Read command-line input, error out if nonconformant if(argc != 4 || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < 2 /* FIXME; allow for axisymmetric sims */) { fprintf(stderr,"Error: Expected three command-line arguments: ./InitialData_Playground Nx0 Nx1 Nx2,\n"); fprintf(stderr,"where Nx[0,1,2] is the number of grid points in the 0, 1, and 2 directions.\n"); fprintf(stderr,"Nx[] MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS); exit(1); } // Step 0b: Set up numerical grid structure, first in space... 
const int Nxx[3] = { atoi(argv[1]), atoi(argv[2]), atoi(argv[3]) }; if(Nxx[0]%2 != 0 || Nxx[1]%2 != 0 || Nxx[2]%2 != 0) { fprintf(stderr,"Error: Cannot guarantee a proper cell-centered grid if number of grid cells not set to even number.\n"); fprintf(stderr," For example, in case of angular directions, proper symmetry zones will not exist.\n"); exit(1); } const int Nxx_plus_2NGHOSTS[3] = { Nxx[0]+2*NGHOSTS, Nxx[1]+2*NGHOSTS, Nxx[2]+2*NGHOSTS }; const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1]*Nxx_plus_2NGHOSTS[2]; #include "xxminmax.h" ID_inputs dummy; // Step 0c: Allocate memory for gridfunctions REAL *exact_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot); REAL *aux_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUX_GFS * Nxx_plus_2NGHOSTS_tot); // Step 0d: Set up space and time coordinates // Step 0d.i: Set \Delta x^i on uniform grids. REAL dxx[3]; for(int i=0;i<3;i++) dxx[i] = (xxmax[i] - xxmin[i]) / ((REAL)Nxx[i]); // Step 0d.ii: Set up uniform coordinate grids REAL *xx[3]; for(int i=0;i<3;i++) { xx[i] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS[i]); for(int j=0;j<Nxx_plus_2NGHOSTS[i];j++) { xx[i][j] = xxmin[i] + ((REAL)(j-NGHOSTS) + (1.0/2.0))*dxx[i]; // Cell-centered grid. } } // Step 0e: Find ghostzone mappings and parities: gz_map *bc_gz_map = (gz_map *)malloc(sizeof(gz_map)*Nxx_plus_2NGHOSTS_tot); parity_condition *bc_parity_conditions = (parity_condition *)malloc(sizeof(parity_condition)*Nxx_plus_2NGHOSTS_tot); set_up_bc_gz_map_and_parity_conditions(Nxx_plus_2NGHOSTS,xx,dxx,xxmin,xxmax, bc_gz_map, bc_parity_conditions); // Step 1: Set up initial data to an exact solution initial_data(Nxx_plus_2NGHOSTS, xx, exact_gfs); // Step 2: Apply parity boundary conditions & quadratic // extrapolation boundary conditions on outer // boundary/ies. // Note that *even exact* initial // data are sometimes ill-defined in // ghost zones. // E.g., spherical initial data might not be // properly defined at points where r=-1. 
apply_bcs(Nxx, Nxx_plus_2NGHOSTS, bc_gz_map,bc_parity_conditions,NUM_EVOL_GFS,evol_gf_parity, exact_gfs); enforce_detgammabar_constraint(Nxx_plus_2NGHOSTS, xx, exact_gfs); // Step 3: Evaluate Hamiltonian & momentum constraint violations. // Step 3a: Evaluate Hamiltonian constraint violation Hamiltonian_constraint(Nxx,Nxx_plus_2NGHOSTS,dxx, xx, exact_gfs, aux_gfs); // Step 3b: Evaluate Momentum Constraint Violation Momentum_constraint(Nxx,Nxx_plus_2NGHOSTS,dxx, xx, exact_gfs, aux_gfs); /* Step 3c: 2D output file: Output conformal factor (CFGF) and constraint violations (HGF, MU0GF, MU1GF, MU2GF). */ const int i0MIN=NGHOSTS; // In spherical, r=Delta r/2. const int i1mid=Nxx_plus_2NGHOSTS[1]/2; const int i2mid=Nxx_plus_2NGHOSTS[2]/2; LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS[0]-NGHOSTS, i1mid,i1mid+1, NGHOSTS,Nxx_plus_2NGHOSTS[2]-NGHOSTS) { REAL xx0 = xx[0][i0]; REAL xx1 = xx[1][i1]; REAL xx2 = xx[2][i2]; REAL xCart[3]; #include "xxCart.h" int idx = IDX3(i0,i1,i2); printf("%e %e %e %e %e %e %e\n",xCart[0],xCart[1], exact_gfs[IDX4pt(CFGF,idx)], log10(fabs(aux_gfs[IDX4pt(HGF,idx)])), log10(fabs(aux_gfs[IDX4pt(MU0GF,idx)])+1e-200), log10(fabs(aux_gfs[IDX4pt(MU1GF,idx)])+1e-200), log10(fabs(aux_gfs[IDX4pt(MU2GF,idx)])+1e-200)); } /* Step 4: Free all allocated memory */ free(aux_gfs); free(exact_gfs); for(int i=0;i<3;i++) free(xx[i]); return 0; } # + import cmdline_helper as cmd cmd.C_compile("BSSN/Initial_Data_Playground.c", "Initial_Data_Playground") cmd.delete_existing_files("out*.txt") cmd.delete_existing_files("out*.png") args_output_list = [["96 96 96", "out96.txt"], ["48 48 48", "out48.txt"]] for args_output in args_output_list: cmd.Execute("Initial_Data_Playground", args_output[0], args_output[1]) # - # <a id='plot'></a> # # # Step 6: Plotting the initial data \[Back to [top](#toc)\] # $$\label{plot}$$ # # Here we plot the evolved conformal factor of these initial data on a 2D grid, such that darker colors imply stronger gravitational fields. 
Hence, we see the the black hole(s) centered at $x/M=\pm 1$, where $M$ is an arbitrary mass scale (conventionally the [ADM mass](https://en.wikipedia.org/w/index.php?title=ADM_formalism&oldid=846335453) is chosen), and our formulation of Einstein's equations adopt $G=c=1$ [geometrized units](https://en.wikipedia.org/w/index.php?title=Geometrized_unit_system&oldid=861682626). # + import numpy as np from scipy.interpolate import griddata from pylab import savefig import matplotlib.pyplot as plt import matplotlib.cm as cm from IPython.display import Image x96,y96,valuesCF96,valuesHam96,valuesmomr96,valuesmomtheta96,valuesmomphi96 = np.loadtxt('out96.txt').T #Transposed for easier unpacking pl_xmin = -3. pl_xmax = +3. pl_ymin = -3. pl_ymax = +3. grid_x, grid_y = np.mgrid[pl_xmin:pl_xmax:100j, pl_ymin:pl_ymax:100j] points96 = np.zeros((len(x96), 2)) for i in range(len(x96)): points96[i][0] = x96[i] points96[i][1] = y96[i] grid96 = griddata(points96, valuesCF96, (grid_x, grid_y), method='nearest') grid96cub = griddata(points96, valuesCF96, (grid_x, grid_y), method='cubic') plt.clf() plt.title("Initial Data") plt.xlabel("x/M") plt.ylabel("y/M") # fig, ax = plt.subplots() #ax.plot(grid96cub.T, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax)) plt.imshow(grid96.T, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax)) savefig("ID.png") plt.close() Image("ID.png") # # interpolation='nearest', cmap=cm.gist_rainbow) # - # <a id='convergence'></a> # # # Step 7: Validation: Convergence of numerical errors (Hamiltonian & momentum constraint violations) to zero \[Back to [top](#toc)\] # $$\label{convergence}$$ # # **Special thanks to <NAME> for creating the following plotting script.** # # The equations behind these initial data solve Einstein's equations exactly, at a single instant in time. One reflection of this solution is that the Hamiltonian constraint violation should be exactly zero in the initial data. 
# # However, when evaluated on numerical grids, the Hamiltonian constraint violation will *not* generally evaluate to zero due to the associated numerical derivatives not being exact. However, these numerical derivatives (finite difference derivatives in this case) should *converge* to the exact derivatives as the density of numerical sampling points approaches infinity. # # In this case, all of our finite difference derivatives agree with the exact solution, with an error term that drops with the uniform gridspacing to the fourth power: $\left(\Delta x^i\right)^4$. # # Here, as in the [Start-to-Finish Scalar Wave (Cartesian grids) NRPy+ tutorial](Tutorial-Start_to_Finish-ScalarWave.ipynb) and the [Start-to-Finish Scalar Wave (curvilinear grids) NRPy+ tutorial](Tutorial-Start_to_Finish-ScalarWaveCurvilinear.ipynb) we confirm this convergence. # # First, let's take a look at what the numerical error looks like on the x-y plane at a given numerical resolution, plotting $\log_{10}|H|$, where $H$ is the Hamiltonian constraint violation: # + RefData=[valuesHam96,valuesmomr96,valuesmomtheta96,valuesmomphi96] SubTitles=["\mathcal{H}",'\mathcal{M}^r',r"\mathcal{M}^{\theta}","\mathcal{M}^{\phi}"] axN = [] #this will let us automate the subplots in the loop that follows grid96N = [] #we need to calculate the grid96 data for each constraint for use later plt.clf() # We want to create four plots. 
One for the Hamiltonian, and three for the momentum # constraints (r,th,ph) # Define the size of the overall figure fig = plt.figure(figsize=(12,12)) # 8 in x 8 in num_plots = 4 if dictID[initial_data_string].EnableMomentum == False: num_plots = 1 for p in range(num_plots): grid96 = griddata(points96, RefData[p], (grid_x, grid_y), method='nearest') grid96N.append(grid96) grid96cub = griddata(points96, RefData[p], (grid_x, grid_y), method='cubic') #fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True) #Generate the subplot for the each constraint ax = fig.add_subplot(221+p) axN.append(ax) # Grid of 2x2 axN[p].set_xlabel('x/M') axN[p].set_ylabel('y/M') axN[p].set_title('$96^3$ Numerical Err.: $log_{10}|'+SubTitles[p]+'|$') fig96cub = plt.imshow(grid96cub.T, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax)) cb = plt.colorbar(fig96cub) # Adjust the spacing between plots plt.tight_layout(pad=4) # - # Next, we set up the same initial data but on a lower-resolution, $48^3$ grid. Since the constraint violation (numerical error associated with the fourth-order-accurate, finite-difference derivatives) should converge to zero with the uniform gridspacing to the fourth power: $\left(\Delta x^i\right)^4$, we expect the constraint violation will increase (relative to the $96^3$ grid) by a factor of $\left(96/48\right)^4$. Here we demonstrate that indeed this order of convergence is observed as expected. I.e., at all points *except* at the points immediately surrounding the coordinate center of the black hole (due to the spatial slice excising the physical singularity at this point through [the puncture method](http://gr.physics.ncsu.edu/UMD_June09.pdf)) exhibit numerical errors that drop as $\left(\Delta x^i\right)^4$. 
# + x48,y48,valuesCF48,valuesHam48,valuesmomr48,valuesmomtheta48,valuesmomphi48 = np.loadtxt('out48.txt').T #Transposed for easier unpacking points48 = np.zeros((len(x48), 2)) for i in range(len(x48)): points48[i][0] = x48[i] points48[i][1] = y48[i] RefData=[valuesHam48,valuesmomr48,valuesmomtheta48,valuesmomphi48] SubTitles=["\mathcal{H}",'\mathcal{M}^r',r"\mathcal{M}^{\theta}","\mathcal{M}^{\phi}"] axN = [] plt.clf() # We want to create four plots. One for the Hamiltonian, and three for the momentum # constrains (r,th,ph) # Define the size of the overall figure fig = plt.figure(figsize=(12,12)) # 8 in x 8 in for p in range(num_plots): #loop to cycle through our constraints and plot the data grid48 = griddata(points48, RefData[p], (grid_x, grid_y), method='nearest') griddiff_48_minus_96 = np.zeros((100,100)) griddiff_48_minus_96_1darray = np.zeros(100*100) gridx_1darray_yeq0 = np.zeros(100) grid48_1darray_yeq0 = np.zeros(100) grid96_1darray_yeq0 = np.zeros(100) count = 0 for i in range(100): for j in range(100): griddiff_48_minus_96[i][j] = grid48[i][j] - grid96N[p][i][j] griddiff_48_minus_96_1darray[count] = griddiff_48_minus_96[i][j] if j==49: gridx_1darray_yeq0[i] = grid_x[i][j] grid48_1darray_yeq0[i] = grid48[i][j] + np.log10((48./96.)**4) grid96_1darray_yeq0[i] = grid96N[p][i][j] count = count + 1 #Generate the subplot for the each constraint ax = fig.add_subplot(221+p) axN.append(ax) # Grid of 2x2 axN[p].set_title('Plot Demonstrating $4^{th}$-Order Convergence of $'+SubTitles[p]+'$') axN[p].set_xlabel("x/M") axN[p].set_ylabel("$log_{10}$(Relative Error)") ax.plot(gridx_1darray_yeq0, grid96_1darray_yeq0, 'k-', label='Nr=96') ax.plot(gridx_1darray_yeq0, grid48_1darray_yeq0, 'k--', label='Nr=48, mult by (48/96)^4') ax.set_ylim([-14,4.]) legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large') legend.get_frame().set_facecolor('C1') # Adjust the spacing between plots plt.tight_layout(pad=4) # - # <a id='latex_pdf_output'></a> # # # Step 7: Output 
this module to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] # $$\label{latex_pdf_output}$$ # # The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename # [Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_Exact_Initial_Data.pdf](Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_Exact_Initial_Data.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) # !jupyter nbconvert --to latex --template latex_nrpy_style.tplx Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_Exact_Initial_Data.ipynb # !pdflatex -interaction=batchmode Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_Exact_Initial_Data.tex # !pdflatex -interaction=batchmode Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_Exact_Initial_Data.tex # !pdflatex -interaction=batchmode Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_Exact_Initial_Data.tex # !rm -f Tut*.out Tut*.aux Tut*.log
Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_Exact_Initial_Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

from keras.models import load_model
import numpy as np
import pandas as pd
import cv2

# Trained gender classifier plus the mean image subtracted for normalization.
model = load_model('vgg16_gender2.h5')
mean = np.load('summ.npy')


# +
# raw_input() exists only in Python 2; fall back to input() on Python 3 so the
# "press Enter to continue" pause works under either interpreter.
try:
    _pause = raw_input
except NameError:
    _pause = input


def model_predict(model, img, mean, im_height = 224, im_width = 224, rounded = False):
    """Run the gender classifier on a single face crop.

    Parameters
    ----------
    model : keras model producing a single score (score > 0.7 is labeled MALE
        in the rounded branch below).
    img : face image as returned by cv2 (BGR, any size; resized here).
    mean : mean image to subtract -- assumed to match the resized shape
        (im_height, im_width, 3); TODO confirm against summ.npy.
    im_height, im_width : input size the network expects.
    rounded : if True return a "MALE"/"FEMALE" label, else the raw score as str.
    """
    # Size the buffer from the parameters (was hard-coded to 224x224, which
    # silently broke any non-default im_height/im_width).
    image_final = np.zeros((1, im_height, im_width, 3))
    img = cv2.resize(img, (im_width, im_height))
    img = img - mean
    image_final[0, :, :, :] = img
    # NHWC -> NCHW: the model apparently expects channels-first input.
    image_final = np.swapaxes(image_final, 1, 3)
    image_final = np.swapaxes(image_final, 2, 3)
    prediction = model.predict(image_final)
    if rounded:
        # float() collapses the (1, 1) prediction array to a scalar so the
        # comparison is unambiguous.
        if float(prediction) > 0.7:
            return "MALE"
        else:
            return "FEMALE"
    return str(prediction)


def recognize_person():
    """Capture webcam frames, detect faces, and overlay the predicted gender.

    Press 'q' in the video window to quit.
    """
    gender = ""
    face_cascade = cv2.CascadeClassifier('../Capture-Face-master/face_detection_xml/face_cascade_classifier.xml')
    video_capture = cv2.VideoCapture(0)
    face_detected = False
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        if not ret:
            # Camera unavailable / stream ended: previously this crashed in
            # cv2.cvtColor on a None frame.
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
            # Crop the face with a 40 px margin, clamped to the frame borders.
            image = frame[max(0, y - 40): min(frame.shape[0], y+h + 40),
                          max(0, x - 40): min(frame.shape[1], x+w + 40)]
            cv2.imwrite('face.png', image)
            gender = model_predict(model, image, mean)
            face_detected = True
        if face_detected:
            # putText needs integer pixel coordinates; the bare divisions were
            # floats under Python 3 and raised a TypeError.
            cv2.putText(image, gender, (int(image.shape[1] / 5), int(image.shape[0] / 5)),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, 255)
            cv2.imshow("Prediction", image)
            _pause("Enter To continue")
            face_detected = False
        cv2.imshow('img', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()


recognize_person()
UI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import torch import PIL import glob import numpy as np import matplotlib.pyplot as plt from src.data import LungDataset, blend, Pad, Crop, Resize # - t = torch.ones(1, 1, 100, 100) torch.nn.ConvTranspose2d(1, 1, 3, 2, 1, 1)(t).size() data_folder = "input/dataset/" origins_folder = data_folder + "images/" masks_folder = data_folder + "masks/" models_folder = "models/" images_folder = "images/" origins_list = list(map(lambda n: n.replace(origins_folder, "").replace(".png", ""), glob.glob(origins_folder + "*.png"))) masks_list = list(map(lambda n: n.replace(masks_folder, "").replace(".png", ""), glob.glob(masks_folder + "*.png"))) origin_mask_list = [(mask_name.replace("_mask", ""), mask_name) for mask_name in masks_list] # + # %%time overall_dataset = LungDataset(origin_mask_list, origins_folder, masks_folder) sizes = [] lung_areas = [] for origin, mask in overall_dataset: sizes.append(mask.size()) lung_areas.append(mask.sum().item()) # - image_area = [w * h for w, h in sizes] plt.hist(image_area) plt.hist(lung_areas, np.linspace(100000, 8000000, 5))
sandbox.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Solution 1

def altitude_advice(height):
    """Return the instruction message for a plane at the given height (ft).

    <= 1000 ft  -> land; 1000-5000 ft (exclusive) -> descend to 1000 ft;
    >= 5000 ft  -> go around and retry.
    """
    if height <= 1000:
        return "Land the plane because height is"
    elif height < 5000:
        return "Come down to 1000ft because height is"
    else:
        return "Go around and try later because height is"


def is_prime(n):
    """Trial division up to sqrt(n); expects an integer n >= 2."""
    for j in range(2, int(n ** 0.5) + 1):
        if n % j == 0:
            return False
    return True


if __name__ == "__main__":
    # Ask for the plane's height three times, as in the original exercise,
    # but without triplicating the branching logic.
    # (Also fixes the "Enter a the height" typo in the prompt.)
    for _ in range(3):
        i = int(input("Enter the height of the plane - "))
        print(altitude_advice(i), i)

    # # Solution 2

    # Print every prime in [2, 200]; sqrt bound instead of dividing by every
    # number below i.
    for i in range(1, 201):
        if i > 1 and is_prime(i):
            print(i)
Assignment Day 3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Parts of this assignment will be **automatically graded**. Please take note of the following: # - Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All). # - You can add additional cells, but it is not recommended to (re)move cells. Cells required for autograding cannot be moved and cells containing tests cannot be edited. # - You are allowed to use a service such as [Google Colaboratory](https://colab.research.google.com/) to work together. However, you **cannot** hand in the notebook that was hosted on Google Colaboratory, but you need to copy your answers into the original notebook and verify that it runs succesfully offline. This is because Google Colaboratory destroys the metadata required for grading. # - Name your notebook **exactly** `{TA_name}_{student1_id}_{student2_id}_lab{i}.ipynb`, for example `wouter_12345_67890_lab1.ipynb` (or elise or stephan, depending on your TA), **otherwise your submission will be skipped by our regex and you will get 0 points** (but no penalty as we cannot parse your student ids ;)). 
# # Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your names below:

NAMES = "<NAME>, <NAME>"

# ---

# + deletable=false editable=false nbgrader={"checksum": "931b3dfcc3a02b92b499929fb27299cb", "grade": false, "grade_id": "cell-fc69f22067705372", "locked": true, "schema_version": 1, "solution": false}
# %matplotlib inline
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
import torch
from torch import nn
import torch.nn.functional as F
from torch import optim
from tqdm import tqdm as _tqdm

# Wrapper that rate-limits tqdm's screen updates (at most one per second)
# so the notebook output buffer is not flooded.
def tqdm(*args, **kwargs):
    return _tqdm(*args, **kwargs, mininterval=1)  # Safety, do not overflow buffer

# Smallest representable float32 increment; used to avoid division by zero.
EPS = float(np.finfo(np.float32).eps)

assert sys.version_info[:3] >= (3, 6, 0), "Make sure you have Python 3.6 installed!"

# + [markdown] deletable=false editable=false nbgrader={"checksum": "e83ecfc2751cf2e6ff05d0c01d311673", "grade": false, "grade_id": "cell-fef7e20e54e6243b", "locked": true, "schema_version": 1, "solution": false}
# ---
# ## 1. Deep Q-Network (DQN) (10 (+ 2 bonus) points)

# + deletable=false editable=false nbgrader={"checksum": "e27fe8f72a248bbcf1f7a21e5550e657", "grade": true, "grade_id": "cell-39519f4ab05eb2a1", "locked": true, "points": 0, "schema_version": 1, "solution": false}
import gym
env = gym.envs.make("CartPole-v0")
# -

# env is a TimeLimit wrapper around an env, so use env.env to look into the env (but otherwise you can forget about this)
??env.env

import time

# The nice thing about the CARTPOLE is that it has very nice rendering functionality (if you are on a local environment). Let's have a look at an episode

# Roll out one episode with a uniformly random policy, rendering each frame.
obs = env.reset()
env.render()
done = False
while not done:
    obs, reward, done, _ = env.step(env.action_space.sample())
    env.render()
    time.sleep(0.05)
env.close()  # Close the environment or you will have a lot of render screens soon

# + [markdown] deletable=false editable=false nbgrader={"checksum": "11a9c014ee5fbe790ce999428cc22658", "grade": false, "grade_id": "cell-2d83f70e62b99520", "locked": true, "schema_version": 1, "solution": false}
# Remember from the previous lab, that in order to optimize a policy we need to estimate the Q-values (e.g. estimate the *action* values). In the CartPole problem, our state is current position of the cart, the current velocity of the cart, the current (angular) position of the pole and the (angular) speed of the pole. As these are continuous variables, we have an infinite number of states (ignoring the fact that a digital computer can only represent finitely many states in finite memory).
# # As we have seen in the previous lab, tabular learning methods such as Q-learning and SARSA actually learn pretty well so if the discretization step is large enough to make it possible for those algorithms to explore state space, we are guaranteed that they will perform somewhat good. # # However, if we need to be able to approximate continuous state space with enough precision (i.e. balancing pole cart with high precision where if angular position of the pole changes by infinitesimal degree, we need to change the action), tabular approach won't work anymore as it would have to store an infinite amount of states, which is not feasible. # + [markdown] deletable=false editable=false nbgrader={"checksum": "cd66b44d93f348df1e0ef8353377c879", "grade": false, "grade_id": "cell-0b3162496f5e6cf5", "locked": true, "schema_version": 1, "solution": false} # ### 1.1 Implement Q-Network # + [markdown] deletable=false editable=false nbgrader={"checksum": "84b9c38718c952ef8e62486fc9bf5e4a", "grade": false, "grade_id": "cell-96a86bcfa1ebc84a", "locked": true, "schema_version": 1, "solution": false} # We will not use the tabular approach but approximate the Q-value function by a general approximator function. We will skip the linear case and directly use a two layer Neural Network. We use [PyTorch](https://pytorch.org/) to implement the network, as this will allow us to train it easily later. We can implement a model using `torch.nn.Sequential`, but with PyTorch it is actually very easy to implement the model (e.g. the forward pass) from scratch. Now implement the `QNetwork.forward` function that uses one hidden layer with ReLU activation (no output activation). 
# + deletable=false nbgrader={"checksum": "4ef7d14363dc2aa4beb638856c57a58c", "grade": false, "grade_id": "cell-216429a5dccf8a0e", "locked": false, "schema_version": 1, "solution": true}
class QNetwork(nn.Module):
    """Two-layer MLP mapping a CartPole state (4 features) to the Q-values
    of the two actions. One ReLU hidden layer, linear output head."""

    def __init__(self, num_hidden=128):
        super().__init__()
        # Layer creation order matters for reproducibility: the seeded RNG
        # initializes l1 before l2, matching the nn.Sequential reference below.
        self.l1 = nn.Linear(4, num_hidden)
        self.l2 = nn.Linear(num_hidden, 2)

    def forward(self, x):
        # No activation on the output: raw Q-value estimates.
        return self.l2(torch.relu(self.l1(x)))


# + deletable=false editable=false nbgrader={"checksum": "2b9a48f9aee9ebc46da01c6f11cd789a", "grade": true, "grade_id": "cell-00ce108d640a5942", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Let's instantiate and test if it works
num_hidden = 128

torch.manual_seed(1234)
model = QNetwork(num_hidden)

torch.manual_seed(1234)
test_model = nn.Sequential(
    nn.Linear(4, num_hidden),
    nn.ReLU(),
    nn.Linear(num_hidden, 2)
)

x = torch.rand(10, 4)

# If you do not need backpropagation, wrap the computation in the torch.no_grad() context
# This saves time and memory, and PyTorch complaints when converting to numpy
with torch.no_grad():
    assert np.allclose(model(x).numpy(), test_model(x).numpy())

# + [markdown] deletable=false editable=false nbgrader={"checksum": "7fc82889691dbd60ff9469b770744fcc", "grade": false, "grade_id": "cell-ca77eae2e62180cf", "locked": true, "schema_version": 1, "solution": false}
# ### 1.2 Experience Replay

# + [markdown] deletable=false editable=false nbgrader={"checksum": "5b3265bef151a12fe6969c378af76be2", "grade": false, "grade_id": "cell-b5b012e42dd2029e", "locked": true, "schema_version": 1, "solution": false}
# What could be a problem with doing gradient updates on a sequence of state, action pairs $((s_t, a_t), (s_{t+1}, a_{t+1}) ...)$ observed while interacting with the environment? How will using *experience replay* help to overcome this (potential problem)?
# + [markdown] deletable=false nbgrader={"checksum": "75e1a8b00b2bfa9b7dd8805b371c6a4e", "grade": true, "grade_id": "cell-70a2e59541668a25", "locked": false, "points": 1, "schema_version": 1, "solution": true} # The problem is that by doing updates on data observed while interacting with the environment we break the iid assumption of stochastic gradient descent methods. As the data are strongly correlated, it is very likely for gradient methods to fail. Also, we avoid multiple wrong predictions on similar states, which can make the value function not explore and get stuck. # By using experience replay, we randomly sample data from the past experience, making the samples independent, which helps the value function to not fall in a bad local minima. # + [markdown] deletable=false editable=false nbgrader={"checksum": "9b3bbd8aaf3aade515736d0d07917a61", "grade": false, "grade_id": "cell-2c1d117a1a75fd69", "locked": true, "schema_version": 1, "solution": false} # Now implement the `push` function that adds a transition to the replay buffer, and the sample function that returns a batch of samples. It should keep at most the maximum number of transitions. Also implement the `sample` function that samples a (random!) batch of data, for use during training (hint: you can use the function `random.sample`). 
# + deletable=false nbgrader={"checksum": "93a9f55f3950fe63b44aa84c5fd7f793", "grade": false, "grade_id": "cell-a3cc876e51eb157f", "locked": false, "schema_version": 1, "solution": true}
import random


class ReplayMemory:
    """FIFO buffer of transitions with a fixed maximum capacity.

    A transition is the 5-tuple (state, action, reward, next_state, done).
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []

    def push(self, transition):
        """Append a transition, evicting the oldest one when full.

        Bug fix: the original trimmed when len == capacity *after* appending,
        so the buffer could never actually hold `capacity` transitions (and a
        capacity-1 buffer stored nothing at all). Evict only once the buffer
        exceeds capacity, and delete in place instead of copying the list.
        """
        self.memory.append(transition)
        if len(self.memory) > self.capacity:
            del self.memory[0]

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        sample = random.sample(self.memory, batch_size)
        return sample

    def __len__(self):
        return len(self.memory)


# + deletable=false editable=false nbgrader={"checksum": "6865749b3a8810bdaaf1604a9cea42e7", "grade": true, "grade_id": "cell-3b90135921c4da76", "locked": true, "points": 1, "schema_version": 1, "solution": false}
capacity = 10
memory = ReplayMemory(capacity)

# Sample a transition
s = env.reset()
a = env.action_space.sample()
s_next, r, done, _ = env.step(a)

# Push a transition
memory.push((s, a, r, s_next, done))

# Sample a batch size of 1
print(memory.sample(1))

# + [markdown] deletable=false editable=false nbgrader={"checksum": "3c742d499c0f9b7f10d1c0c3a085236a", "grade": false, "grade_id": "cell-88f67e3c051da6a9", "locked": true, "schema_version": 1, "solution": false}
# ### 1.3 $\epsilon$psilon greedy policy

# + [markdown] deletable=false editable=false nbgrader={"checksum": "61d26d0dec0133f2aa737ed4711d6e08", "grade": false, "grade_id": "cell-aa3c7d1b3000f697", "locked": true, "schema_version": 1, "solution": false}
# In order to learn a good policy, we need to explore quite a bit initially. As we start to learn a good policy, we want to decrease the exploration. As the amount of exploration using an $\epsilon$-greedy policy is controlled by $\epsilon$, we can define an 'exploration scheme' by writing $\epsilon$ as a function of time.
There are many possible schemes, but we will use a simple one: we will start with only exploring (so taking random actions) at iteration 0, and then in 1000 iterations linearly anneal $\epsilon$ such that after 1000 iterations we take random (exploration) actions with 5\% probability (forever, as you never know if the environment will change). # + deletable=false nbgrader={"checksum": "270ab31d4bb29dc9a05223c16a4967a7", "grade": false, "grade_id": "cell-5789e7a792108576", "locked": false, "schema_version": 1, "solution": true} def get_epsilon(it): return 1 - it*(( 0.95)/1000) if it < 1000 else 0.05 # + deletable=false editable=false nbgrader={"checksum": "b1a81dd07e1b7a98d2cd06ebc171ebdd", "grade": true, "grade_id": "cell-40e66db45e742b2e", "locked": true, "points": 1, "schema_version": 1, "solution": false} # So what's an easy way to check? plt.plot([get_epsilon(it) for it in range(5000)]) # + [markdown] deletable=false editable=false nbgrader={"checksum": "84685c23e4eb899d7fed3a87b7f8915e", "grade": false, "grade_id": "cell-a8b604c9998c6c3b", "locked": true, "schema_version": 1, "solution": false} # Now write a function that takes a state and uses the Q-network to select an ($\epsilon$-greedy) action. It should return a random action with probability epsilon (which we will pass later). Note, you do not need to backpropagate through the model computations, so use `with torch.no_grad():` (see above for example). Unlike numpy, PyTorch has no argmax function, but Google is your friend... Note that to convert a PyTorch tensor with only 1 element (0 dimensional) to a simple python scalar (int or float), you can use the '.item()' function. 
# + deletable=false nbgrader={"checksum": "882f51819100c850120e73340aec387d", "grade": false, "grade_id": "cell-878ad3a637cfb51c", "locked": false, "schema_version": 1, "solution": true} def select_action(model, state, epsilon): with torch.no_grad(): actions = model(torch.FloatTensor(state)) argmax = torch.max(actions, 0)[1] action = np.random.choice(2,1, p = [epsilon / 2 + ((1-epsilon) if i == argmax else 0) for i in range(2) ]) return action[0] # + deletable=false editable=false nbgrader={"checksum": "21f939075cb0c8dde152dabf47568a9d", "grade": true, "grade_id": "cell-e895338d56bee477", "locked": true, "points": 1, "schema_version": 1, "solution": false} s = env.reset() a = select_action(model, s, 0.05) assert not torch.is_tensor(a) print (a) # + [markdown] deletable=false editable=false nbgrader={"checksum": "5d00ab2e5e0b39257771d0e778fda2d6", "grade": false, "grade_id": "cell-ec5e94e0b03f8aec", "locked": true, "schema_version": 1, "solution": false} # ### 1.4 Training function # + [markdown] deletable=false editable=false nbgrader={"checksum": "4839aac72a80552046ebecc40c1615cf", "grade": false, "grade_id": "cell-d1a12cc97386fe56", "locked": true, "schema_version": 1, "solution": false} # Now we will implement the function 'train' that samples a batch from the memory and performs a gradient step using some convenient PyTorch functionality. However, you still need to compute the Q-values for the (state, action) pairs in the experience, as well as their target (e.g. the value they should move towards). What is the target for a Q-learning update? What should be the target if `next_state` is terminal (e.g. `done`)? # # For computing the Q-values for the actions, note that the model returns all action values where you are only interested in a single action value. 
Because of the batch dimension, you can't use simple indexing, but you may want to have a look at [torch.gather](https://pytorch.org/docs/stable/torch.html?highlight=gather#torch.gather) or use [advanced indexing](https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html) (numpy tutorial but works mostly the same in PyTorch). Note, you should NOT modify the function train. You can view the size of a tensor `x` with `x.size()` (similar to `x.shape` in numpy).

# + deletable=false nbgrader={"checksum": "c466ee49add35cb1ec6a3e4a85f733c9", "grade": false, "grade_id": "cell-6c45485324b40081", "locked": false, "schema_version": 1, "solution": true}
def compute_q_val(model, state, action):
    """Return Q(state, action) for each row of the batch.

    The model outputs Q-values for *all* actions; advanced indexing picks
    out the value of the action actually taken in each transition.
    """
    actions = model(torch.FloatTensor(state))
    return actions[range(len(state)), action]


def compute_target(model, reward, next_state, done, discount_factor):
    """Q-learning target: r + gamma * max_a' Q(s', a').

    The (1 - done) factor zeroes the bootstrap term for terminal
    transitions, so their target is just the reward.
    """
    # done is a boolean (vector) that indicates if next_state is terminal (episode is done)
    Qvals = model(next_state)
    target = reward + discount_factor * Qvals.max(1)[0] * (1- done.float())
    return target


def train(model, memory, optimizer, batch_size, discount_factor):
    """One DQN gradient step on a random replay batch; returns the loss."""
    # DO NOT MODIFY THIS FUNCTION

    # don't learn without some decent experience
    if len(memory) < batch_size:
        return None

    # random transition batch is taken from experience replay memory
    transitions = memory.sample(batch_size)

    # transition is a list of 4-tuples, instead we want 4 vectors (as torch.Tensor's)
    state, action, reward, next_state, done = zip(*transitions)

    # convert to PyTorch and define types
    state = torch.tensor(state, dtype=torch.float)
    action = torch.tensor(action, dtype=torch.int64)  # Need 64 bit to use them as index
    next_state = torch.tensor(next_state, dtype=torch.float)
    reward = torch.tensor(reward, dtype=torch.float)
    done = torch.tensor(done, dtype=torch.uint8)  # Boolean

    # compute the q value
    q_val = compute_q_val(model, state, action)

    with torch.no_grad():  # Don't compute gradient info for the target (semi-gradient)
        target = compute_target(model, reward, next_state, done, discount_factor)

    # loss is measured from error between current and newly expected Q values
    loss = F.smooth_l1_loss(q_val, target)

    # backpropagation of loss to Neural Network (PyTorch magic)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    return loss.item()  # Returns a Python scalar, and releases history (similar to .detach())


# + deletable=false editable=false nbgrader={"checksum": "877c400001292b619e6871c1366524b9", "grade": true, "grade_id": "cell-b060b822eec4282f", "locked": true, "points": 2, "schema_version": 1, "solution": false}
# You may want to test your functions individually, but after you do so lets see if the method train works.

batch_size = 64
discount_factor = 0.8
learn_rate = 1e-3

# Simple gradient descent may take long, so we will use Adam
optimizer = optim.Adam(model.parameters(), learn_rate)

# We need a larger memory, fill with dummy data
transition = memory.sample(1)[0]
memory = ReplayMemory(10 * batch_size)
for i in range(batch_size):
    memory.push(transition)

# Now let's see if it works
loss = train(model, memory, optimizer, batch_size, discount_factor)
print (loss)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "2057dee580a43fb0442fe52557c0ac64", "grade": false, "grade_id": "cell-3eafd0ab49103f3b", "locked": true, "schema_version": 1, "solution": false}
# ### 1.5 Put it all together

# + [markdown] deletable=false editable=false nbgrader={"checksum": "06dd71aae5c3c699f2b707b348a88107", "grade": false, "grade_id": "cell-36b8a04b393d8104", "locked": true, "schema_version": 1, "solution": false}
# Now that you have implemented the training step, you should be able to put everything together. Implement the function `run_episodes` that runs a number of episodes of DQN training. It should return the durations (e.g. number of steps) of each episode. Note: we pass the train function as an argument such that we can swap it for a different training step later.
# + deletable=false nbgrader={"checksum": "c3f61b2ca270d84ab9b28d989dd65d4c", "grade": false, "grade_id": "cell-540a7d50ecc1d046", "locked": false, "schema_version": 1, "solution": true}
def run_episodes(train, model, memory, env, num_episodes, batch_size, discount_factor, learn_rate):
    # Runs num_episodes episodes of DQN training with the supplied train step
    # and returns the duration (number of env steps) of each episode.
    optimizer = optim.Adam(model.parameters(), learn_rate)

    global_steps = 0  # Count the steps (do not reset at episode start, to compute epsilon)
    episode_durations = []  #
    for i in range(num_episodes):
        duration = 0
        s = env.reset()
        while True:
            epsilon = get_epsilon(global_steps)
            a = select_action(model, s, epsilon)
            next_state, r, done, _ = env.step(a)
            # TODO: is it correct to compute duration here and not after the break?
            # (Yes — the terminal transition is still a step taken in the env,
            # so counting before the break gives the correct episode length.)
            duration += 1
            memory.push((s, a, r, next_state, done))
            train(model, memory, optimizer, batch_size, discount_factor)
            global_steps += 1
            if done:
                break
            s = next_state
        episode_durations.append(duration)
    return episode_durations


# +
# Let's run it!
num_episodes = 100
batch_size = 64
discount_factor = 0.8
learn_rate = 1e-3
memory = ReplayMemory(10000)
num_hidden = 128
seed = 42  # This is not randomly chosen

# We will seed the algorithm (before initializing QNetwork!) for reproducability
random.seed(seed)
torch.manual_seed(seed)
env.seed(seed)

model = QNetwork(num_hidden)

episode_durations = run_episodes(train, model, memory, env, num_episodes, batch_size, discount_factor, learn_rate)

# + deletable=false editable=false nbgrader={"checksum": "70d16eb61eae34605e8d7813a70a604a", "grade": true, "grade_id": "cell-928ecc11ed5c43d8", "locked": true, "points": 2, "schema_version": 1, "solution": false}
# And see the results
def smooth(x, N):
    # Moving average with window N, computed in linear time via a cumulative sum.
    cumsum = np.cumsum(np.insert(x, 0, 0))
    return (cumsum[N:] - cumsum[:-N]) / float(N)

plt.plot(smooth(episode_durations, 10))
plt.title('Episode durations per episode')

# + [markdown] deletable=false editable=false nbgrader={"checksum": "1e106dba734da10d4d8b3bf90d6bb772", "grade": false, "grade_id": "cell-49e6bf74834a67ef", "locked": true, "schema_version": 1, "solution": false}
# ### 1.6 Semi-gradient vs. true gradient (bonus)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "acf155c686f3916453a3d11d95994987", "grade": false, "grade_id": "cell-fc30be2a6983bc77", "locked": true, "schema_version": 1, "solution": false}
# Note that by using automatic differentiation in PyTorch, it is (relatively) easy to implement the true gradient method. Hint: PyTorch may complain about computing gradients for the target in [smooth_l1_loss](https://pytorch.org/docs/stable/nn.html?highlight=smooth_l1_loss#torch.nn.functional.smooth_l1_loss). How can you circumvent this problem? Implement the `train_true_gradient` method below.
# + deletable=false nbgrader={"checksum": "3d1e72257ed8c59175352e163f1bfdaf", "grade": true, "grade_id": "cell-71707640573b23d1", "locked": false, "points": 1, "schema_version": 1, "solution": true}
def train_true_gradient(model, memory, optimizer, batch_size, discount_factor):
    # Same as `train`, but WITHOUT torch.no_grad() around the target: the
    # gradient also flows through the bootstrapped target (true gradient).

    # don't learn without some decent experience
    if len(memory) < batch_size:
        return None

    # random transition batch is taken from experience replay memory
    transitions = memory.sample(batch_size)

    # transition is a list of 4-tuples, instead we want 4 vectors (as torch.Tensor's)
    state, action, reward, next_state, done = zip(*transitions)

    # convert to PyTorch and define types
    state = torch.tensor(state, dtype=torch.float)
    action = torch.tensor(action, dtype=torch.int64)  # Need 64 bit to use them as index
    next_state = torch.tensor(next_state, dtype=torch.float)
    reward = torch.tensor(reward, dtype=torch.float)
    done = torch.tensor(done, dtype=torch.uint8)  # Boolean

    # compute the q value
    q_val = compute_q_val(model, state, action)

    # with torch.no_grad():  # Don't compute gradient info for the target (semi-gradient)
    target = compute_target(model, reward, next_state, done, discount_factor)

    # loss is measured from error between current and newly expected Q values
    # we can do this as the smooth l1 is symmetric
    # (comparing the residual against zero avoids the complaint about a
    # differentiable target while keeping gradients through both arguments)
    loss = F.smooth_l1_loss(target - q_val , torch.zeros_like(q_val))
    # alternative: calculate gradient two times for both variables
    # loss = F.smooth_l1_loss(q_val, target.detach())
    # loss2 = F.smooth_l1_loss(target, q_val.detach())
    # loss = loss+loss2

    # backpropagation of loss to Neural Network (PyTorch magic)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    return loss.item()  # Returns a Python scalar, and releases history (similar to .detach())


random.seed(seed)
torch.manual_seed(seed)
env.seed(seed)

model = QNetwork(num_hidden)

episode_durations_true_gradient = run_episodes(
    train_true_gradient, model, memory, env, num_episodes, batch_size, discount_factor, learn_rate)

plt.plot(smooth(episode_durations, 20))
plt.plot(smooth(episode_durations_true_gradient, 20))
plt.title('Episode durations per episode')
plt.legend(['Semi-gradient', 'True gradient'])

# + [markdown] deletable=false editable=false nbgrader={"checksum": "95b462060bc00fccd7e8bc2ccc857215", "grade": false, "grade_id": "cell-b6fb5a1b0894fb4e", "locked": true, "schema_version": 1, "solution": false}
# Which algorithm performs better? Is this what you would expect? Can you explain this?
#
# Note: you may want to play around with the number of episodes to answer this question, but please reset it to 100 before handing in the notebook.

# + [markdown] deletable=false nbgrader={"checksum": "b2e5712195d20cce7d1a6afb34e24a41", "grade": true, "grade_id": "cell-d99dae457ea5bde6", "locked": false, "points": 1, "schema_version": 1, "solution": true}
# Using true gradient leads to better rewards in our experiments, in less number of episodes. This is exactly what we expected, as we fully use the information available (q values and target) in the gradient step. In the semi-gradient, we update the q values so that they get closer to the target. However, in the full gradient, we do so by also changing the estimate of the target through the gradient. Also, using the semi gradient we implicitly make the assumption that the target values will not change, which is not the case. Thus, it can be observed that with more episodes, the semi-gradient method tends to diverge sometimes and lead to slightly lower rewards.

# + [markdown] deletable=false editable=false nbgrader={"checksum": "de7203182e41f55f391af5892477e89d", "grade": false, "grade_id": "cell-6607b79e73a101a9", "locked": true, "schema_version": 1, "solution": false}
# ---
#
# ## 2.
Policy Gradient (8 points)

# + [markdown] deletable=false editable=false nbgrader={"checksum": "951b88e9cd8396d088d3f80e6da9690c", "grade": false, "grade_id": "cell-083fe71da94aa7aa", "locked": true, "schema_version": 1, "solution": false}
# So we have spent a lot of time working on *value based* methods. We will now switch to *policy based* methods, i.e. learn a policy directly rather than learn a value function from which the policy follows. Mention two advantages of using a policy based method.

# + [markdown] deletable=false nbgrader={"checksum": "a5c1f505cb22eca6eb3b8213ff23e60f", "grade": true, "grade_id": "cell-134510705650d5ac", "locked": false, "points": 2, "schema_version": 1, "solution": true}
# The main benefits come from the fact that you can learn a policy directly, which makes the approach much more flexible.
# Namely,
# 1. Value based methods can't directly learn stochastic optimal policy as they have to base it on the value function, which doesn't give explicit stochasticity, while policy based methods can directly output stochastic optimal policy.
# 2. Policy based methods allow us to incorporate prior knowledge about the expected form of the policy into the learning algorithm directly. This way we can expect the system to perform better, as it will search within a class of policies that is already well suited to the task.
# 3. The last point is that we can approximate continuous policies with policy based methods, while this is impossible with value based methods.

# + [markdown] deletable=false editable=false nbgrader={"checksum": "174629c02b62968e23fa6088c4d5763b", "grade": false, "grade_id": "cell-76a10fe31897025f", "locked": true, "schema_version": 1, "solution": false}
# ### 2.1 Policy Network

# + [markdown] deletable=false editable=false nbgrader={"checksum": "2bc16b45e6145226b8a6f5117003b7f5", "grade": false, "grade_id": "cell-34f0712f792bbcca", "locked": true, "schema_version": 1, "solution": false}
# In order to do so, we will implement a Policy network.
Although in general this does not have to be the case, we will use an architecture very similar to the Q-network (two layers with ReLU activation for the hidden layer). Since we have discrete actions, our model will output one value per action, where each value represents the (normalized!) log-probability of selecting that action. *Use the (log-)softmax activation function.* # + deletable=false nbgrader={"checksum": "155baf230fd6deb5f6ccf93138fa3419", "grade": false, "grade_id": "cell-6a31440f9477f963", "locked": false, "schema_version": 1, "solution": true} class PolicyNetwork(nn.Module): def __init__(self, num_hidden=128): nn.Module.__init__(self) self.mlp = nn.Sequential( nn.Linear(4, num_hidden), nn.ReLU(), nn.Linear(num_hidden, 2), nn.LogSoftmax(dim = -1) ) def forward(self, x): out = self.mlp(x) return out # + deletable=false editable=false nbgrader={"checksum": "3cb94e04b03fa4b663bcf38a96ef656d", "grade": true, "grade_id": "cell-9d280fe6520edc91", "locked": true, "points": 1, "schema_version": 1, "solution": false} # Let's instantiate and test if it works num_hidden = 128 torch.manual_seed(1234) model = PolicyNetwork(num_hidden) x = torch.rand(10, 4) log_p = model(x) # Does the outcome make sense? print(log_p.exp()) # + [markdown] deletable=false editable=false nbgrader={"checksum": "619c714e930c0d167304597d188f229b", "grade": false, "grade_id": "cell-35294ca4eda15b11", "locked": true, "schema_version": 1, "solution": false} # ### 2.2 Monte Carlo REINFORCE # + [markdown] deletable=false editable=false nbgrader={"checksum": "93ed9cbcf70541f5a04709ee89a16e78", "grade": false, "grade_id": "cell-44f33e587542974d", "locked": true, "schema_version": 1, "solution": false} # Now we will implement the *Monte Carlo* policy gradient algorithm. Remember from lab 1 that this means that we will estimate returns for states by sample episodes. Compared to DQN, this means that we do *not* perform an update step at every environment step, but only at the end of each episode. 
This means that we should generate an episode of data, compute the REINFORCE loss (which requires computing the returns) and then perform a gradient step.
#
# To help you, we already implemented a few functions that you can (but do not have to) use.
#
# * You can use `torch.multinomial` to sample from a categorical distribution.
# * The REINFORCE loss is defined as $- \sum_t \log \pi_\theta(a_t|s_t) G_t$, which means that you should compute the (discounted) return $G_t$ for all $t$. Make sure that you do this in **linear time**, otherwise your algorithm will be very slow! Note the - (minus) since you want to maximize return while you want to minimize the loss.
# * Importantly, you should **normalize the returns** (not the rewards!, e.g. subtract mean and divide by standard deviation within the episode) before computing the loss, or your estimator will have very high variance.

# + deletable=false nbgrader={"checksum": "3b2c75181678fed25fcc7c8b39bb7de3", "grade": true, "grade_id": "cell-3f6e32c4931392bf", "locked": false, "points": 5, "schema_version": 1, "solution": true}
from torch.distributions import Categorical
import random

def select_action(model, state):
    # Samples an action according to the probability distribution induced by the model
    # Also returns the log_probability
    # YOUR CODE HERE
    log_p = model(torch.tensor(state).float())
    action = torch.multinomial(torch.exp(log_p), 1).item()
    return action, log_p[action]

def run_episode(env, model):
    # Rolls out one full episode with the current policy; returns a list of
    # (state, log_prob_of_sampled_action, reward) tuples.
    episode = []
    done = False
    state = env.reset()
    single_transition = []  # NOTE(review): unused local, never appended to
    while not done:
        previous_state = state
        action, log_p = select_action(model, state)
        state, reward, done, _ = env.step(action)
        episode.append((previous_state, log_p, reward))
    return episode

def compute_reinforce_loss(episode, discount_factor):
    # Compute the reinforce loss
    # Make sure that your function runs in LINEAR TIME
    # Don't forget to normalize your RETURNS (not rewards)
    # Note that the rewards/returns should be maximized
    # while the loss should be minimized so you need a - somewhere

    # YOUR CODE HERE
    states, log_probs, rewards = zip(*episode)
    log_probs = torch.stack(log_probs)

    # Discounted returns, computed backwards in a single pass (linear time).
    episode_return = 0.0
    returns = []
    for r in rewards[::-1]:
        episode_return = r + discount_factor * episode_return
        returns.append(episode_return)
    returns = torch.tensor(returns[::-1])

    # Normalize within the episode to reduce gradient variance.
    # NOTE(review): std is 0 for a length-1 episode (division yields nan);
    # cannot occur in CartPole but worth a guard in general.
    returns -= torch.mean(returns)
    returns /= torch.std(returns)

    loss = -(returns * log_probs).sum()
    return loss

def run_episodes_policy_gradient(model, env, num_episodes, discount_factor, learn_rate):
    # Monte Carlo REINFORCE: one gradient step per completed episode.
    optimizer = optim.Adam(model.parameters(), learn_rate)

    episode_durations = []
    for i in range(num_episodes):
        # YOUR CODE HERE
        episode = run_episode(env, model)
        loss = compute_reinforce_loss(episode, discount_factor)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % 10 == 0:
            # Green ANSI escape when the episode reaches the 195-step solve
            # threshold; NOTE(review): '\033[99m' is not a standard SGR code.
            print("{2} Episode {0} finished after {1} steps"
                  .format(i, len(episode), '\033[92m' if len(episode) >= 195 else '\033[99m'))
        episode_durations.append(len(episode))

    return episode_durations


# +
# Feel free to play around with the parameters!
num_episodes = 500 discount_factor = 0.99 learn_rate = 0.01 seed = 42 random.seed(seed) torch.manual_seed(seed) env.seed(seed) model = PolicyNetwork(num_hidden) episode_durations_policy_gradient = run_episodes_policy_gradient( model, env, num_episodes, discount_factor, learn_rate) plt.plot(smooth(episode_durations_policy_gradient, 10)) plt.title('Episode durations per episode') plt.legend(['Policy gradient']) # + [markdown] deletable=false editable=false nbgrader={"checksum": "b9fe846472bc09094ba671593c4b40b4", "grade": false, "grade_id": "cell-af9c49b396393dc0", "locked": true, "schema_version": 1, "solution": false} # --- # ## Actor-Critic (7 points) # + [markdown] deletable=false editable=false nbgrader={"checksum": "ff32c0931b08aa9a5719639105a7b3e5", "grade": false, "grade_id": "cell-7eabad968ce02adf", "locked": true, "schema_version": 1, "solution": false} # We will now implement the basic Actor-Critic algorithm, which means that instead of using Monte Carlo returns, we will bootstrap (1-step) returns using a critic (state-value function), so $G_t = R_t + \gamma V(s_{t+1})$. What happens at the end of the episode? Hint: you may find it useful to have a look at the `train` method for DQN. # # * Note that we now have to train an actor (policy) and a critic (value network). # * We will do this using a single optimizer, which means that we have to sum the loss for the actor and the critic into a single loss term. # * For the critic, use the `smooth_l1_loss` like with DQN. # * For the actor, the loss should be the REINFORCE loss, but with two differences: # - Instead of the Monte Carlo return $G_t$, use the one step return $G_{t:t+1}$ where the critic is used to bootstrap the value of $s_{t+1}$. # - Instead of normalizing the returns (which can be viewed as using the average as baseline and then scaling), we will use the estimated value $V(s_t)$ as baseline. 
# * **Important**: note that you cannot use `with torch.no_grad():` to compute the critic value (for the current state) since you need gradients to train the critic! However, when using the value to compute the actor loss, you do not want to get gradients of the critic parameters w.r.t. the actor loss (e.g. your target and baseline must be constant)! Therefore, use `v.detach()` on the output of the critic when it is used in the loss term for the actor, this will make sure the value(s) are treated as a constant and no gradients will be backpropagated.

# + deletable=false nbgrader={"checksum": "3b649f137296d2c6e9ac367781f1b04e", "grade": true, "grade_id": "cell-5a7326fd2ab9349c", "locked": false, "points": 5, "schema_version": 1, "solution": true}
class ValueNetwork(nn.Module):
    # Two-layer MLP critic: 4 state variables in, scalar state value out.

    def __init__(self, num_hidden=128):
        nn.Module.__init__(self)
        self.mlp = nn.Sequential(
            nn.Linear(4, num_hidden),
            nn.ReLU(),
            nn.Linear(num_hidden, 1)
        )

    def forward(self, x):
        out = self.mlp(x)
        return out


def select_action(model, state):
    # Samples an action according to the probability distribution induced by the model
    # Also returns the log_probability
    # YOUR CODE HERE
    log_p = model(torch.tensor(state).float())
    probs = Categorical(torch.exp(log_p))
    action = probs.sample()
    log_p = log_p[range(len(state)), action]

    # action and log_p should be a 1 dimensional vector
    n = len(state)
    assert action.size() == (n, )
    assert log_p.size() == (n, )
    return action, log_p


def train_actor_critic(actor, critic, optimizer, log_ps, state, reward, next_state, done, discount_factor):
    # One combined gradient step for actor and critic on a batch of one-step
    # transitions (one per parallel environment).
    #
    # Critic: regress V(s) towards the bootstrapped one-step target
    #   G = r + gamma * V(s'), where the bootstrap term is masked out for
    #   terminal transitions (there the target is just the reward).
    # Actor: REINFORCE with the one-step return and V(s) as baseline, i.e.
    #   weight -log pi(a|s) by the DETACHED TD error G - V(s).
    v_next = critic(next_state).squeeze(1)
    v_cur = critic(state).squeeze(1)

    # Bug fixes w.r.t. the previous version:
    # 1. Mask with (1 - done) instead of (~done): `~` on a uint8 tensor is
    #    bitwise NOT on recent PyTorch (0 -> 255), which corrupts the mask.
    # 2. Mask only the bootstrap term, not the whole TD error — at a terminal
    #    step the critic should still learn V(s) = reward.
    # 3. The old code computed `error = tgt - v_cur` where `tgt` was already
    #    the full TD error, subtracting v_cur twice.
    # The bootstrap is detached so the target is a constant (semi-gradient).
    mask = 1 - done.float()
    target = reward + discount_factor * v_next.detach() * mask
    error = target - v_cur

    value_loss = F.smooth_l1_loss(v_cur, target)
    actor_loss = (-(log_ps * error.detach())).mean()

    # The loss is composed of the value_loss (for the critic) and the actor_loss
    loss = value_loss + actor_loss

    # backpropagation of loss to Neural Network (PyTorch magic)
    optimizer.zero_grad()
    loss.backward()
    torch.nn.utils.clip_grad_norm_(actor.parameters(), 1)
    torch.nn.utils.clip_grad_norm_(critic.parameters(), 1)
    optimizer.step()

    return loss.item(), value_loss.item(), actor_loss.item()  # Returns a Python scalar, and releases history (similar to .detach())


def run_episodes_actor_critic(actor, critic, envs, max_episodes, max_steps, discount_factor, actor_learn_rate, critic_learn_rate):
    # Steps len(envs) environments in lockstep, training after every step,
    # until max_episodes episodes have finished or max_steps steps passed.
    # We can use a single optimizer for both the actor and the critic, even with separate learn rates
    optimizer = optim.Adam([
        {'params': actor.parameters(), 'lr': actor_learn_rate},
        {'params': critic.parameters(), 'lr': critic_learn_rate}
    ])

    episode_durations = []
    state = torch.tensor([env.reset() for env in envs], dtype=torch.float)
    current_episode_lengths = torch.zeros(len(envs), dtype=torch.int64)
    step_losses = []  # Keep track of losses for plotting

    for i in range(max_steps):
        if i % 100 == 0:
            # Bug fix: progress line previously read the unrelated global
            # `num_episodes` instead of this function's `max_episodes`.
            print(f"Step {i}, finished {len(episode_durations)} / {max_episodes} episodes, average episode duration of last 100 episodes: {np.mean(episode_durations[-100:])}")
        action, log_ps = select_action(actor, state)
        next_state, reward, done, _ = zip(*[env.step(a.item()) for env, a in zip(envs, action)])
        next_state = torch.tensor(next_state, dtype=torch.float)
        reward = torch.tensor(reward, dtype=torch.float)
        done = torch.tensor(done, dtype=torch.uint8)  # Boolean
        current_episode_lengths += 1

        losses = train_actor_critic(actor, critic, optimizer, log_ps, state, reward, next_state, done, discount_factor)
        step_losses.append(losses)

        # Reset envs that are done
        next_state = torch.tensor([
            env.reset() if d else s.tolist()
            for env, s, d in zip(envs, next_state, done)
        ], dtype=torch.float)
        episode_durations.extend(current_episode_lengths[done])
        current_episode_lengths[done] = 0  # PyTorch can also work in place

        state = next_state

        # Check if we have finished sufficiently many episodes
        if len(episode_durations) >= max_episodes:
            break

    return episode_durations[:max_episodes], step_losses  # In case we want exactly num_episodes returned


num_envs = 16
max_steps = 10000
max_episodes = 10000
discount_factor = 0.8
lr_actor = 1e-3
lr_critic = 1e-3
seed = 42

actor = PolicyNetwork(num_hidden)
critic = ValueNetwork(num_hidden)
envs = [gym.envs.make("CartPole-v0") for i in range(num_envs)]
for i, env in enumerate(envs):
    env.seed(seed + i)
torch.manual_seed(seed)

episode_durations, step_losses = run_episodes_actor_critic(actor, critic, envs, max_episodes, max_steps, discount_factor, lr_actor, lr_critic)

# +
plt.plot(smooth(episode_durations, 100))
plt.title('Episode durations')
plt.show()

loss, v_loss, a_loss = zip(*step_losses)
plt.plot(smooth(v_loss, 100))
plt.title('Value loss')
plt.show()

plt.plot(smooth(a_loss, 100))
plt.title('Actor loss')
plt.show()

# + [markdown] deletable=false editable=false nbgrader={"checksum": "de8c4cba2ebd1a8bba2236f92a0b550c", "grade": false, "grade_id": "cell-8d15d4c9c0310bec", "locked": true, "schema_version": 1, "solution": false}
# What is the difficulty of training AC algorithms? What could you try to do to overcome these difficulties? Hint: look at some online implementations.

# + [markdown] deletable=false nbgrader={"checksum": "1e51e82a7730101dfd07b2f0e470d1b4", "grade": true, "grade_id": "cell-f68c6134a9df40b9", "locked": false, "points": 2, "schema_version": 1, "solution": true}
# The main issue for the training of the AC algorithm is that we use function approximators for estimating both policy and action-value functions.
#
# As the Critic computes the value of taking action at the specific state and then the Actor updates its policy parameters using this value, if any of the agents doesn't perform well, this directly affects the other agent.
#
# Therefore, the algorithm might diverge as poor performance of one of them will make the other perform worse and this can accumulate up until the divergence.
#
# A potential way of avoiding those issues would be to pre-train the critic using an $\epsilon$-greedy policy and then include the actor into the training process. This way the actor would already have better estimates of the action-state values and will be able to learn a better policy, which in turn will help the critic perform better.

# + [markdown] deletable=false editable=false nbgrader={"checksum": "5947c1e643f533003715ae8da659af9e", "grade": false, "grade_id": "cell-ad1138b69e6728a0", "locked": true, "schema_version": 1, "solution": false}
# ## Deep Reinforcement Learning (5 bonus points)
# Note that so far we used the state variables as input. However, the true power of Deep Learning is that we can directly learn from raw inputs, e.g. we can learn to balance the cart pole *by just looking at the screen*. This probably means that you need a deep(er) (convolutional) network, as well as tweaking some parameters, running for more iterations (perhaps on GPU) and do other tricks to stabilize learning. Can you get this to work? This will earn you bonus points!
#
# Hints:
# * You may want to use [Google Colab](https://colab.research.google.com/) such that you can benefit from GPU acceleration.
# * Even if you don't use Colab, save the weights of your final model and load it in the code here (see example below). Hand in the model file with the .ipynb in a .zip. We likely won't be able to run your training code during grading!
# * To run the code below, you need to install `torchvision`, for this uncomment the two lines in the cell below or run the command in a terminal. Note: you may need to restart the terminal after installing.
# * Preprocessing is already done for you, and the observation is the difference between two consecutive frames such that the model can 'see' (angular) speed from a single image. Now do you see why we (sometimes) use the word observation (and not state)?
# +
# # %%bash
# conda install torchvision -c pytorch

# + deletable=false editable=false nbgrader={"checksum": "f660e1484fe2bf60d66467326eacb1ba", "grade": false, "grade_id": "cell-9c9dfa80827c5680", "locked": true, "schema_version": 1, "solution": false}
import torchvision.transforms as T
from PIL import Image

resize = T.Compose([T.ToPILImage(),
                    T.Resize(40, interpolation=Image.CUBIC),
                    T.ToTensor()])


class CartPoleRawEnv(gym.Env):
    # Wraps CartPole-v0 so that observations are screen-difference images
    # (current frame minus previous frame) instead of the 4 state variables.

    def __init__(self, *args, **kwargs):
        self._env = gym.make('CartPole-v0', *args, **kwargs)  #.unwrapped
        self.action_space = self._env.action_space
        screen_height, screen_width = 40, 80  # TODO
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(screen_height, screen_width, 3), dtype=np.uint8)

    def seed(self, seed=None):
        return self._env.seed(seed)

    def reset(self):
        # After a reset both frames are identical, so the first observation is
        # an all-zero difference image.
        s = self._env.reset()
        self.prev_screen = self.screen = self.get_screen()
        return self._get_observation()

    def step(self, action):
        s, r, done, info = self._env.step(action)
        self.prev_screen = self.screen
        self.screen = self.get_screen()
        return self._get_observation(), r, done, info

    def _get_observation(self):
        # Frame difference, so that motion (e.g. angular speed) is visible
        # from a single observation.
        return self.screen - self.prev_screen

    def _get_cart_location(self, screen_width):
        _env = self._env.unwrapped
        world_width = _env.x_threshold * 2
        scale = screen_width / world_width
        return int(_env.state[0] * scale + screen_width / 2.0)  # MIDDLE OF CART

    def get_screen(self):
        # Renders the env, crops a square patch centered on the cart, and
        # returns a resized (1, C, H, W) float tensor.
        screen = self._env.unwrapped.render(mode='rgb_array').transpose(
            (2, 0, 1))  # transpose into torch order (CHW)
        # Strip off the top and bottom of the screen
        _, screen_height, screen_width = screen.shape
        screen = screen[:, screen_height * 4 // 10:screen_height * 8 // 10]
        view_width = screen_height * 8 // 10
        cart_location = self._get_cart_location(screen_width)
        if cart_location < view_width // 2:
            slice_range = slice(view_width)
        elif cart_location > (screen_width - view_width // 2):
            slice_range = slice(-view_width, None)
        else:
            slice_range = slice(cart_location - view_width // 2,
                                cart_location + view_width // 2)
        # Strip off the edges, so that we have a square image centered on a cart
        screen = screen[:, :, slice_range]
        # Convert to float, rescale, convert to torch tensor
        # (this doesn't require a copy)
        screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
        screen = torch.from_numpy(screen)
        # Resize, and add a batch dimension (BCHW)
        #return screen.unsqueeze(0).to(device)
        return resize(screen).unsqueeze(0)

    def close(self):
        return self._env.close()


raw_env = CartPoleRawEnv()
s = raw_env.reset()
# s, r, done, _ = raw_env.step(env.action_space.sample())
raw_env.reset()

plt.figure()
plt.imshow(raw_env.get_screen().cpu().squeeze(0).permute(1, 2, 0).numpy(),
           interpolation='none')
plt.title('Example extracted screen')
plt.show()

# Observations are (-1, 1) while we need to plot (0, 1) so show (rgb + 1) / 2
plt.figure()
plt.imshow((s.cpu().squeeze(0).permute(1, 2, 0).numpy() + 1) / 2,
           interpolation='none')
plt.title('Example observation')
plt.show()
raw_env.close()

# +
### NOTE: this is just a rewriting of REINFORCE
# we modified it to use cuda and adjusted to the new state shape
from torch.distributions import Categorical
import random

def deep_select_action(model, state):
    # Samples an action according to the probability distribution induced by the model
    # Also returns the log_probability
    # YOUR CODE HERE
    log_p = model(state)
    probs = Categorical(torch.exp(log_p))
    action = probs.sample().item()
    return action, log_p.squeeze()[action]

def deep_run_episode(env, model):
    # Rolls out one episode on the raw-pixel env; tensors are moved to
    # `device` (assumed module-level global, 'cpu' or 'cuda' — TODO confirm).
    episode = []
    done = False
    state = env.reset().to(device)
    single_transition = []  # NOTE(review): unused local
    while not done:
        previous_state = state
        action, log_p = deep_select_action(model, state)
        state, reward, done, _ = env.step(action)
        state = state.to(device)
        episode.append((previous_state, log_p, reward))
    return episode

def deep_compute_reinforce_loss(episode, discount_factor):
    # Compute the reinforce loss
    # Make sure that your function runs in LINEAR TIME
    # Don't forget to normalize your RETURNS (not rewards)
    # Note that the rewards/returns should be maximized
    # while the loss should be minimized so you need a - somewhere

    # YOUR CODE HERE
    states, log_probs, rewards = zip(*episode)
    log_probs = torch.stack(log_probs)

    # Discounted returns, computed backwards in a single pass (linear time).
    episode_return = 0.0
    returns = []
    for r in rewards[::-1]:
        episode_return = r + discount_factor * episode_return
        returns.append(episode_return)
    returns = torch.tensor(returns[::-1]).to(device)

    # Normalize within the episode to reduce gradient variance.
    returns -= torch.mean(returns)
    returns /= torch.std(returns)

    loss = -(returns * log_probs).sum()
    return loss

def deep_run_episodes_policy_gradient(model, env, num_episodes, discount_factor, learn_rate):
    # One REINFORCE gradient step per episode; prints progress every 10 episodes.
    optimizer = optim.Adam(model.parameters(), learn_rate)

    episode_durations = []
    for i in range(num_episodes):
        # YOUR CODE HERE
        episode = deep_run_episode(env, model)
        loss = deep_compute_reinforce_loss(episode, discount_factor)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % 10 == 0:
            print("{2} Episode {0} finished after {1} steps"
                  .format(i, len(episode), '\033[92m' if len(episode) >= 195 else '\033[99m'))
        episode_durations.append(len(episode))

    return episode_durations


# +
# Maybe you should make it a bit deeper?
class DeepPolicy(nn.Module):
    """Convolutional policy network mapping a screen observation
    (BCHW, 3 channels) to log-probabilities over CartPole's 2 actions.

    NOTE(review): the flattened CNN output must have exactly 512 features,
    which pins the expected input resolution -- confirm it matches what
    CartPoleRawEnv produces.
    """

    def __init__(self):
        nn.Module.__init__(self)
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=(3, 3), stride=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=(3, 3), stride=2),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 64, kernel_size=(3, 3), stride=2),
            nn.MaxPool2d((2, 2)),
        )
        self.mlp = nn.Sequential(
            nn.Linear(512, 64),
            nn.ReLU(),
            nn.Linear(64, 2)
        )

    def forward(self, x):
        x = self.cnn(x)
        out = self.mlp(x.view(x.size(0), -1))
        # Log-probabilities, as expected by deep_select_action.
        return F.log_softmax(out, -1)


model = DeepPolicy()

filename = 'weights.pt'
if os.path.isfile(filename):
    # Reuse previously trained weights instead of retraining.
    # FIX: the message contained a broken "(unknown)" placeholder instead
    # of interpolating the actual path.
    print(f"Loading weights from {filename}")
    weights = torch.load(filename)
    print(weights.keys())
    model.load_state_dict(weights['policy-weights'])
else:
    # Train from scratch with REINFORCE.
    device = 'cpu'
    num_episodes = 1000
    discount_factor = 0.99
    learn_rate = 0.0001
    seed = 42

    env = CartPoleRawEnv()
    random.seed(seed)
    torch.manual_seed(seed)
    env.seed(seed)
    episode_durations_policy_gradient = deep_run_episodes_policy_gradient(
        model, env, num_episodes, discount_factor, learn_rate)
    ### TODO some training here, maybe? Or run this on a different machine?
    torch.manual_seed(42)
    print(f"Saving weights to {filename}")
    torch.save({
        # You can add more here if you need, e.g. critic
        # FIX: was `policy.state_dict()`, but no `policy` variable exists in
        # this notebook (NameError) -- the trained network is `model`.
        'policy-weights': model.state_dict()  # Always save weights rather than objects
    }, filename)


def bonus_get_action(x):
    """Sample an action index from the policy's distribution for state `x`."""
    return model(x).exp().multinomial(1)[:, 0]


# + deletable=false editable=false nbgrader={"checksum": "b800bfb91f987f14e0c35bc0c41d538b", "grade": true, "grade_id": "cell-0d7bd58a23fdfabb", "locked": true, "points": 5, "schema_version": 1, "solution": false}
# Graded evaluation: run 20 seeded episodes with the (stochastic) policy
# and record how long the pole stays up in each.
seed = 42
episode_durations = []
for i in range(20):  # Not too many since it may take forever to render
    test_env = CartPoleRawEnv()
    test_env.seed(seed + i)
    state = test_env.reset()
    done = False
    steps = 0
    while not done:
        steps += 1
        with torch.no_grad():
            action = bonus_get_action(state).item()
        state, reward, done, _ = test_env.step(action)
    episode_durations.append(steps)
    test_env.close()

plt.plot(smooth(episode_durations, 100))
plt.title('Episode durations')
plt.show()
# -

plt.plot(smooth(episode_durations, 10))
plt.title('Episode durations')
plt.show()
labs/lab2/lab2_sol.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/kmjohnson3/ML4MI_Bootcamp/blob/master/ImageSegmentation/SegmentationBootcamp_Master__broken.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="2KX46C7E0UNn" # # Introduction # This tutorial will give an example application of using deep learning for medical image segmentation. This example will demonstrate how to train a convolutional neural network for the purpose of lung segmentation in CT images. The tutorial will have 3 main parts: # 1. Loading and examining data for model training # 2. Creating, training, and evaluating a deep learning segmentation model # 3. Making improvements to the model with skip connections # # Keep an eye out for questions through this demo to test your new DL knowledge and critical thinking. There are answers at the end of the document. # + [markdown] colab_type="text" id="aA5x8w1J0UNo" # ### Initial preparation # + [markdown] colab_type="text" id="J1_ONBCG0UNo" # Import necessary modules. We also need to install the Python package PyDicom # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="-QzVX9iz0UNp" outputId="35933a02-e194-412b-ebd6-91df25acb7b7" import os # operating system operations os.environ["TF_CPP_MIN_LOG_LEVEL"]="3" import tensorflow as tf import matplotlib.pyplot as plt # for plotting our results import numpy as np np.random.seed(1) # set seed for random number generator # !pip install pydicom # + [markdown] colab_type="text" id="GpPvW5ZW0UNs" # Next, we need to copy the files to a place where our CoLab notebook can read them. 
# + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" id="rMUfGgtR0rZG" outputId="1ca7cb9f-409a-4b1f-a8df-3f2709805a35" # Mount the Google Drive from google.colab import drive drive.mount('/content/drive') # Copy data to this VM import tarfile from tqdm import tqdm with tarfile.open(name='/content/drive/My Drive/ML4MI_BOOTCAMP_DATA/ImageSegmentation.tar') as tar: for member in tqdm(iterable=tar.getmembers(), desc='Decompressing', unit='file', total=len(tar.getmembers())): tar.extract(member=member,path='/home/') # Change the working folder # %cd /home/ImageSegmentation # Download helper function to this VM # !wget https://raw.githubusercontent.com/kmjohnson3/ML4MI_bootcamp/master/ImageSegmentation/Demo_Functions.py # + [markdown] colab_type="text" id="zJ2wNIPw0UNs" # # Part 1: Data Preparation # All deep learning applications start with getting the data. In this case, the data has already been collected from subjects through CT scans and annotations have been made. # # Additionally, we have already created a function for loading in this data in an organized way and get it ready to feed into a deep learning model for training. The data is currently stored as DICOMs in a mess of directories. This function sorts through the directories and loads in the necessary images and masks. # # So, we can just import this function and load the data into our various needed arrays. # + colab={"base_uri": "https://localhost:8080/", "height": 325} colab_type="code" id="8XIzxaAK0UNt" outputId="c087d032-79b1-400b-f4d9-f074eba1bbb8" from Demo_Functions import GetLungSegData trainX,trainY,valX,valY = GetLungSegData('/home/ImageSegmentation/LCTSC') # + [markdown] colab_type="text" id="BfsSi42g0UNv" # Let's examine these arrays we've loaded. 
# + colab={} colab_type="code" id="_f270kUy0UNw" print(trainX.shape) print(trainY.shape) print(valX.shape) print(valY.shape) # + [markdown] colab_type="text" id="e2myYIC50UNy" # We have two sets of corresponding images and masks. There are 1299 slices of 256x256 images in the training set, and 144 in the validation set. # # Each of these sets has a 4th dimension that has length 1. Why? # # Keras, and most other deep learning libraries, expects images to be in color. That is, they have R,G, and B color channels. So images are expected to be passed in as 4 dimensional arrays. In this case, we are passing in grayscale images, so they will just have a single color channel instead of 3. # + [markdown] colab_type="text" id="3FXCKR_c0UNy" # ##### Question 1: What could be another use for having multiple input channels? # Hint: Think MRI. # + [markdown] colab_type="text" id="WIwIKgtp0UNz" # At this point, it would be good to check that our data loaded correctly and the masks correspond to the input images. We'll using the python plotting package matplotlib to display a sample image and mask side by side for both the training and validation datasets. # # It's a good idea to try several different display indices to make sure all your data is lined up correctly. # + colab={} colab_type="code" id="0qU93mMD0UN0" # pick a random index for display disp_ind = 1 # make a figure plt.figure() # concatenate the input and target image together disp = np.c_[trainX[disp_ind,...,0], trainY[disp_ind,...,0]] # display image (with grayscale) plt.imshow(disp,cmap='gray') plt.show() # repeat for validation set disp_ind = 7 plt.figure() # concatenate the input and target image together disp = np.c_[valX[disp_ind,...,0], valY[disp_ind,...,0]] # display image (with grayscale) plt.imshow(disp,cmap='gray') plt.show() # + [markdown] colab_type="text" id="wXMzqu0m0UN1" # Looks good! # + [markdown] colab_type="text" id="wLTW2_Ae0UN2" # ## Our data is now ready for training! 
# + [markdown] colab_type="text" id="FfIKyxpS0UN3" # # Part 2: Building a segmentation network # # We will build a deep convolutional neural network layer by layer, using Keras' high-level libraries that are relatively easy to work with to create exactly the network that we want. # # For this segmentation problem, the most common and effective networks follow a style known as 'convolutional encoder-decoder' or CED. This means that using convolutional layers we will downsample, or 'encode', our input data, and then upsample, or 'encode' back to our original input size. In this way, the convolutional layers will learn to create a mapping of our input images into a segmentation mask. # + [markdown] colab_type="text" id="yi1lcTQ50UN3" # One final note before we build the model. The filters (or 'kernels') are intialized in the background by some random distribution before training. Different distributions can greatly affect how quickly the model learns, or whether it converges at all. Each task can require different intialization distributions and usually requires playing around with different options. For the models we are using today, we already did this work for you and found that the He Normal distribution is most effective (He et al., http://arxiv.org/abs/1502.01852). We will set this parameter in all the convolutional layers. # + colab={} colab_type="code" id="ArAQs-9F0UN4" init = 'he_normal' # + [markdown] colab_type="text" id="GsqE6rXo0UN6" # Now, let's build a segmentation model! # # First, import some layers we will use: # + colab={} colab_type="code" id="wEaRc3-i0UN7" from keras.layers import Input from keras.layers import Conv2D from keras.layers import ZeroPadding2D # also, import the Model function for building the model from keras.models import Model # + [markdown] colab_type="text" id="lVB9OSoh0UN9" # We first need an input layer. Our input layer just needs the shape of the input we are providing. The shape dimensions are [sample,row,column,channel]. 
# # For this 2D network, our samples are different slices. We don't need to provide this dimension to the input layer, since we will feed those samples in as batches during training. But we need the rest of the dimensions. # # Keep in mind that Python uses 0-indexing. So `[1:]` means collect all the parts of the array except the first one. # + colab={} colab_type="code" id="tlE44TpJ0UN9" # create our input layer by giving it an input shape inp = Input(shape=trainX.shape[1:]) # + [markdown] colab_type="text" id="8IjLCwlL0UN_" # Now, we will add on convolutional layers # + [markdown] colab_type="text" id="3Ws1P7mt0UOA" # The syntax for adding layers to our network is: # # `newlayer = LayerType(layer_parameters)(input_layer)` # # newlayer: the variable that stores the current output of the network. # LayerType: the type of the new layer we are adding onto the network, in this case Conv2D layers. # layer_parameters: the inputs we provide to define the new layer. For Conv2D layers, this is given as (number of filters, size of filters, and type of nonlinearity applied to the layer). # input_layer: the previous layer that our new layer is going to be connected to. # # So for example: `x = Conv2D(10,(3,3), activation='relu')(inp)` creates a 2D convolutional layer with 10 filters that are 3x3 in size. The non-linearity (activation) is a Rectified Linear Unit, and it takes 'inp' as an input and gives its output as x. # + [markdown] colab_type="text" id="bll_q2P10UOB" # Without further ado, let's make a convolutional neural network! 
# + colab={} colab_type="code" id="FUFdLJsq0UOB" # add on a couple convolutional layers x = Conv2D(10,kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(inp) x = Conv2D(20, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) # We will have to use some specific zero padding # to keep our layer sizes friendly for this segmentation model # make a zero padding layer that does 1 pad of zeros # on all sides x = ZeroPadding2D(padding=(1,1))(x) # Add a strided convolution layer x = Conv2D(30, kernel_size=(4,4), strides=(2,2), activation='relu', kernel_initializer=init)(x) # Now repeat the process, hanging onto the second layer again x = Conv2D(30, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) x = Conv2D(40, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) x = ZeroPadding2D(padding=(1,1))(x) x = Conv2D(40, kernel_size=(4,4), strides=(2,2), activation='relu', kernel_initializer=init)(x) # We've now done 2 downsampling layers, like before. # Now for the decoding side of the network, we will start # adding skip connections # The first couple of layers are the same as usual. x = Conv2D(50, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) x = Conv2D(50, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) # now, we will reverse the downsampling using Transposed Convolutions, also # incorrectly but commonly called Deconvolution from keras.layers import Conv2DTranspose # This is now the decoding side of the network # The syntax is identical. 
However, we need the decoding side of the network to end # up with the same output size as our images, so the # precise order and size of layers matter x = Conv2DTranspose(40, kernel_size=(4,4), strides=(2,2), activation='relu', kernel_initializer=init)(x) # Mixing in regular Conv2D layers is sometimes necessary # for getting layer shapes to work out x = Conv2D(40, kernel_size=(3,3),activation='relu',kernel_initializer=init)(x) x = Conv2DTranspose(30, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) x = Conv2DTranspose(30, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) # Do all that again- your turn! # Repeat the last 4 layers again # Continue to reduce the number of kernels: try 20,20,10,10 x = Conv2DTranspose(20, kernel_size=(4,4),strides=(2,2),activation='relu',kernel_initializer=init)(x) x = Conv2D(20, kernel_size=(3,3),activation='relu',kernel_initializer=init)(x) x = Conv2D(10, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) x = Conv2D(10, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) # Final output layer out = Conv2D(1,kernel_size=(1,1),activation='sigmoid',kernel_initializer=init)(x) # Make the model using the input and output layers SegModel = Model(inp,out) # + colab={} colab_type="code" id="H4H1GiOz0UOD" # Print a summary of the model we just made SegModel.summary() # + [markdown] colab_type="text" id="N4yezVkV0UOG" # ### Compiling the model # # Compiling the model is the final step before it is ready to train. We need to define our loss function and optimizer that Keras will use to run the training. In this step, Keras will also randomly initialize the weights of our network- so every time the network is trained, it has a different starting point and it is possible to get different results. 
# + [markdown] colab_type="text" id="aHNpmhNo0UOG"
# ### Loss function
# The Dice coefficient is not only a good segmentation metric, it also works well as a segmentation loss function since it can be converted to being differentiable without much difficulty. Loss functions in Keras need to be defined using tensor functions, using the backend API.
#
# Here is what that looks like:

# + colab={} colab_type="code" id="GM4nvIMN0UOH"
import keras.backend as K

def dice_coef(y_true, y_pred):
    """Soft Dice loss: returns 1 - Dice(y_true, y_pred).

    Both tensors are flattened so the overlap is computed over the whole
    batch at once; the +1 terms smooth the ratio and avoid division by
    zero on empty masks.
    """
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    dice = (2. * overlap + 1) / (K.sum(truth) + K.sum(pred) + 1)
    # Dice should be maximized, but Keras minimizes the loss,
    # so hand back 1 - dice instead.
    return 1 - dice

# + [markdown] colab_type="text" id="u90AIUqB0UOJ"
# ### Optimizer
# There are many different optimizers that Keras allows us to use without much trouble. We have provided two examples here and you can try both to see how well they help the model train.
#
# Segmentation can be tricky- if you don't have enough data, the model might not converge. We are working with a limited amount of data so that is a possible issue. We have already experimented for you to find parameters that work well for this model. We found that SGD- Stochastic Gradient Descent- works best here. We set a low learning rate. We also use Nesterov momentum, which is rarely a bad idea for SGD.
# The final other parameter we'll use is setting the clipnorm, which means the gradients during training will be clipped to a fixed value. This prevents an issue known as "exploding gradients" which causes the model to stop learning.
# # ##### Challenge: # Experiement with these different settings and see if you can find an optimizer and combination of parameters that gets better results in the same amount of training (epochs) # + colab={} colab_type="code" id="JlxLDJQA0UOJ" from keras.optimizers import SGD # Setup a SGD optimizer with learning rate of 0.05, decay of 1e-6, momentum of .9 # Nesterov momentum enabled, and clipnorm set to 0.5 opt = SGD(lr=0.05,momentum=.9,nesterov=True,clipnorm=0.5) # Compile the segmentation model with Dice as the loss and the created optimizer SegModel.compile(loss=dice_coef,optimizer=opt) # + [markdown] colab_type="text" id="h6aeP-UE0UOL" # ### Model Training # All that's left to do is to "fit" the model to our data! # # Keras takes a few more parameters during model "fitting" (training): # * Our training data (obviously) # * Batch size- how many samples are fed in at once # * Epochs- how many times to go through all training data # * We ask Keras to constantly report progress (verbose) # * Shuffle set to True so the data is in random order for every epoch # * Our validation data that will be evaluated at the end of every epoch so we can keep an eye on overfitting # + colab={} colab_type="code" id="H_edhov00UOM" # Run the model training with our x and y training data, batch size of 32, # 7 epochs, shuffle on, and provide our validation data # Save the output to the variable 'hist' hist = SegModel.fit(trainX, trainY, batch_size=32, epochs=5, verbose=1, shuffle=True, validation_data=(valX, valY)) # + [markdown] colab_type="text" id="JoNjbXMi0UOP" # ### Evaluate Model # After the training is complete, we evaluate the model again on our validation data to see the results. 
# + colab={} colab_type="code" id="svbXPIEx0UOP" # Get the Dice score from evaluating the model and print it out score = SegModel.evaluate(valX, valY, verbose=0) print('Final Dice on validation set: {:.04f}'.format(1-score)) # + [markdown] colab_type="text" id="YfpDEKU90UOS" # Another way to evaluate a model is to look at how both the training and validation losses change during training. Keras gave us this data when we trained the model, now we can plot them together. # + colab={} colab_type="code" id="6RD1gVN60UOT" # Plot the losses that are stored in the 'hist' variable plt.figure(figsize=(6.0, 4.0)); plt.plot(hist.epoch,hist.history['loss'],'b-s') plt.plot(hist.epoch,hist.history['val_loss'],'r-s') plt.legend(['Training Loss', ' Validation Loss']) plt.xlabel('Epochs') plt.ylabel('Dice Loss') plt.ylim([0,1]) plt.show() # + [markdown] colab_type="text" id="U6VsVupZ0UOX" # An important thing to look for is that the validation loss isn't increasing while the training loss decreases. The divergence of the losses like this means that the model is overfitting- it is getting really good at the training data that it sees, but it is getting worse at the data that it doesn't see. This means the model won't be very helpful when we want to apply it to new data. # Due to the random initialization of the network, the exact loss plots will be different every single time you train it. However, for this example, some general statements can be made that probably apply to your results. # * The validation and training losses generally go down. This is good- the model is learning. # * The validation loss is generally higher than the training loss. This is expected- the model will learn the training data best because that is what it gets direct feedback on. The hope is that it will transfer what it learns to the validation data too. # * The validation loss spikes up at some point. This is also pretty normal. 
The validation data isn't part of the feedback loop so it's not guaranteed that the model will consistently get better results on it. As long as the spikes are isolated and the validation loss follows a general downward trend, it's not anything to worry about. # + [markdown] colab_type="text" id="R9es9wHk0UOX" # ##### Question 2: What techniques or strategies can be used to mitigate issues with overfitting? # + [markdown] colab_type="text" id="Rku4IjDQ0UOY" # Another useful way to evaluate a model is to just look at the outputs. We can look at a sample image to see how the mask looks compared to the ground truth. # + colab={} colab_type="code" id="a5_cVL8k0UOZ" # Get the predictions of the model on the validation inputs predictions = SegModel.predict(valX) # + colab={} colab_type="code" id="u3UOWeBK0UOc" # pick a random slice to examine disp_ind = 73 # get the CT image, the model predicted mask, and the target mask image = valX[disp_ind,...,0] predicted_mask = predictions[disp_ind,...,0] truth_mask = valY[disp_ind,...,0] # normalize image for display image = image-np.min(image) image = image/np.max(image) # create a figure plt.figure() # combine images together into one disp = np.c_[image,predicted_mask,truth_mask] # display image plt.imshow(disp,cmap='gray') plt.show() # + [markdown] colab_type="text" id="wTNgVEV40UOe" # Results will vary here. It's unlikely that the model already learned a beautiful mask, but hopefully it at least learned something useful and can produce a somewhat reasonable result. Play around with the index `(disp_ind)` and see what different slices look like. # # Sometimes it helps to get more precise visualization. We have provided a function for viewing the mask on top of the image, so we can maybe start to explain what mistakes the model is making. 
# + colab={} colab_type="code" id="ZAT2Agcj0UOf" from Demo_Functions import display_mask display_mask(image,predicted_mask) # + [markdown] colab_type="text" id="1v-z9wym0UOh" # ##### Question 3: Can you explain the errors made by the deep learning model? # + [markdown] colab_type="text" id="eEyn1mQa0UOh" # In segmentation, a particularly useful trick is the use of skip connetions, in which layers from the downsampling part of the network are concatenated with layers on the upsampling part. This both boosts the representational power of the model as well as improves the gradient flow, which also helps the model learn quicker. # These skip connections take a little bit more effort to implement. Luckily, Keras still makes it pretty easy. # + [markdown] colab_type="text" id="EodhD8Sm0UOi" # ## Adding Skip Connections # So far, we've been making sequential models. # Basically, it means that our network # has a single, straight path, i.e. # # ![Simple CNN floatchart](https://github.com/jmj23/deep-learning/raw/master/BootCamp/CNN_simple_flowchart.png "Simple CNN") # # Each layer has a single input and output # # But what if we wanted something more complicated? What if # we wanted to implement the skip connections that were just mentioned, for example? Then we would want something like # # ![Connection CNN floatchart](https://github.com/jmj23/deep-learning/raw/master/BootCamp/CNN_connection_flowchart.png "Connection CNN") # # # The extra connection shown is called a skip connection. Skip connections allow the model to consider features that were calculated earlier in the network again, merged with further processed features in practice, this has shown to be hugely helpful in geting precise localization in segmentation outputs. # # We'll use the same segmentation data so no need to prepare anything new. Let's jump into model creation. 
# + [markdown] colab_type="text" id="dMhMjOxZ0UOi" # ## Build a segmentation model with skip connections # # We will reuse the previous model, but rename some of the layers and add some new ones. Here is the previous model code: # + colab={} colab_type="code" id="EFfQZ6no0UOj" # A new layer type we will need for this model from keras.layers import concatenate # the model begins the same inp = Input(shape=trainX.shape[1:]) x = Conv2D(10,kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(inp) # we need to hold onto this layer so it can be used later in the model # as a skip connection. Rename the the output of this layer to # something besides 'x'. I prefer 'x1' but creativity is allowed. x1 = Conv2D(20, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) # Don't forget to update the input of this layer to the same name x = ZeroPadding2D(padding=(1,1))(x1) # these layers can remain the same x = Conv2D(30, kernel_size=(4,4),strides=(2,2),activation='relu',kernel_initializer=init)(x) x = Conv2D(30, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) # repeat the renaming process for this layer. 
I used 'x2' x2 = Conv2D(40, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) x = ZeroPadding2D(padding=(1,1))(x2) # We'll finish the encoding side of the network and begin the decoding side x = Conv2D(40, kernel_size=(4,4),strides=(2,2),activation='relu',kernel_initializer=init)(x) x = Conv2D(50, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) x = Conv2D(50, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) x = Conv2DTranspose(40, kernel_size=(4,4),strides=(2,2),activation='relu',kernel_initializer=init)(x) x = Conv2D(40, kernel_size=(3,3),activation='relu',kernel_initializer=init)(x) # Now, add a concatenation layer to grab the second layer we renamed- 'x2' # the syntax is: # x = concatenate([layer1,layer2]) # give it a shot: x = concatenate([x,x2]) # more decoding side of the network x = Conv2DTranspose(30, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) x = Conv2DTranspose(30, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) x = Conv2DTranspose(20, kernel_size=(4,4),strides=(2,2),activation='relu',kernel_initializer=init)(x) x = Conv2D(20, kernel_size=(3,3),activation='relu',kernel_initializer=init)(x) # now add the second skip connection, this time using the first layer- 'x1' x = concatenate([x,x1]) # finish the model off like before x = Conv2D(10, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) x = Conv2D(10, kernel_size=(3,3),padding='same',activation='relu',kernel_initializer=init)(x) out = Conv2D(1,kernel_size=(1,1),activation='sigmoid',kernel_initializer=init)(x) # Make the model using the input and output layers # This won't work if we don't match up the skip connections right! SegModel2 = Model(inp,out) # + [markdown] colab_type="text" id="4I-7IwIC0UOl" # Let's print out a summary of the model to make sure it's what we want. 
# It's a little bit harder to keep track of layers in non-sequential format, but it's still a good way to make sure things look right. # + colab={} colab_type="code" id="918xM5YD0UOl" # Print the summary of the model SegModel2.summary() # + [markdown] colab_type="text" id="_a7_Qtpc0UOn" # Now, everything else is just like the previous segmentation model. Let's try it out and see how it works! # + colab={} colab_type="code" id="XIifkyBF0UOn" # Make same optimizer as before and compile the new model opt = SGD(lr=0.05,momentum=.9,nesterov=True,clipnorm=0.5) SegModel2.compile(loss=dice_coef,optimizer=opt) # + colab={} colab_type="code" id="mbvWqH6v0UOq" # Running the training with same data, batch size, and epochs as before hist2 = SegModel2.fit(trainX, trainY, batch_size=32, epochs=5, verbose=1, shuffle=True, validation_data=(valX, valY)) # + colab={} colab_type="code" id="HQVkwSnS0UOr" # Plot the results, including the previous ones # Use different colors for the first and second model plt.figure(figsize=(6.0, 4.0)); plt.plot(hist2.epoch,hist2.history['loss'],'r-') plt.plot(hist2.epoch,hist2.history['val_loss'],'r-s') plt.plot(hist.epoch,hist.history['loss'],'b-') plt.plot(hist.epoch,hist.history['val_loss'],'b-s') plt.legend(['Model 2 Training Loss', 'Model 2 Validation Loss', 'Model 1 Training Loss', 'Model 1 Validation Loss']) plt.xlabel('Epochs') plt.ylabel('Dice Loss') plt.show() # + [markdown] colab_type="text" id="-hcxUMtq0UOt" # ##### Question 4: How can the validation loss be lower than the training loss? 
# + colab={} colab_type="code" id="0Vh-uMEu0UOt" # Get the predictions of the new model predictions2 = SegModel2.predict(valX) # display image with mask like before disp_ind = 73 image = valX[disp_ind,...,0] predicted_mask = predictions2[disp_ind,...,0] truth_mask = valY[disp_ind,...,0] # normalize image for display image = image-np.min(image) image = image/np.max(image) # create a figure plt.figure() # combine images together into one disp = np.c_[image,predicted_mask,truth_mask] # display image plt.imshow(disp,cmap='gray') plt.show() # + [markdown] colab_type="text" id="kK7j0OoO0UOv" # It's better! The network learned much faster, as is apparent in the loss plots. The new model also already has better overall results. Additionally, the mask has more fine detail than the previous version without skip connections. Having these skip connections definitely make a difference. The difference becomes more pronounced for deeper networks (more layers) with more parameters and larger images. # # Let's look at some of these masks on the images and make some observations. # + colab={} colab_type="code" id="THmccG6f0UOw" ims = np.split(valX[10:80:16,...,0],5) masks = np.split(predictions2[10:80:16,...,0],5) for im,mask in zip(ims,masks): display_mask(im[0],mask[0]) # + [markdown] colab_type="text" id="iVYlUDdr0UOy" # Now that you know the functional API, you can make any graph you like, train it, and use it! Once you've mastered the syntax and conceptual understanding of how to connect layers, you are only limited by your imagination as far as what kind of network you can build. # + [markdown] colab_type="text" id="RXuV8TxZ0UOz" # ## End of Segmentation Example. Happy deep learning! # + [markdown] colab_type="text" id="EjwteSR00UOz" # ## Answers to Questions # # #### 1- What could be another use for having multiple input channels? # # In MRI, multiple sequences are usually acquired. 
It might take some resampling of the data, but you could use multiple sequences as different channels, for example, T1, T2, and 2-point Dixon images. Including more channels in your inputs almost always results in better performance for a deep learning model. # # #### 2- What techniques or strategies can be used to mitigate issues with overfitting? # # The best solution is to use more data. That is rarely a possible solution in medical imaging, so there are some alternatives. # 1. Use data augmentation to synthesize extra data # 2. Reduce the size or complexity of the network # 3. Introduce regularization. This can include dropout, batch normalization, or L1/L2 regularization # # #### 3- Can you explain the errors made by the deep learning model? # # No! It's really difficult to explain or understand exactly what is going on inside a CNN. There's simply too many parameters involved to be able to pick apart what each one is doing. That's why training always needs validation- it's the only way to check that our model is really learning something useful. # # #### 4- How can the validation loss be lower than the training loss? # # It generally isn't, because the model learns from the training data and not the validation data. Only in contrived scenarios could the model actually perform better on the validation data than training. However, sometimes you will see lower validation loss. The explanations could be: # * The model has equivalent performance on training and validation, and slight random differences make the validation loss slightly lower # * A quirk of Keras. This is how Keras evaluates losses during training: # 1. Calculate loss of each training batch during epoch # 2. Average these losses together at end of epoch. This is the epoch's training loss # 3. Calculate total validation loss at end of epoch. 
# # If a model learns very quickly (frequently the case in the first few epochs) then the performance of the model at the end of the epoch, when it evaluates the validation data, will be better than the average performance during the entire epoch.
ImageSegmentation/SegmentationBootcamp_Master.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Toy process C
#
# Compute the mean of each column, and plot a histogram.

# +
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -

# Read the shared intermediate produced by the upstream toy process.
common = pd.read_csv('common.csv')

# Per-column means of the input table.
average = common.mean()
average

# Persist the column means for the downstream toy process.
average.to_csv('average.csv')

# Draw an actual histogram of the data. The original called plt.plot, which
# draws a line plot despite the 'toy_hist.pdf' filename and the stated goal
# of this notebook; plt.hist produces one histogram per column.
plt.hist(common)
plt.savefig('toy_hist.pdf')
examples/toyC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:synthetic-observables] # language: python # name: conda-env-synthetic-observables-py # --- # # Compute Timelags # In this notebook, we'll compute the timelags and cross-correlation for every channel pair and heating type and save them as FITS files. # + import os import sys import numpy as np import distributed import matplotlib.pyplot as plt import matplotlib.colors from sunpy.map import Map,GenericMap from astropy.coordinates import SkyCoord import astropy.units as u from astropy.utils.console import ProgressBar import synthesizAR from synthesizAR.instruments import InstrumentSDOAIA from synthesizAR.analysis import DistributedAIACube,AIATimelags from synthesizAR.visualize import bgry_004_idl_cmap # %matplotlib inline # - synthesizAR.version.version # Spin up a local Dask cluster. cluster = distributed.LocalCluster(n_workers=32,threads_per_worker=2,) client = distributed.Client(cluster) client channels = [94,131,171,193,211,335] channel_pairs = [(94,335), (94,171), (94,193), (94,131), (94,211), (335,131), (335,193), (335,211), (335,171), (211,131), (211,171), (211,193), (193,171), (193,131), (171,131),] heating = ['high_frequency', 'intermediate_frequency', 'low_frequency', 'cooling_outofphase_long', 'cooling'] labels = ['High', 'Intermediate', 'Low', 'Random', 'Cooling'] intensity_file_format = '/storage-home/w/wtb2/data/timelag_synthesis_v2/{}/nei/SDO_AIA/{}/map_t{:06d}.fits' result_file_format = '/storage-home/w/wtb2/projects/synthetic-observables-paper-models/paper/data/{}/{}_{}_{}.fits' timelag_bounds = (-6*u.hour,6*u.hour) # Now, compute the timelag and correlation maps for all of the channel pairs and all of the different heating models. 
# ## Timelag and cross-correlation maps for every heating scenario
#
# The five scenarios below were originally five copy-pasted stanzas that
# differed only in the intensity directory, the result directory, and the
# frame range, so they are now driven by a single table. The sequence of
# computed and saved files is identical to the original code.
#
# Each entry: (intensity directory name, result directory name, frame indices).
# NOTE: the 'cooling_outofphase_long' intensity run is deliberately saved
# under the 'random' result directory, matching the original code.
runs = [
    ('high_frequency', 'high_frequency', range(500, 2500)),
    ('intermediate_frequency', 'intermediate_frequency', range(500, 2500)),
    ('low_frequency', 'low_frequency', range(500, 2500)),
    ('cooling_outofphase_long', 'random', range(500, 2500)),
    ('cooling', 'cooling', range(0, 1000)),
]

for intensity_name, result_name, frames in runs:
    # Lazily assemble the synthesized AIA time series for every channel of this run.
    tl = AIATimelags(*[
        DistributedAIACube.from_files(
            [intensity_file_format.format(intensity_name, c, i) for i in frames])
        for c in channels])
    # Split each map into a 3x3 grid of chunks for the distributed computation;
    # this is loop-invariant per run, so compute it once.
    chunks = (tl[0].shape[1]//3, tl[0].shape[2]//3)
    for ca, cb in channel_pairs:
        timelag_map = tl.make_timelag_map(
            f'{ca}', f'{cb}', timelag_bounds=timelag_bounds, chunks=chunks)
        timelag_map.save(result_file_format.format(result_name, 'timelag', ca, cb))
        correlation_map = tl.make_correlation_map(
            f'{ca}', f'{cb}', timelag_bounds=timelag_bounds, chunks=chunks)
        correlation_map.save(result_file_format.format(result_name, 'correlation', ca, cb))
notebooks/timelags.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/satyajitghana/TSAI-DeepVision-EVA4.0/blob/master/03_PyTorch101/PyTorch101.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="_kYM3ylzYo1Q" colab_type="text" # ![PyTorch](https://devblogs.nvidia.com/wp-content/uploads/2017/04/pytorch-logo-dark.png) # # An open source machine learning framework that accelerates the path from research prototyping to production deployment. # # # + [markdown] id="WcVqSTrWY6oz" colab_type="text" # # Tensor - Pytorch's core data structure # # In Python we can create lists, lists of lists, lists of lists and so on. In NumPy there is a `numpy.ndarray` which represents `n`- dimensional array. In math there is a special name for the generalization of vectors and matrices to a higher dimensional space - a tensor # # Tensor is an entity with a defined number of dimensions called an order (rank). # # **Scalar** can be considered as a rank-0-tensor. # # **Vector** can be introduced as a rank-1-tensor. # # **Matrices** can be considered as a rank-2-tensor. # # # Tensor Basics # # Let's import the torch module first. 
# + id="e-oFVL2tYYqp" colab_type="code" colab={} import numpy as np import torch # + [markdown] id="uGxuR5IEaFJa" colab_type="text" # ## Tensor Creation # Let's view examples of matrices and tensors generation # # 2-dimensional (rank-2) tensor of zeros: # + id="897JQc25aD3E" colab_type="code" outputId="58464d4a-9020-4cd4-c0d9-b484f3dbeb1b" colab={"base_uri": "https://localhost:8080/", "height": 72} torch.zeros(3, 4) # + [markdown] id="pujmpEBhaPRA" colab_type="text" # Random rank-3 tensor: # _read the print below and convince yourself how this is a rank-3-tensor and learn what those 2, 3, 4 values are there for_ # + id="mBrznTNhaOL7" colab_type="code" outputId="6f1ec0e3-f2b8-4b6e-d689-4a523ae0609c" colab={"base_uri": "https://localhost:8080/", "height": 145} torch.rand(2, 3, 4) # + [markdown] id="kO4fIr15asdi" colab_type="text" # I am hoping you have noticed 4-elements in a row, 3 rows making one block and there are 2 blocks. # # Random rank-4-tensor: # + id="KiCLvMGCaVZ3" colab_type="code" outputId="c7ebbec3-c6af-444c-d08e-ef2ae00e0f72" colab={"base_uri": "https://localhost:8080/", "height": 235} torch.rand(2, 2, 2, 3) # + [markdown] id="lF6VL68s3AMV" colab_type="text" # ## Question 1: # # How many dimensions are there in a tensor defined as below? # + id="8IbXcI3x3Ibt" colab_type="code" outputId="8c1d460a-bba4-4c1d-fd64-daf7fd0e13db" colab={"base_uri": "https://localhost:8080/", "height": 54} a = torch.rand(1, 1, 1, 1) print(a) print(a.shape) # + [markdown] id="u4omDSGobWpr" colab_type="text" # ## Answer : 4 # + [markdown] id="qmgpqssRa9jF" colab_type="text" # . # # --- # # # There are many more ways to create tensor using some restrictions on values it should contatn - for the full reference, please follow the [official docs](https://pytorch.org/docs/stable/torch.html#creation-ops). # # # . # --- # # # # Python / NumPy / Pytorch interoperability # # You can create tensors from python as well as numpy arrays. You can also convert torch tensors to numpy arrays. 
So, the interoperability between torch and numpy is pretty good. # + id="ipgpeWPfa6gE" colab_type="code" outputId="abd1cd50-f4bb-4710-e38b-bab4985403be" colab={"base_uri": "https://localhost:8080/", "height": 126} # Simple Python List python_list = [1, 2] # Create a numpy array from python list numpy_array = np.array(python_list) # Create a torch Tensor from python list tensor_from_list = torch.tensor(python_list) # Create a torch Tensor from Numpy array tensor_from_array = torch.tensor(numpy_array) # Another way to create a torch Tensor from Numpy array (share same storage) tensor_from_array_v2 = torch.from_numpy(numpy_array) # Convert torch tensor to numpy array array_from_tensor = tensor_from_array.numpy() print('List: ', python_list) print('Array: ', numpy_array) print('Tensor: ', tensor_from_list) print('Tensor: ', tensor_from_array) print('Tensor: ', tensor_from_array_v2) print('Array: ', array_from_tensor) # + [markdown] id="X_x-86B8gqik" colab_type="text" # **Difference between** `torch.Tensor` **and** `torch.from_numpy` # # Pytorch aims to be an effective library for computations. What does it mean? It means that pytorch avoids memory copying if it can. # + [markdown] id="iAZvjS8fsKmN" colab_type="text" # **torch.tensor always copies data** \ # **torch.from_numpy always shares data from the actual numpy array** # + id="eHCKDGpygn_d" colab_type="code" outputId="0a6f2d7a-22f3-4e19-b84e-9f42c05b8575" colab={"base_uri": "https://localhost:8080/", "height": 72} numpy_array[0] = 10 print('Array: ', numpy_array) print('Tensor: ', tensor_from_array) print('Tensor: ', tensor_from_array_v2) # + id="K9b9Wb-Ofq4j" colab_type="code" colab={} a_numpy = np.array([1, 2]) a_tensor = torch.tensor(a_numpy) # + [markdown] id="-CKtdNRM3SMp" colab_type="text" # ## Question 2: # # Assume that we moved our complete (cats vs dogs) image dataset to numpy arrays. Then we use torch.from_numpy to convert these images to tensor. 
Then we apply a specific data augmentation strategy called "CutOut" which blocks a portion of the image directly on these tensors. What will happen to the accuracy of a model trained on this strategy compared to the one without this strategy? CutOut strategy is shown below: # # ![CutOut](https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSnSyN835AmtQPKQbPjDHX-FmshNilbtexX95cRGQPwl56QCGDn) # # ## Question 3: # Why do you think we are observing this behavior? # # # # --- # # # # + [markdown] id="E1MATjhokSNs" colab_type="text" # ## Answer 2 # # CutOut is a image augmentation technique like dropout, but here we remove a spatial region of the image, another difference is that the cutout is performed to the input of the network, rather than at the intermediate layers like in dropout. # # referring to [1], the addition of cutout to the # ResNet18 and WRN-28-10 models increased their accuracy # on CIFAR-10 and CIFAR-100 by between 0.4 to 2.0 percentage points. We draw attention to the fact that cutout # yields these performance improvements even when applied # to complex models that already utilize batch normalization, # dropout, and data augmentation. Adding cutout to the current state-of-the-art shake-shake regularization models improves performance by 0.3 and 0.6 percentage points on # CIFAR-10 and CIFAR-100 respectively, yielding new stateof-the-art results of 2.56% and 15.20% test error. # # ![cutout](https://github.com/satyajitghana/TSAI-DeepVision-EVA4.0/blob/master/03_PyTorch101/cutout.jpg?raw=true) # # [1] <NAME>. and <NAME>., 2017. Improved regularization of convolutional neural networks with cutout. arXiv preprint arXiv:1708.04552. 
# # ## Answer 3 # # Intuitively, how i imagine this is that we are forcing the network to not memorize some specific deatails of the image and making it look into different parts of the image to make the prediction, this will make the network learn more general features, In 100-class classification, the model needs more fine-grained features to separate between the classes. # # Cutout Regularization should force a CNN model to develop a more diverse set of features for classifying images. Instead of just focusing on the wheels of a car, Cutout Regularization should force the model to look at other details of the image. # + [markdown] id="dZq7O-aihSOA" colab_type="text" # We have two different ways to create tensor from its NumPy counterpart - one copies memory and another one shares the same underlying storage. It works in the opposite way: # + id="TNFOwV8EhPwQ" colab_type="code" outputId="e59a8479-c1e6-4e64-a5cc-c1a6be5e3847" colab={"base_uri": "https://localhost:8080/", "height": 90} array_from_tensor = tensor_from_array.numpy() print('Tensor: ', tensor_from_array) print('Array: ', array_from_tensor) tensor_from_array[0] = 11 print('Tensor: ', tensor_from_array) print('Array: ', array_from_tensor) # + [markdown] id="HVepu-ALKYwv" colab_type="text" # If you make changes in the ``.numpy()`` then its not reflected in the tensor, but any changes in the tensor is reflected in ``.numpy()`` # + [markdown] id="UM9ytbvKhtCw" colab_type="text" # ## Data types # # The basic data type of all Deep Learning-related operations is float, but sometimes you may need something else. Pytorch support different number types for its tensors the same way NumPy does it - by specifying the data type on tensor creation or via casting. Ths full list of supported data types can be found [here](https://pytorch.org/docs/stable/tensors.html). 
# + id="Bd6WkzJ4hpYi" colab_type="code" outputId="10dffaeb-c23a-4e4a-8459-f62c0a419843" colab={"base_uri": "https://localhost:8080/", "height": 163} tensor = torch.zeros(2, 2) print('Tensor with default type: ', tensor) tensor = torch.zeros(2, 2, dtype=torch.float16) print('Tensor with 16-bit float: ', tensor) tensor = torch.zeros(2, 2, dtype=torch.int16) print('Tensor with integers: ', tensor) tensor = torch.zeros(2, 2, dtype=torch.bool) print('Tensor with boolean data: ', tensor) # + [markdown] id="a9F4Dkdr40TE" colab_type="text" # # # --- # # # ## Question 4: # We saw above that some times numpy and tensors share same storage and changing one changes the other. # If we define a rank-2-tensor with ones (dtype of f16), and then convert it into a numpy data type using tensor.numpy() and store it in a variable called "num", and then we perform this operation `num = num * 0.5`, will the original tensor have 1.0s or 0.5s as its element values? # # + id="9vsPMK0fJH8B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="4b332d1c-c077-4741-c4a2-4e681feaac5d" tensor = torch.ones(2, 2, dtype=torch.float16) num = tensor.numpy() print('Num : ', num) # + id="3mI8Oq4JJ6JE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="82d979c4-c298-4497-d339-ed9ae9d51cf2" num = num * 0.5; print('Num : ', num) print('Tensor: ', tensor) # + [markdown] id="OiQt8Mmm51OE" colab_type="text" # # # --- # # ## Question 5: # If the operation `num = num*5` is changed to `num[:] = num*5` will the original tensor have 1.0s or 0.5s as its element values? 
# # # # --- # # # + id="o5bL0WDqKlSy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="c3d2a8a2-d8b6-4d11-9d37-42dfe5f56535" tensor = torch.ones(2, 2, dtype=torch.float16) num = tensor.numpy() print('Num : ', num) print('Tensor : ', tensor) # + id="53ZSF8foKuUV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="8aeafdc0-0027-4159-806b-881a4e5e2076" num[:] = num * 5 print('Num : ', num) print('Tensor : ', tensor) # + [markdown] id="37nu2nmzK5_M" colab_type="text" # ## Answer : if ``num[:] = num * 5`` is used then the original tensor value is also changed # + [markdown] id="vzh8UB8KiVmb" colab_type="text" # ## Indexing # # Tensor provides access to its elements via the same `[]` operation as a regular python list or NumPy array. However, as you may recall from NumPy usage, the full power of math libraries is accessible only via vectorized operations, i.e. operations without explicit looping over all vector elements in python and using implicit optimized loops in C/C++/CUDA/Fortran/etc. available via special function calls. Pytorch employs the same paradigm and provides a wide range of vectorized operations. Let's take a look at some examples. # # Joining a list of tensors together with `torch.cat` # + id="GMaCDKPhiUAb" colab_type="code" outputId="4f087bb8-f7c4-49de-ec2b-5289e6969ae9" colab={"base_uri": "https://localhost:8080/", "height": 119} a = torch.zeros(3, 2) b = torch.ones(3, 2) print(torch.cat((a, b), dim=0)) # + [markdown] id="5bj4aeE86zdH" colab_type="text" # # # --- # # ## Question 6: # Is the transpose of concatenated a & b tensor on dimension 1, same as the contatenated tensor of a & b on dimension 0? 
# # # # --- # # # + id="N7H0IMDYLgO5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="71d24ccd-65cf-4904-9f2a-1776928f86a7" print('Along dim=0\n', torch.cat((a, b), dim=0).t()) print('Along dim=1\n', torch.cat((a, b), dim=1)) # + id="hRcLlwJDMQW9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="84527911-1d3b-4744-fb64-694e793360bc" print('Shape a', a.shape) # + [markdown] id="0TXNne69j7LP" colab_type="text" # Indexing with another tensor/array: # + id="KiE-Fsi4jVd6" colab_type="code" outputId="1a76963d-eaf9-4b8b-fdbe-16c799ebea79" colab={"base_uri": "https://localhost:8080/", "height": 102} a = torch.arange(start=0, end=10) indices = np.arange(0, 10) > 5 print(a) print(indices) print(a[indices]) indices = torch.arange(start=0, end=10) %5 print(indices) print(a[indices]) # + id="isP9bA2XOc_T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f07b876d-76c0-44ee-c844-b79184cd5dc6" torch.arange(start=0, end=10)[torch.arange(start=0, end=10) % 5] # + id="o6q0-eQ0OoIh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="866358c4-1aea-442f-c07c-b7268907188b" torch.arange(start=0, end=10) % 3 # + [markdown] id="LS4dnlu47WQu" colab_type="text" # # # --- # # ## Question 7: # # `a` is defined as `torch.arange(start=0, end=10)`. We will create `b` using the two operations as below. In both cases do we get the same value? # # # 1. indices variable created by the modulo operation on arange between 0 and 10. Then a new varialble `b` is created from `a` using the last 5 elements of indices. # 2. indices variable created by the modulo operation on arange betwenn 1 and 11. Then a new varialble `b` is created from `a` using the last 5 elements of indices. 
# # # # --- # # # # + id="bHgKDUYbOwc6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="cf76a2e1-4ec9-462c-dc6c-ae20640f1c8a" a = torch.arange(start=0, end=10) print('a: ', a) indices_1 = torch.arange(start=0, end=10) % 5 print('indices_1: ', indices_1) b_1 = a[indices_1[-5:]] print('b_1: ', b_1) indices_2 = torch.arange(start=1, end=11) % 5 print('indices_2: ', indices_2) b_2 = a[indices_2[-5:]] print('b_2: ', b_2) # + [markdown] id="o_CiH7NhQdhQ" colab_type="text" # ## Answer: We get the same values but in a different order # + [markdown] id="eQ4ZCVsTk-KH" colab_type="text" # What should we do if we have, say, rank-2-tensor and want to select only some rows? # + id="_GtRpotjkt1q" colab_type="code" outputId="5701b15d-5653-4bb2-c036-36edf8ae547d" colab={"base_uri": "https://localhost:8080/", "height": 136} tensor = torch.rand((5, 3)) rows = torch.tensor([0, 2]) print(tensor) tensor[rows] # + [markdown] id="zIJnr_N2_Qaf" colab_type="text" # # # --- # # ## Question 8: # # Consider a tensor defined as `torch.rand((6, 5))`. Is the shape of the new tensor created by taking the 0th, 2nd and 4th row of the old tensor same as the shape of the a newer tensor created by taking the 0th, 2nd and 4th row of the old tensor after transposing it by operation `torch.transpose(tensor, 0, 1)` ? # # # # --- # # # # + id="79wrIoALRc-V" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="6529db77-ce97-4943-f24d-78657e5da66c" tensor = torch.rand((6, 5)) print('tensor: \n', tensor) print(tensor[[0, 2, 4], :].shape) print(tensor.t()[[0, 2, 4], :].shape) # + [markdown] id="naDzFMkslU0b" colab_type="text" # ## Tensor Shapes # # Reshaping a tensor is a frequently used operation. We can change the shape of a tensor without the memory copying overhead. There are two methods for that: `reshape` and `view`. 
# # The difference is the following: # # # * view tries to return a tensor, and it shares the same memory with the original tensor. In case, if it cannot reuse the same memory due to [some reason](https://pytorch.org/docs/stable/tensors.html?highlight=view#torch.Tensor.view), it just fails. # * reshape always returns the tensor with the desired shape and tries to reuse the memory. If it cannot, it creates a copy # # Let's see with the help of an example: # + id="HClDkqLLlMJh" colab_type="code" outputId="58fc7b31-a6e3-4149-dd50-b67fd32e0b2c" colab={"base_uri": "https://localhost:8080/", "height": 170} tensor = torch.rand(2, 3, 4) print('Pointer to data: ', tensor.data_ptr()) print('Shape: ', tensor.shape) reshaped = tensor.reshape(24) view = tensor.view(3, 2, 4) print('Reshaped tensor - pointer to data', reshaped.data_ptr()) print('Reshaped tensor shape ', reshaped.shape) print('Viewed tensor - pointer to data', view.data_ptr()) print('Viewed tensor shape ', view.shape) assert tensor.data_ptr() == view.data_ptr() assert np.all(np.equal(tensor.numpy().flat, reshaped.numpy().flat)) print('Original stride: ', tensor.stride()) print('Reshaped stride: ', reshaped.stride()) print('Viewed stride: ', view.stride()) # + [markdown] id="jIN5jSppm4yC" colab_type="text" # The basic rule about reshaping the tensor is definitely that you cannot change the total number of elements in it, so the product of all tensor's dimensions should always be the same. 
It gives us the ability to avoid specifying one dimension when reshaping the tensor - Pytorch can calculate it for us: # + id="e3D19ERFmzOl" colab_type="code" outputId="b49f6393-7a79-4da7-bb31-b5d8cd885e3a" colab={"base_uri": "https://localhost:8080/", "height": 71} print(tensor.reshape(3, 2, 4).shape) print(tensor.reshape(3, 2, -1).shape) print(tensor.reshape(3, -1, 4).shape) # + [markdown] id="ObgCQKUiETak" colab_type="text" # # # --- # # # ## Question 9: # # Consider a tensor `a` created with [1, 2, 3] and [1, 2, 3] of size (2, 3) is reshaped with operation `.reshape(-1, 2)`. Also consider a tensor `b` created with [[2, 1]] and of size (1, 2), later operated with `view(2, -1)` operation. # # If we do a dot product of a and b (using `torch.mm`) and perform the sum of all the elements (using `torch.sum`) what do we get? (enter int value without any decimal point in the quiz) # # # # --- # # # + id="kYaaUPE5XdZX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="52de52f5-c078-4b2b-f9e6-9a7804fb3ae0" a = torch.tensor([[1, 2, 3], [1, 2, 3]]) print('a shape: ', a.shape) a_reshaped = a.reshape(-1, 2) b = torch.tensor([[2, 1]]) print('b shape: ', b.shape) b_viewed = b.view(2, -1) print('sum: ', torch.sum(torch.mm(a_reshaped, b_viewed))) # + [markdown] id="ekUdvKcoYoui" colab_type="text" # ## Answer: 18 # + [markdown] id="mza7QPg3ndeV" colab_type="text" # **Alternative ways to view tensors** - `expand` or `expand_as`. # # # # * `expand` - requires the desired shape as an input # * `expand_as` - uses the shape of another tensor # # These operations "repeat" tensor's values along the specified axes without actually copying the data. # # As the documentation says, expand: # # # > returns a new view of the self tensor with singleton dimensions expanded to a larger size. Tensor can be also expanded to a larger number of dimensions, and the new ones will be appended at the front. For the new dimensions, the size cannot be set to -1. 
# # **Use case:** # # # # * index multi-channel tensor with single-channel mask - imagine a color image with 3 channels (RGB) and binary mask for the area of interest on that image. We cannot index the image with this kind of mask directly since the dimensions are different, but we can use `expand_as` operation to create a view of the mask that has the same dimensions as the image we want to apply it to, but has not copied the data. # + id="iz33E-V7nPQT" colab_type="code" outputId="8bcfc71f-b0bb-4c37-e5e0-5c4ffa38aacb" colab={"base_uri": "https://localhost:8080/", "height": 596} # %matplotlib inline from matplotlib import pyplot as plt # Create a black image image = torch.zeros(size=(3, 256, 256), dtype=torch.int) # Leave the borders and make the rest of the image Green image[1, 18:256 - 18, 18:256 - 18] = 255 # Create a mask of the same size mask = torch.zeros(size=(256, 256), dtype=torch.bool) # Assuming the green region in the original image is the Region of interest, change the mask to white for that area mask[18:256 - 18, 18:256 - 18] = 1 # Create a view of the mask with the same dimensions as the original image mask_expanded = mask.expand_as(image) print(mask_expanded.shape) mask_np = mask_expanded.numpy().transpose(1, 2, 0) * 255 image_np = image.numpy().transpose(1, 2, 0) fig, ax = plt.subplots(1, 2) ax[0].imshow(image_np) ax[1].imshow(mask_np) plt.show() image[0, mask] += 128 fig, ax = plt.subplots(1, 2) ax[0].imshow(image_np) ax[1].imshow(mask_np) plt.show() image[mask_expanded] += 128 image.clamp_(0, 255) fig, ax = plt.subplots(1, 2) ax[0].imshow(image_np) ax[1].imshow(mask_np) plt.show() # + [markdown] id="jiXBx0k3ptOI" colab_type="text" # In the example above, one can also find a couple of useful tricks: # # # * `clamp` method and function is a Pytorch's analogue of NumPy's `clip` function # * many operations on tensors have in-place form, that does not return modified data, but change values in the tensor. 
The in-place version of the operation has trailing underscore according to Pytorch's naming convension - in the exmaple above it is `clamp_` # * tensors have the same indexing as Numpy's arrays - one can use `:` seperated range, negative indexes and so on. # # # . # # # --- # # # Images and their representations # # Now, let's discuss images, their representations and how different Python librarties work with them. # # Probably, the most well-known library for image loading and simple processing is [Pillow](https://pillow.readthedocs.io/en/stable/). # # However, many people in deep learning area stick with OpenCV for image loading and processing with some usage of another libraries when it is justified by performance/functionality. This is because OpenCV is in general much faster than the other libraries. Here you can find a couple of benchmarks: # # * https://www.kaggle.com/zfturbo/benchmark-2019-speed-of-image-reading # * https://github.com/albumentations-team/albumentations#benchmarking-results # # To sum up the benchmarks above, there are two most common image formats, PNG and JPEGs. If your data is in PNG format - use OpenCV to read it. If it is in JPEG - use libturbojpeg. For image processing, use OpenCV if possible. _We will be using PIL a lot along with these._ # # As you will read the code from others, you may find out that some of them use Pillow/something else to read data. You should know, that color image representations in OpenCV and other libraries are different - OpenCV uses "BGR" channel order, while others use "RGB" one. # # To change "BRG" <-> "RGB" the only thing we need to do it to change channel order. 
# + id="ZYv4sZMmpndu" colab_type="code" outputId="1b6e97b2-94f7-4880-b92f-2077b2a58d9e" colab={"base_uri": "https://localhost:8080/", "height": 152} # %matplotlib inline from matplotlib import pyplot as plt import cv2 from urllib.request import urlretrieve URL = 'https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcRCA40ftnscVzfV8ft8e7vIzQXfXeZdtco8nknJrfCUW6INI40U' urlretrieve(URL, 'mars.jpg') bgr_image = cv2.imread('mars.jpg') # remember to add your own image in case you run this block, if you want to use the same image, # download it from: https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcRCA40ftnscVzfV8ft8e7vIzQXfXeZdtco8nknJrfCUW6INI40U rgb_image = bgr_image[..., ::-1] fig, ax = plt.subplots(1, 2) ax[0].imshow(bgr_image) ax[1].imshow(rgb_image) plt.show() # + [markdown] id="yoi4TeNBGHle" colab_type="text" # # # --- # # ## Question 10: # # Looking at the results above it can be said that the pixel values in the blue channels would be very small compared to red channel. True/False? # # # --- # # # # + [markdown] id="LZJH3lMUZ5Rx" colab_type="text" # ## Answer: Why ? ``::-1`` will only reverse the channel order, the values of the respective channels remain unchanged # + [markdown] id="b3ugFd57zNwa" colab_type="text" # # Autograd # # Pytorch supports automatic differentiation. The module which implements this is called **AutoGrad**. It calculates the gradients and keeps track in forward and backward passes. For primitive tensors, you need to enable or disable it using the `required_grad` flag. 
But, for advanced tensors, it is enabled by default # + id="VMOp4aiou6JR" colab_type="code" outputId="9b67f16a-48fb-441a-ac5c-d5216682c90b" colab={"base_uri": "https://localhost:8080/", "height": 187} a = torch.rand((3, 5), requires_grad = True) print(a) result = a * 5 print(result) # grad can be implicitly created only for scalar outputs # so let's calculate the sum here so that the output becomes a scalar and we can apply a backward pass mean_result = result.sum() print(mean_result) # calculate gradient mean_result.backward() # print gradient of a print(a.grad) # + [markdown] id="ym0Amk2IGfLx" colab_type="text" # # # --- # # ## Question 11: # # Why the gradient of a is all 5s above? # # # # --- # # # + [markdown] id="LzD_vaclaI7L" colab_type="text" # ## Answer: The derivative of ``a * 5`` w.r.t ``a`` is ``5`` # + [markdown] id="7PDgGq2R0k7I" colab_type="text" # As we see, Pytorch automagically calculated the gradient value for us. It looks to be the correct value - we multiplied an input by 5, so the gradient of this operation equals to 5. # # # Disabling Autograd for tensors # # We don't need to compute gradients for all the variables that are involved in the pipeline. The Pytorch API provides 2 ways to disable autograd. # # `detach` - returns a copy of the tensor with autograd disabled. This # # 1. copy is built on the same memory as the original tensor, so in-place size / stride / storage changes (such as resize_ / resizeas / set / transpose) modifications are not allowed. # 2. torch.no_grad() - It is a context manager that allows you to guard a series of operations from autograd without creating new tensors. 
# + id="yqVG9fQb0cLW" colab_type="code" outputId="2081f403-31a2-4296-c670-b899e246a8cc" colab={"base_uri": "https://localhost:8080/", "height": 71} a = torch.rand((3, 5), requires_grad=True) detached_a = a.detach() detached_result = detached_a * 5 result = a * 10 # we cannot do backward pass that is required for autograd using multideminsional output, # so let's calculate the sum here mean_result = result.sum() mean_result.backward() a.grad # + id="uqpch2Be02J7" colab_type="code" outputId="0a5ee8e8-81b8-46f0-c7fc-ac406657e928" colab={"base_uri": "https://localhost:8080/", "height": 71} a = torch.rand((3, 5), requires_grad=True) with torch.no_grad(): detached_result = a * 5 result = a * 10 # we cannot do backward pass that is required for autograd using multideminsional output, # so let's calculate the sum here mean_result = result.sum() mean_result.backward() a.grad # + [markdown] id="vjh2rYOPJUAZ" colab_type="text" # # Custom Network # # A fully-connected ReLU network with one hidden layer and no biases, trained to predict y from x by minimizing squared Euclidean distance. # # This implementation uses PyTorch tensors to manually compute the forward pass, loss, and backward pass. # # A PyTorch Tensor is basically the same as a numpy array: it does not know anything about deep learning or computational graphs or gradients, and is just a generic n-dimensional array to be used for arbitrary numeric computation. # # The biggest difference between a numpy array and a PyTorch Tensor is that a PyTorch Tensor can run on either CPU or GPU. To run operations on the GPU, just cast the Tensor to a cuda datatype. # + id="0nf5RaB104Vp" colab_type="code" outputId="f1666d3a-7fe4-4f62-eac3-4d4ec49f4319" colab={"base_uri": "https://localhost:8080/", "height": 1000} dtype = torch.float device = torch.device("cpu") # device = torch.device("cuda:0") # Uncomment this to run on GPU # N is batch size; D_in is input dimension; # H is hidden dimension; D_out is output dimension. 
N, D_in, H, D_out = 64, 1000, 100, 10 # Create random input and output data x = torch.randn(N, D_in, device=device, dtype=dtype) y = torch.randn(N, D_out, device=device, dtype=dtype) # Randomly initialize weights w1 = torch.randn(D_in, H, device=device, dtype=dtype) w2 = torch.randn(H, D_out, device=device, dtype=dtype) learning_rate = 1e-6 for t in range(500): # Forward pass: compute predicted y h = x.mm(w1) h_relu = h.clamp(min=0) y_pred = h_relu.mm(w2) # Compute and print loss loss = (y_pred - y).pow(2).sum().item() print(t, loss) # Backprop to compute gradients of w1 and w2 with respect to loss grad_y_pred = 2*(y_pred - y) grad_w2 = h_relu.t().mm(grad_y_pred) grad_h_relu = grad_y_pred.mm(w2.t()) grad_h = grad_h_relu.clone() grad_h[h < 0] = 0 grad_w1 = x.t().mm(grad_h) # Update weights using gradient descent w1 -= learning_rate * grad_w1 w2 -= learning_rate * grad_w2 # + [markdown] id="ycwxAPHZLNST" colab_type="text" # # # --- # ## Question 12 # # In the code above, why do we have 2 in '2.0*(y_pred - y)`? # # ## Question 13 # In the code above, what does `grad_h[h < 0] = 0` signify? # # ## Question 14 # In the code above, how many "epochs" have we trained the model for? # # ## Question 15 # In the code above, if we take the trained model, and run it on fresh inputs, the trained model will be able to predict fresh output with high accuracy. # # ## Question 16 # In the code above, if we dont use clone in `grad_h = grad_h_relu.clone()` the model will still train without any issues. # + [markdown] id="A5PBVvMroJzz" colab_type="text" # ## Asnwer 12: Derivative of ``(y_pred-y)**2`` is ``2*(y_pred-y) # ## Answer 13: Derivative of ReLU for ``x < 0`` is ``0`` and for ``x > 0`` is ``1`` # ## Answer 14: 500 epoch # ## Answer 15: # ## Answer 16: # + id="CnWkOtnGoGrE" colab_type="code" colab={}
03_PyTorch101/PyTorch101.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:gammaALPs] # language: python # name: conda-env-gammaALPs-py # --- # # Example to calculate photon-ALP oscillations from NGC 1275 # This notebook demonstrates how to calculate the photon-ALP transition probability for NGC 1275, the central AGN of the Perseus cluster. The assumed B-field environments are the same as in Ajello et al. (2016), http://inspirehep.net/record/1432667, and include the cluster field and the magnetic field of the Milky Way. from gammaALPs.core import Source, ALP, ModuleList from gammaALPs.base import environs, transfer import numpy as np import matplotlib.pyplot as plt from matplotlib.patheffects import withStroke from ebltable.tau_from_model import OptDepth from astropy import constants as c # %matplotlib inline # ### Set the ALP # Initialize an ALP object, that stores the ALP mass $m$ (in neV) and the coupling $g$ (in $10^{-11}\mathrm{GeV}^{-1}$). m, g = 1.,1. alp = ALP(m,g) # ## Set the source # Set the source properties (redshift and sky coordinates) in the ```Source``` containier ngc1275 = Source(z = 0.017559, ra = '03h19m48.1s', dec = '+41d30m42s') print (ngc1275.z) print (ngc1275.ra, ngc1275.dec) print (ngc1275.l, ngc1275.b) # ### Init the module list # Initialize the list of transfer modules that will store the different magnetic field environments. # Energies are supplied in GeV as ```numpy.ndarray``` EGeV = np.logspace(1.,3.5,250) # Now initialize the initial photon polarization. Since we are dealing with a gamma-ray source, no ALPs are initially present in the beam (third diagonal element is zero). The polarization density matrix is normalized such that its trace is equal to one, $\mathrm{Tr}(\rho_\mathrm{in}) = 1$. 
pin = np.diag((1.,1.,0.)) * 0.5 m = ModuleList(alp, ngc1275, pin = pin, EGeV = EGeV) # ### Add modules: # Now we add propagation modules for the cluster, the EBL, and the Galactic magnetic field. m.add_propagation("ICMGaussTurb", 0, # position of module counted from the source. nsim = 10, # number of random B-field realizations B0 = 10., # rms of B field n0 = 3.9e-2, # normalization of electron density n2 = 4.05e-3, # second normalization of electron density, see Churazov et al. 2003, Eq. 4 r_abell = 500., # extension of the cluster r_core = 80., # electron density parameter, see Churazov et al. 2003, Eq. 4 r_core2 = 280., # electron density parameter, see Churazov et al. 2003, Eq. 4 beta = 1.2, # electron density parameter, see Churazov et al. 2003, Eq. 4 beta2= 0.58, # electron density parameter, see Churazov et al. 2003, Eq. 4 eta = 0.5, # scaling of B-field with electron denstiy kL = 0.18, # maximum turbulence scale in kpc^-1, taken from A2199 cool-core cluster, see Vacca et al. 2012 kH = 9., # minimum turbulence scale, taken from A2199 cool-core cluster, see Vacca et al. 2012 q = -2.1, # turbulence spectral index, taken from A2199 cool-core cluster, see Vacca et al. 2012 seed=0 # random seed for reproducability, set to None for random seed. ) m.add_propagation("EBL",1, model = 'dominguez') # EBL attenuation comes second, after beam has left cluster m.add_propagation("GMF",2, model = 'jansson12', model_sum = 'ASS') # finally, the beam enters the Milky Way Field # List the module names: print(m.modules.keys()) # We can also change the ALP parameters before running the modules: m.alp.m = 30. m.alp.g = 0.5 # ### Test the new F_q implementation bfield = m.modules[0].Bfield_model k = np.logspace(np.log10(bfield.kMin), np.log10(bfield.kH), bfield.dkSteps) plt.semilogx(k, bfield.Fq(k) / bfield.Fq_old(k), ls='-') plt.axvline(bfield.kL, ls='--') plt.semilogx(k, bfield.Fq(k)) plt.semilogx(k, bfield.Fq_longitudinal(k)) # ### Run all modules # Now we run the modules. 
If ```multiprocess``` key word is larger than two, this will be split onto multiple cores with python's ```multiprocess``` module. # The ```px,py,pa``` variables contain the mixing probability into the two photon polarization states (x,y) and into the axion state (a). px,py,pa = m.run(multiprocess=2) # Test the matmul multiplication routine for i, T in enumerate(m._Tenv): print(i, T.shape) from gammaALPs.base.transfer import calc_conv_prob def calc_conv_prob_new(pin, pout, T): return np.squeeze(np.real(np.trace( (np.matmul(pout, np.matmul(T, np.matmul(pin, np.transpose(T.conjugate(), axes=(0,1,2))) ) ) ), axis1=1, axis2=2))) def calc_conv_prob_by_hand(pin, pout, T): # gives the same result as calc_conv_prob # loop over energies result = np.zeros(T.shape[0], dtype=np.float) for ie in range(T.shape[0]): Tdagger = np.transpose(np.conjugate(T[ie])) inner_most = np.dot(pin, Tdagger) inner = np.dot(T[ie], inner_most) outer = np.dot(pout, inner) result[ie] = np.real(np.trace(outer)) return result # + ## wrong order Tfinal_wrong = np.matmul( np.matmul(m._Tenv[0][0], m._Tenv[1][0]), m._Tenv[2][0] ) ## right order Tfinal_right = np.matmul( np.matmul(m._Tenv[2][0], m._Tenv[1][0]), m._Tenv[0][0] ) print (Tfinal_wrong.shape) # + px_wrong = calc_conv_prob(m.pin, m.px, Tfinal_wrong) px_also_wrong = calc_conv_prob_new(m.pin, m.px, Tfinal_wrong) py_wrong = calc_conv_prob(m.pin, m.py, Tfinal_wrong) py_also_wrong = calc_conv_prob_new(m.pin, m.py, Tfinal_wrong) px_maybe_right = calc_conv_prob(m.pin, m.px, Tfinal_right) px_also_maybe_right = calc_conv_prob_by_hand(m.pin, m.px, Tfinal_right) py_maybe_right = calc_conv_prob(m.pin, m.py, Tfinal_right) py_also_maybe_right = calc_conv_prob_by_hand(m.pin, m.py, Tfinal_right) # + plt.figure(dpi=150) plt.semilogx(m.EGeV, py_wrong + px_wrong) plt.semilogx(m.EGeV, px_also_wrong + py_also_wrong, ls='--') plt.semilogx(m.EGeV, py_maybe_right + px_maybe_right, ls='-.') #plt.semilogx(m.EGeV, px_also_maybe_right, ls=':') # - # ## Plot the output # + 
pgg = px + py # the total photon survival probability print (pgg.shape) print (np.min(np.median(pgg, axis = 0))) print (np.min(np.max(pgg, axis = 0))) effect = dict(path_effects=[withStroke(foreground="w", linewidth=2)]) for p in pgg: # plot all realizations plt.semilogx(m.EGeV, p) plt.xlabel('Energy (GeV)') plt.ylabel('Photon survival probability') plt.legend(loc = 0, fontsize = 'medium') plt.annotate(r'$m_a = {0:.1f}\,\mathrm{{neV}}, g_{{a\gamma}} = {1:.1f} \times 10^{{-11}}\,\mathrm{{GeV}}^{{-1}}$'.format(m.alp.m,m.alp.g), xy = (0.95,0.1), size = 'x-large', xycoords = 'axes fraction', ha = 'right',**effect) plt.gca().set_xscale('log') plt.gca().set_yscale('log') plt.subplots_adjust(left = 0.2) plt.savefig("pgg.png", dpi = 150) # - # # Save results # Save the results in an astropy table. from astropy.table import Table c = {} c['pgg'] = np.vstack((EGeV, pgg)) t = Table(c) t.write('ngc1275.fits', overwrite = True) t1 = Table.read('ngc1275.fits') t1 # ### Plot the magnetic field of the cluster, stored in module 0 plt.plot(m.modules["ICMGaussTurb"].r,m.modules["ICMGaussTurb"].B * np.sin(m.modules["ICMGaussTurb"].psi), lw=1) plt.plot(m.modules["ICMGaussTurb"].r,m.modules["ICMGaussTurb"].B * np.cos(m.modules["ICMGaussTurb"].psi), lw=1, ls = '--') plt.ylabel('$B$ field ($\mu$G)') plt.xlabel('$r$ (kpc)') # And plot the electron density: plt.loglog(m.modules["ICMGaussTurb"].r,m.modules[0].nel * 1e-3) plt.ylabel('$n_\mathrm{el}$ (cm$^{-3}$)') plt.xlabel('$r$ (kpc)') # You can also manipulate the magnetic field and electron density at run time # #### Calculate the coherence length of the transversal component $B$ field # It is also possible to compute the spatial correlation $C(x_3) = \langle B_\perp(\vec{x}) B_\perp(\vec{x} + x_3 \vec{e}_3)\rangle$ of the transversal magnetic field along the line of sight $x_3$: x3 = np.linspace(0.,50.,1000) # distance in kpc from cluster center c = m.modules["ICMGaussTurb"].Bfield_model.spatialCorr(x3) plt.plot(x3,c / c[0]) 
plt.xlabel("$x_3$ (kpc)") plt.ylabel("$C(x_3) / C(0)$") plt.grid(True) # This is turn can be used to calculate the coherence length of the field, # $$ \Lambda_C = \frac{1}{C(0)} \int\limits_0^\infty C(x_3)dx_3. $$ # # + from scipy.integrate import simps x3 = np.linspace(0.,1e3,1000) # distance in kpc from cluster center c = m.modules["ICMGaussTurb"].Bfield_model.spatialCorr(x3) Lambda_c = simps(c, x3) / c[0] print ("Coherence length of the field is Lambda_C = {0:.3e} kpc".format(Lambda_c)) # - # #### Calculate the rotation measure of the field m.modules["ICMGaussTurb"].Bfield_model.seed = 0 # or None rm = m.modules["ICMGaussTurb"].Bfield_model.rotation_measure(m.modules["ICMGaussTurb"].r, n_el=m.modules["ICMGaussTurb"].nel * 1e-3, nsim=1000) # Taylor et al. (2006) found RM values between 6500 and 7500 rad m^-2. Comparing B-field realizations to that number: # + from scipy.stats import norm n, bins, _ = plt.hist(np.sort((rm)), bins=30, density=True, label="Simulated RM") plt.xlabel("Rotation Measure (rad m${}^{-2}$)") plt.ylabel("Density") mean = np.mean(rm) var = np.var(rm) print ("RM mean +/- sqrt(var) in rad m^-2: {0:.2f} +/- {1:.2f}".format(mean, np.sqrt(var))) plt.plot(bins, norm.pdf(bins, loc=mean, scale=np.sqrt(var)), lw=2, label="Gaussian Fit\n$\mu = {0:.2f}$\n$\sigma={1:.2f}$".format(mean, np.sqrt(var))) print ("{0:.3f}% of B field realizations have |RM| > 7500 rad m^-2".format((np.abs(rm) > 7500).sum() / rm.size * 100.)) plt.legend() plt.gca().tick_params(labelleft=False, left=False, right=False) plt.savefig("sim_rm_perseus.png", dpi=150) # - # ### Plot the magnetic field of the Milky Way plt.plot(m.modules["GMF"].r, m.modules["GMF"].B * np.sin(m.modules["GMF"].psi), lw = 1) plt.plot(m.modules["GMF"].r, m.modules["GMF"].B * np.cos(m.modules["GMF"].psi), lw = 1) plt.ylabel('$B$ field ($\mu$G)') plt.xlabel('$r$ (kpc)')
notebooks/example_NGC1275_test_env.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt import audacity import TransferFunctions as tf import scipy.signal as sig # %matplotlib notebook # + dfiles = ['transfer/b-foot/2tieclip_reference_sines.aup'] dfiles.extend(('transfer/b-foot/2301_openstart.aup', 'transfer/b-foot/2301_closing_2.aup', 'transfer/b-foot/2301_closing_4.aup', 'transfer/b-foot/2301_closing_6.aup', 'transfer/b-foot/2301_closing_8.aup', 'transfer/b-foot/2301_closing_10.aup', 'transfer/b-foot/2301_closing_11.aup', 'transfer/b-foot/2301_closing_12.aup', 'transfer/b-foot/2301_closing_13.aup', 'transfer/b-foot/2301_closing_14.aup', 'transfer/b-foot/2301_closing_15.aup', 'transfer/b-foot/2301_closing_16.aup', )) # - def my_tfe(y,x,Fs=1.0, NFFT=1024): fy=[] fx=[] for istart in range(0,len(x)-NFFT,NFFT): xi = x[istart:istart+NFFT] yi = y[istart:istart+NFFT] fy.append(np.fft.fft(yi)) fx.append(np.fft.fft(xi)) fx=np.array(fx) fy=np.array(fy) ff = np.arange(NFFT)/NFFT*Fs return np.mean(fy[1:-1],axis=0)/np.mean(fx[1:-1],axis=0),ff,fx,fy #return np.mean(fy[1:-1]/fx[1:-1],axis=0),ff # + file=dfiles[1] print(file) au = audacity.Aup(file) rdata = [] maxl = 0 for ii in range(au.nchannels): rdata.append(au.get_channel_data(ii)) maxl = max(maxl,len(rdata[-1])) data = np.zeros((maxl,len(rdata))) for ii,rd in enumerate(rdata): data[:len(rd),ii]=rd fresp,ff,fx,fy=my_tfe(data[:,3],data[:,2],Fs=au.rate,NFFT=2**13-1) plt.figure() #plt.plot(ff,20*np.log10(np.abs(fresp))) #_=plt.plot(ff,20*np.log10(np.abs(fy)).T) _=plt.plot(ff,(np.angle(fy)).T[:,1:5]) # - fig,ax = plt.subplots(4,sharex=True) #plt.plot(ff,20*np.log10(np.abs(fresp))) #_=plt.plot(ff,20*np.log10(np.abs(fy)).T) _=ax[0].plot(ff,20*np.log10(np.abs((fy)).T[:,1:5])) _=ax[1].plot(ff,(np.angle(fy)).T[:,1:5]) 
_=ax[0].plot(ff,20*np.log10(np.abs(np.mean(fy[1:-1],axis=0)))) _=ax[1].plot(ff,(np.angle(np.mean(fy[1:-1],axis=0)))) _=ax[2].plot(ff,20*np.log10(np.abs((fx)).T[:,1:5])) _=ax[3].plot(ff,(np.angle(fx)).T[:,1:5]) _=ax[2].plot(ff,20*np.log10(np.abs(np.mean(fx[1:-1],axis=0)))) _=ax[3].plot(ff,(np.angle(np.mean(fx[1:-1],axis=0)))) ff=np.arange(2**13-1)/(2**13-1)*au.rate # + fig,ax = plt.subplots(2,sharex=True) #plt.plot(ff,20*np.log10(np.abs(fresp))) #_=plt.plot(ff,20*np.log10(np.abs(fy)).T) _=ax[0].plot(ff,20*np.log10(np.abs((fresp)))) _=ax[1].plot(ff,(np.angle(fresp))) fresp1,ff1=tf.tfe(data[:,3],data[:,2],Fs=au.rate,NFFT=2**11) _=ax[0].plot(ff1,20*np.log10(np.abs((fresp1)))) _=ax[1].plot(ff1,(-np.angle(fresp1))) # + # Load File, read data, and pad to have same length on all channels nfft=1024*2 tfdata = [] for file in dfiles: print(file) au = audacity.Aup(file) rdata = [] maxl = 0 for ii in range(au.nchannels): rdata.append(au.get_channel_data(ii)) maxl = max(maxl,len(rdata[-1])) data = np.zeros((maxl,len(rdata))) for ii,rd in enumerate(rdata): data[:len(rd),ii]=rd delay=tf.determineDelay(data[:,0]/np.mean(data[:,0]),data[:,2]/np.mean(data[:,2]),maxdel=2**15) print("Delay: %d samples"%delay) data[:,0]=np.roll(data[:,0],delay) sr=au.rate tfxy,ff=tf.tfe(data[:,3],data[:,2],Fs=sr,NFFT=nfft) #tfxy,ff=my_tfe(data[:,3],data[:,2],Fs=sr,NFFT=nfft) #coh,ff=tf.cohere(data[:,1],data[:,2],Fs=sr,NFFT=nfft) ff,coh=sig.coherence(data[:,2],data[:,3],fs=sr,nperseg=nfft) datadict={'tf':tfxy,'coh':coh} for chno in [au.nchannels-2,au.nchannels-1]: tfxy,ff=tf.tfe(data[:,chno],data[:,0],Fs=sr,NFFT=nfft) #coh,ff=tf.cohere(data[:,1],data[:,2],Fs=sr,NFFT=nfft) # ff,coh=sig.coherence(data[:,0],data[:,chno],fs=sr,nperseg=nfft) datadict['mic%d'%(chno-1)]=tfxy datadict['sensRMS']=np.sqrt(np.mean((data[:,1]-np.mean(data[:,1]))**2)) tfdata.append(datadict) # + import re positions = [] for file in dfiles: matches = re.findall('_[0-9]+',file) thispos = np.nan if len(matches) > 0: thispos = 
int(matches[0][1:]) positions.append(thispos) plt.figure() plt.plot(positions, [xx['sensRMS'] for xx in tfdata],'o-') # - import re [re.findall('[0-9]+',xx) for xx in dfiles] # + fig,ax = plt.subplots(2,sharex=True) recno = 1 refno = 0 fig.set_label('Recording %d, internal vs external spectra' % recno) ax[0].plot(ff,20*np.log10(np.abs(tfdata[recno]['mic2']/tfdata[refno]['mic2'])), label='Internal Mic') ax[1].plot(ff,(np.angle(tfdata[recno]['mic2']/tfdata[refno]['mic2']))) ax[0].plot(ff,20*np.log10(np.abs(tfdata[recno]['mic1']/tfdata[refno]['mic1'])), label='External Mic') ax[1].plot(ff,(np.angle(tfdata[recno]['mic1']/tfdata[refno]['mic1']))) ax[0].axvline(1319, C='r') ax[0].legend(loc='lower right') ax[0].set_xlim((0, 2500)) # + fig,ax = plt.subplots(3,sharex=True, figsize=((8, 8))) fig.set_label('Transfer functions') # chno = 1 refno = 0 for recno in [1,2,3,4,5,6,7,8,9,10,11,12]: #ax[0].plot(ff,20*np.log10(np.abs(tfdata[recno]['tf']/tfdata[refno]['tf'])), label='recording %d' % recno) ax[0].plot(ff,20*np.log10(np.abs(tfdata[recno]['tf'])), label='recording %d' % recno) ax[0].set_xlim((0, 4000)) ax[0].legend(loc='lower right') ax[0].axvline(1319) #ax[1].plot(ff,(np.angle(tfdata[recno]['tf']/tfdata[refno]['tf']))) ax[1].plot(ff,(np.angle(tfdata[recno]['tf']))) #coh,ff=tf.cohere(data[:,1],data[:,2],Fs=sr,NFFT=nfft) ax[2].plot(ff,np.min([tfdata[recno]['coh'],tfdata[refno]['coh']],axis=0)) # + gamma=1.4 P_a=101000 rho_a=1.29 D=0.0139 S=np.pi*D l=0.006 # L=l+0.3*D A=S*l bore=0.0186 foot=0.16 V=(np.pi*bore**2/4)*foot omega=np.sqrt((gamma*A*P_a)/(V*l*rho_a)) f=omega/(2*np.pi) S/l, f # + # range? # - np.mean((data[:,1]-np.mean(data[:,1]))**2)
sumsines_transferfunct_[test].ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <table> # <tr><td align="right" style="background-color:#ffffff;"> # <img src="../images/logo.jpg" width="20%" align="right"> # </td></tr> # <tr><td align="right" style="color:#777777;background-color:#ffffff;font-size:12px;"> # <NAME> | April 15, 2019 (updated) # </td></tr> # <tr><td align="right" style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;"> # This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. # </td></tr> # </table> # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\dot}[2]{ #1 \cdot #2} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ # $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ 
\frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ # <h2> <font color="blue"> Solutions for </font>Drawing a Qubit</h2> # <a id="task1"></a> # <h3> Task 1 </h3> # # Write a function that returns a randomly created 2-dimensional (real-valued) quantum state. # # <table align="left"><tr><td><i> # You may use your code written for <a href="B28_Quantum_State.ipynb#task2">a task given in notebook "Quantum States"</a>. # </i></td></tr></table> # <br><br> # # Create 100 random quantum states by using your function, and then draw all of them as points. # <h3>Solution</h3> # A function for randomly creating a 2-dimensional quantum state: # randomly create a 2-dimensional quantum state from random import randrange def random_quantum_state(): first_entry = randrange(101) first_entry = first_entry/100 first_entry = first_entry**0.5 if randrange(2) == 0: first_entry = -1 * first_entry second_entry = 1 - (first_entry**2) second_entry = second_entry**0.5 if randrange(2) == 0: second_entry = -1 * second_entry return [first_entry,second_entry] # Drawing randomly created 100 quantum states as blue points: # + # import the drawing methods from matplotlib.pyplot import plot, figure # draw a figure figure(figsize=(6,6), dpi=60) # draw the origin plot(0,0,'ro') for i in range(100): # create a random quantum state quantum_state = random_quantum_state(); # draw a blue point for the random quantum state x = quantum_state[0]; y = quantum_state[1]; plot(x,y,'bo') # - # <a id="task2"></a> # <h3> Task 2 </h3> # # Repeat the previous task by drawing the quantum states as vectors (arrows) instead of points. 
# # <i>Please keep the codes below for drawing axes for getting a better visual focus.</i> # <h3>Solution</h3> # A function for randomly creating a 2-dimensional quantum state: # randomly create a 2-dimensional quantum state from random import randrange def random_quantum_state(): first_entry = randrange(101) first_entry = first_entry/100 first_entry = first_entry**0.5 if randrange(2) == 0: first_entry = -1 * first_entry second_entry = 1 - (first_entry**2) second_entry = second_entry**0.5 if randrange(2) == 0: second_entry = -1 * second_entry return [first_entry,second_entry] # Drawing randomly created 100 quantum states as blue vectors (arrows): # + # import the drawing methods from matplotlib.pyplot import plot, figure, arrow # %run qlatvia.py # draw a figure figure(figsize=(6,6), dpi=60) draw_axes(); # draw the origin plot(0,0,'ro') for i in range(100): # create a random quantum state quantum_state = random_quantum_state(); # draw a blue vector for the random quantum state x = quantum_state[0]; y = quantum_state[1]; # shorten the line length to 0.92 # line_length + head_length (0.08) should be 1 x = 0.92 * x y = 0.92 * y arrow(0,0,x,y,head_width=0.04,head_length=0.08,color="blue") # - # <a id="task3"></a> # <h3> Task 3 </h3> # # Write a function that displays a quantum state with a name. # # The parameters of the function should be (x,y,name). # # Randomly pick a quantum state and display it with the axes and the unit circle. # # The arrow head should be on the unit circle. # # The name of the quantum state should be displayed out of the unit circle. # # Test your function with 6 random quantum states. # # Save your function for later usage. 
# <h3>Solution</h3>

# +
# # %%writefile FILENAME.py

# import the drawing methods
from matplotlib.pyplot import figure, arrow, text

def display_quantum_state(x,y,name):
    """Draw the quantum state (x, y) as a blue arrow whose head ends on the
    unit circle, and write its name just outside the circle."""
    # shaft is drawn at 92% length so that shaft (0.92) + head (0.08) = 1,
    # i.e. the arrow tip lands exactly on the unit circle
    tip_x = 0.92 * x
    tip_y = 0.92 * y
    arrow(0, 0, tip_x, tip_y, head_width=0.04, head_length=0.08, color="blue")
    # the label sits 15% beyond the unit circle, along the same direction
    label_x = 1.15 * x
    label_y = 1.15 * y
    text(label_x, label_y, name)

# +
# # test your function
#
# import the drawing methods
from matplotlib.pyplot import figure

figure(figsize=(6,6), dpi=80) # size of the figure

# include our predefined functions
# %run qlatvia.py

# draw axes
draw_axes()

# draw the unit circle
draw_unit_circle()

# draw and label six random quantum states
for i in range(6):
    s = random_quantum_state()
    display_quantum_state(s[0],s[1],"v"+str(i))
    #draw_quantum_state(s[0],s[1],"v"+str(i))
# -
bronze/B50_Drawing_a_Qubit_Solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np from sklearn.manifold import TSNE from utils import * # + # Load data path = '.\data\stock_port.csv' df = load_data(path) # Convert df to sparse matrix sp_matrix, row_ind_dict, col_ind_dict = convert_data_sparse_matrix(df) # Basic Info print('Dimension of sparse_matrix is ', sp_matrix.shape) row_dim = sp_matrix.shape[0] col_dim = sp_matrix.shape[1] # Calculate shareholding % by stock_code sp_matrix_stock = sp_matrix / np.sum(sp_matrix, axis = 1).reshape(row_dim, -1) # Calculate shareholding % by shareholder # sp_matrix_shareholder = sp_matrix / np.sum(sp_matrix, axis = 1).reshape(row_dim, -1) sp_matrix_shareholder = sp_matrix / np.sum(sp_matrix, axis = 0).reshape(-1, col_dim) # Element-wise multiply two matrix sp_matrix_stock_shareholder = sp_matrix_stock * sp_matrix_shareholder # + # Apply TSNE to sp_matrix_stock_shareholder X_embedded = TSNE(n_components = 2, perplexity = 100, learning_rate = 500).fit_transform(sp_matrix_stock_shareholder) # - df_tsne = pd.DataFrame(X_embedded, columns = ['X1', 'X2']) df_tsne['stock_code'] = df_tsne.reset_index()['index'].apply(lambda x: {j:i for i,j in row_ind_dict.items()}[x]) df_tsne highlight_stock_code = input('Stock Code: ') df_tsne['highlight'] = df_tsne['stock_code'].apply(lambda x: 1 if x == highlight_stock_code else 0) df_tsne # + import plotly.express as px fig = px.scatter(df_tsne,'X1','X2', hover_name = 'stock_code', color = 'highlight') fig.add_scatter() fig.show() # - # Find out row that has > 50% of its shareholding held by a single shareholder (sp_matrix_stock > 0.01).sum() sp_matrix_shareholder.max() sp_matrix_stock.max() ind = np.unravel_index(np.argmax(sp_matrix_stock, axis = None), sp_matrix_stock.shape) {j:i for i, j in row_ind_dict.items()}[ind[0]] 
{j:i for i, j in col_ind_dict.items()}[ind[1]]

sp_matrix_shareholder[ind[0], ind[1]]

df = pd.read_csv('stock_port.csv')
# zero-pad stock codes to the 5-character format (e.g. 5 -> '00005')
df['stock_code'] = df['stock_code'].apply(lambda x: ('00000' + str(x))[-5:])
df.head()

df['name_of_ccass_participant'].unique()

# +
# Prepare zeros matrix
row_dim = len(df['stock_code'].unique())
col_dim = len(df['name_of_ccass_participant'].unique())
print('Row dimension: ', row_dim)
print('Column dimension: ', col_dim)
sparse_matrix = np.zeros((row_dim, col_dim))

# +
# Prepare label to index dictionaries
# One for stock_code, one for name of ccass_participant
stock_code_ind = {stock_code:ind for ind, stock_code in enumerate(sorted(df['stock_code'].unique().tolist()))}
shareholder_ind = {shareholder:ind for ind, shareholder in enumerate(sorted(df['name_of_ccass_participant'].unique().tolist()))}
# -

# apply the dict to df
df['stock_code_ind'] = df['stock_code'].apply(lambda x: stock_code_ind[x])
df['shareholder_ind'] = df['name_of_ccass_participant'].apply(lambda x: shareholder_ind[x])
df

# +
# Accumulate shareholdings into the matrix in a single vectorized call.
# np.add.at performs an unbuffered "+=", so duplicate (stock, holder) pairs
# are summed correctly -- same result as the previous iterrows() loop, but
# without rebinding the stock_code_ind / shareholder_ind lookup dictionaries
# to per-row scalars (which broke re-running earlier cells) and without the
# O(n) Python-level row iteration.
np.add.at(
    sparse_matrix,
    (df['stock_code_ind'].to_numpy(), df['shareholder_ind'].to_numpy()),
    df['shareholding'].to_numpy(),
)
# -
DataAnalysis/DataAnalysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Naive Disambiguation (Method 1)
#
# __<NAME> (DTU/2K16/MC/013)__
#
# __Natural Language Processing (Dr. <NAME>)__
#
# Naive disambiguation as the name suggests is a simple and naive method to disambiguate the sense of a given word. This method doesn't use any surrounding words or any other similarity metric for that matter, but simply uses the first sense given by the synsets in the `wordnet` interface in `nltk`.

# Importing all required packages
import nltk
from nltk.corpus import wordnet
nltk.download('wordnet')
import pprint


# create a method that returns the first synset of a given word
def naive_disambiguation(word: str):
    """Return the first WordNet synset of `word`, or None if no synset exists.

    WordNet orders synsets by estimated frequency of use, so the first one is
    the naive "most likely" sense regardless of surrounding context.
    """
    synsets = wordnet.synsets(word)
    # guard against words unknown to WordNet; synsets[0] would raise IndexError
    if not synsets:
        return None
    return synsets[0]


# Test your own word and disambiguate the meaning of it
word = 'java'  # you can replace this here
sense = naive_disambiguation(word)
if sense is None:
    print('No WordNet sense found for:', word)
else:
    print('Definition:', sense.definition())
    print('Examples:')
    pprint.pprint(sense.examples())

# In the above example we see the word __java__ is disambiguated to the island Java irrespective of where we use the word __java__ in our document. We can use it in _"Java is my favourite Programming Language"_ or _"I have a cup of Java in the morning"_.
#
# We can see all senses of the word __Java__ below:

for synset in wordnet.synsets('java'):
    print(synset.definition())
notebooks/naive-disambiguation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <center><h1><font color="00695c">Board Games</font></h1></center>
#
# This notebook gives a deep-dive into the board games that I have played over
# the last 6 months. The data is relatively simple, but makes it that more
# interesting to see how much information we can retrieve.
#
# ## <a name="table">Table of Contents</a>
# 1. [Functions](#functions)
# 2. [Preprocess Data](#preprocess)
# 3. [General Stats per Person](#generalstats)
# 4. [Me vs. The Wife](#mevsthewife)

# ## <a name="functions">1. Functions</a>
# [Back to Table of Contents](#table)

# +
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
# %matplotlib inline


def calculate_score(row, name='M'):
    """Extract one person's points from a combined score string.

    Scores are stored like ``"30M+25I"``: digits followed by a person's
    initials, joined by ``+``.  Returns the digits (as a string) for the
    entry whose initials equal *name*, or ``None`` when the person has no
    entry in *row*.
    """
    score_list = str(row).split("+")
    for score in score_list:
        score_alpha = ''.join(x for x in score if x.isalpha())
        if name == score_alpha:
            # Keep only the numeric part of this entry.
            return ''.join(c for c in score if c.isdigit())
    return None


def calculate_individual_score(df):
    """Calculate the scores for each individual and return a new
    ``<Name>_points`` column for each person."""
    people = {"Ilse": "I", "Maarten": "M", "Edith": "E", "Guy": "G",
              "Mam": "MA", "Pap": "PA", "Wouter": "WO", "Evi": "EV",
              "Iris": "IR", "Daan": "DA", "Christopher": "C"}
    for key, item in people.items():
        df['{}_points'.format(key)] = df.apply(
            lambda row: calculate_score(row.Scores, item), 1)
    return df


def calculate_won(row, person):
    """Return 1 if *person* won the game, based on whether its initials
    appear in the ``Winner`` cell (*row*); otherwise 0.

    A NaN winner together with ``person=None`` marks the game as 'Lost'
    (i.e. nobody in the household won).
    """
    if isinstance(row, str):
        return 1 if person == row else 0
    # Winner cell is NaN: only counts for the person=None ("Lost") column.
    if math.isnan(row) and not isinstance(person, str):
        return 1
    return 0


def prepare_ilse_vs_maarten(df):
    """Select the two-player (Ilse vs. Maarten) games and add
    won/draw/lost indicator columns."""
    ilse_vs_maarten = df.loc[(df.Players == "I+M"), :]
    ilse_vs_maarten = ilse_vs_maarten[
        ['Date', 'Players', 'Game', 'Scores', 'Winner', 'Version',
         'Ilse_points', 'Maarten_points']].copy()
    ilse_vs_maarten.Ilse_points = ilse_vs_maarten.Ilse_points.astype(float)
    ilse_vs_maarten.Maarten_points = ilse_vs_maarten.Maarten_points.astype(float)
    for column, person in zip(['Ilse_won', 'Maarten_won', 'Draw', 'Lost'],
                              ['I', 'M', 'I+M', None]):
        ilse_vs_maarten[column] = ilse_vs_maarten.apply(
            lambda row: calculate_won(row.Winner, person), 1)
    return ilse_vs_maarten


def get_winner_based_on_points(row, points_columns):
    """Convert None to 0 as with all games the higher the points the
    better.  Then, simply check who got the highest score and return that
    person's name (or ``None`` when nobody scored)."""
    scores = [int(row[person]) if row[person] else 0
              for person in points_columns]
    if any(scores):
        index_max = scores.index(max(scores))
        return points_columns[index_max].split("_")[0]
    return None


def calculate_results_per_person(df):
    """Summarise, per person, games played/won and the average number of
    participants in the games they took part in."""
    # Calculate the winner of games based on the total number of points per person
    points_columns = ['Ilse_points', 'Maarten_points', 'Edith_points',
                      'Guy_points', 'Mam_points', 'Pap_points',
                      'Wouter_points', 'Evi_points', 'Iris_points',
                      'Daan_points', 'Christopher_points']
    participants = [person.split("_")[0] for person in points_columns]
    df['Winner_Points'] = df.apply(
        lambda row: get_winner_based_on_points(row, points_columns), 1)

    # Only select games that have a winner based on points
    temp_df = df.loc[df.Winner_Points.dropna().index, :]
    print("% of games played based on points: {}".format(len(temp_df) / len(df) * 100))

    # Calculate summary statistics per person
    games_played = [len(temp_df[person].dropna()) for person in points_columns]
    games_won = [len(temp_df[temp_df.Winner_Points == participant])
                 for participant in participants]
    avg_participants = [temp_df.loc[temp_df[person].dropna().index,
                                    'Participants'].mean()
                        for person in points_columns]
    std_participants = [temp_df.loc[temp_df[person].dropna().index,
                                    'Participants'].std()
                        for person in points_columns]

    # Put it all together
    results = pd.DataFrame(np.vstack((games_played, games_won, participants,
                                      avg_participants, std_participants)).T)
    results.columns = ['Played', 'Won', 'Participant',
                       'Avg_Participants_Per_Game', 'Std_Participants']
    results.Played = results.Played.astype(int)
    results.Won = results.Won.astype(int)
    results.Avg_Participants_Per_Game = results.Avg_Participants_Per_Game.astype(float)
    results.Std_Participants = results.Std_Participants.astype(float)
    results = results.sort_values("Played", ascending=False)
    results['Win_Percentage'] = results.Won / results.Played * 100
    return results


def create_stats_per_board_game(ilse_vs_maarten):
    """Calculate sum, mean, median, std of scores per board game."""
    matches = (ilse_vs_maarten.groupby(['Game', 'Players', 'Version'])
               .agg({"Ilse_points": ['mean', 'median', 'std'],
                     "Maarten_points": ['mean', 'median', 'std'],
                     "Ilse_won": ['sum'],
                     "Maarten_won": ['sum'],
                     "Draw": ['sum'],
                     "Lost": ['sum']})
               .reset_index())
    # Flatten the MultiIndex columns ("Ilse_won sum" -> "Ilse_won").
    matches.columns = [' '.join(col).strip() for col in matches.columns.values]
    matches.columns = [column.replace("sum", "").strip()
                       for column in matches.columns]
    matches['Total'] = matches.apply(
        lambda row: row.Lost + row.Maarten_won + row.Ilse_won + row.Draw, 1)
    matches['Full_game'] = matches.apply(
        lambda row: row.Game + " " + row.Version
        if row.Version != 'Normal' else row.Game, 1)
    return matches
# -

# ## <a name="preprocess">2. Preprocess</a>
# [Back to Table of Contents](#table)
# Prepare the data such that each person that has played a match gets its
# own column tracking its points.

df = pd.read_excel("matches.xlsx")
df = calculate_individual_score(df)
df['Participants'] = df.apply(lambda row: row.Players.count("+") + 1, 1)

# ## <a name="generalstats">3. General Stats per Person</a>
# [Back to Table of Contents](#table)

# #### <a name="generalstats-played">3.1. Played Games + Won</a>
# [Back to Table of Contents](#table)

results_per_person = calculate_results_per_person(df)
results_per_person

# +
plt.rcdefaults()
fig, ax = plt.subplots()

people = results_per_person.Participant
y_pos = np.arange(len(people))
won = results_per_person.Won
played = results_per_person.Played

# Overlay games won on top of games played.
ax.barh(y_pos, played, align='center', color='#80cbc4')
ax.barh(y_pos, won, align='center', color='#00897b')
ax.set_yticks(y_pos)
ax.set_yticklabels(people)
ax.invert_yaxis()  # labels read top-to-bottom
ax.set_title('Number of Games Played vs. Numbers of Games Won')
plt.show()

# +
plt.rcdefaults()
fig, ax = plt.subplots()

people = results_per_person.Participant
y_pos = np.arange(len(people))
percentage = results_per_person.Win_Percentage

ax.barh(y_pos, percentage, align='center', color='#80cbc4')
ax.set_yticks(y_pos)
ax.set_yticklabels(people)
ax.invert_yaxis()  # labels read top-to-bottom
ax.set_title('Number of Games Played vs. Numbers of Games Won')
plt.show()
# -

# ## <a name="mevsthewife">4. Me vs. The Wife</a>
# [Back to Table of Contents](#table)

# #### <a name="mevsthewife-selection">4.1. Selection</a>
# [Back to Table of Contents](#table)
# A selection of the data is taken showing matches between me and the wife.

ilse_vs_maarten = prepare_ilse_vs_maarten(df)
ilse_vs_maarten.head(5)

# #### <a name="mevsthewife-stats">4.2. Stats per Board Game</a>
# [Back to Table of Contents](#table)

matches = create_stats_per_board_game(ilse_vs_maarten)
matches.head()

# **To prepare for D3** (not used)

file = [{"Name": "Ilse"}]
for row in matches.iterrows():
    # BUG FIX: after stripping the 'sum' suffix above, the summed win
    # column is named 'Ilse_won'; 'I_won' raised a KeyError here.
    file[0][row[1].Full_game] = row[1]['Ilse_won']
boardgame/notebooks/EDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---


def isPalindrome(s):
    """Return True if *s* reads the same forwards and backwards when only
    alphanumeric characters are considered and case is ignored.

    ``None`` and the empty string count as palindromes (LeetCode 125
    convention).  Runs in O(len(s)) time, O(1) extra space, using two
    pointers that walk towards each other.

    Debug ``print`` statements from the original draft were removed: the
    predicate now produces no output, only a boolean.
    """
    if s is None or len(s) == 0:
        return True
    left = 0
    right = len(s) - 1
    while left < right:
        # Skip characters that are not letters or digits on both ends.
        while left < right and not s[left].isalnum():
            left = left + 1
        while left < right and not s[right].isalnum():
            right = right - 1
        # Case-insensitive comparison of the two candidate characters.
        if s[left].lower() != s[right].lower():
            return False
        left = left + 1
        right = right - 1
    return True


isPalindrome("A man, a plan, a canal: Panama")

isPalindrome("race a car")
125. Valid Palindrome.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
#1. Write a Python program to convert kilometers to miles?

km = int(input("Enter the distance in kilometers : "))
# 1 km = 0.621371 miles
miles = km * 0.621371
print("no of miles : ", miles)

# +
#2. Write a Python program to convert Celsius to Fahrenheit?

c = float(input("Enter the temperature in celcius : "))
f = (c * 9 / 5) + 32
print("the given temperature is equivalent to {} fahrenheit".format(f))

# +
#3. Write a Python program to display calendar?

import calendar

y = int(input("Enter year: "))
m = int(input("Enter month: "))
print(calendar.month(y, m))

# +
#4. Write a Python program to solve quadratic equation?

import cmath

# Solve a*x**2 + b*x + c = 0 with the quadratic formula; cmath handles
# a negative discriminant (complex roots) transparently.
a = 1
b = 5
c = 6
d = (b ** 2) - (4 * a * c)
sol1 = (-b - cmath.sqrt(d)) / (2 * a)
sol2 = (-b + cmath.sqrt(d)) / (2 * a)
print("the solutions are : {},{}".format(sol1, sol2))

# +
#5. Write a Python program to swap two variables without temp variable?

a = int(input("Enter first digit "))
b = int(input("Enter second digit "))
# BUG FIX: the original called the builtin ``format(a, b)`` (comma instead
# of dot), which raises TypeError because ``b`` is not a format spec.
# ``str.format`` is the intended call.
print("Before swap : a = {},b = {}".format(a, b))
a, b = b, a
print("after swap : a = {},b = {}".format(a, b))
# -
Programming_Assingment_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="OoasdhSAp0zJ" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" colab_type="code" id="cIrwotvGqsYh" colab={} #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="C81KT2D_j-xR" # # 추정기(Estimator)로 선형 모델 만들기 # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/tutorials/estimator/linear"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ko/tutorials/estimator/linear.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ko/tutorials/estimator/linear.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub) 소스 보기</a> # </td> # </table> # + [markdown] colab_type="text" id="tUP8LMdYtWPz" # Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 불구하고 [공식 영문 문서](https://github.com/tensorflow/docs/blob/master/site/en/guide/gpu.ipynb)의 내용과 일치하지 않을 수 있습니다. 
이 번역에 개선할 부분이 있다면 [tensorflow/docs](https://github.com/tensorflow/docs) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다. 문서 번역이나 리뷰에 참여하려면 [<EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로 메일을 보내주시기 바랍니다. # + [markdown] colab_type="text" id="nNgxmCgx8aAM" # ## 개요 # # 이 문서에서는 `tf.estimator` API를 사용하여 로지스틱 회귀 모델(logistic regression model)을 훈련합니다. 이 모델은 다른 더 복잡한 알고리즘의 기초로 사용할 수 있습니다. # # + [markdown] colab_type="text" id="vkC_j6VpqrDw" # ## 설정 # + colab_type="code" id="rutbJGmpqvm3" colab={} # !pip install sklearn # + colab_type="code" id="54mb4J9PqqDh" colab={} from __future__ import absolute_import, division, print_function, unicode_literals import os import sys import numpy as np import pandas as pd import matplotlib.pyplot as plt from IPython.display import clear_output from six.moves import urllib # + [markdown] colab_type="text" id="fsjkwfsGOBMT" # ## 타이타닉 데이터셋을 불러오기 # 타이타닉 데이터셋을 사용할 것입니다. 성별, 나이, 클래스, 기타 등 주어진 정보를 활용하여 승객이 살아남을 것인지 예측하는 것을 목표로 합니다. # + colab_type="code" id="bNiwh-APcRVD" colab={} try: # Colab only # %tensorflow_version 2.x except Exception: pass import tensorflow.compat.v2.feature_column as fc import tensorflow as tf # + colab_type="code" id="DSeMKcx03d5R" colab={} # 데이터셋 불러오기. dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv') dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv') y_train = dftrain.pop('survived') y_eval = dfeval.pop('survived') # + [markdown] colab_type="text" id="jjm4Qj0u7_cp" # ## 데이터 탐험하기 # + [markdown] colab_type="text" id="UrQzxKKh4d6u" # 데이터셋은 다음의 특성을 가집니다 # + colab_type="code" id="rTjugo3n308g" colab={} dftrain.head() # + colab_type="code" id="y86q1fj44lZs" colab={} dftrain.describe() # + [markdown] colab_type="text" id="8JSa_duD4tFZ" # 훈련셋은 627개의 샘플로 평가셋은 264개의 샘플로 구성되어 있습니다. # + colab_type="code" id="Fs3Nu5pV4v5J" colab={} dftrain.shape[0], dfeval.shape[0] # + [markdown] colab_type="text" id="RxCA4Nr45AfF" # 대부분의 승객은 20대와 30대 입니다. 
# + colab_type="code" id="RYeCMm7K40ZN" colab={} dftrain.age.hist(bins=20) # + [markdown] colab_type="text" id="DItSwJ_B5B0f" # 남자 승객이 여자 승객보다 대략 2배 많습니다. # + colab_type="code" id="b03dVV9q5Dv2" colab={} dftrain.sex.value_counts().plot(kind='barh') # + [markdown] colab_type="text" id="rK6WQ29q5Jf5" # 대부분의 승객은 "삼등석" 입니다. # + colab_type="code" id="dgpJVeCq5Fgd" colab={} dftrain['class'].value_counts().plot(kind='barh') # + [markdown] colab_type="text" id="FXJhGGL85TLp" # 여자는 남자보다 살아남을 확률이 훨씬 높습니다. 이는 명확하게 모델에 유용한 특성입니다. # + colab_type="code" id="lSZYa7c45Ttt" colab={} pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive') # + [markdown] colab_type="text" id="VqDKQLZn8L-B" # ## 모델을 위한 특성 공학(feature engineering) # 추정기는 [특성 열(feature columns)](https://www.tensorflow.org/guide/feature_columns)이라는 시스템을 사용하여 모델이 각각의 입력 특성을 어떻게 해석할지 설명합니다. 추정기가 숫자 입력 벡터를 요구하면, *특성 열*은 모델이 어떻게 각 특성을 변환해야하는지 설명합니다. # # 효과적인 모델 학습에서는 적절한 특성 열을 고르고 다듬는 것이 키포인트 입니다. 하나의 특성 열은 특성 딕셔너리(dict)의 원본 입력으로 만들어진 열(*기본 특성 열*)이거나 하나 이상의 기본 열(*얻어진 특성 열*)에 정의된 변환을 이용하여 새로 생성된 열입니다. # # 선형 추정기는 수치형, 범주형 특성을 모두 사용할 수 있습니다. 특성 열은 모든 텐서플로 추정기와 함께 작동하고 목적은 모델링에 사용되는 특성들을 정의하는 것입니다. 또한 원-핫-인코딩(one-hot-encoding), 정규화(normalization), 버킷화(bucketization)와 같은 특성 공학 방법을 지원합니다. 
# + [markdown] colab_type="text" id="puZFOhTDkblt" # ### 기본 특성 열 # + colab_type="code" id="GpveXYSsADS6" colab={} CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck', 'embark_town', 'alone'] NUMERIC_COLUMNS = ['age', 'fare'] feature_columns = [] for feature_name in CATEGORICAL_COLUMNS: vocabulary = dftrain[feature_name].unique() feature_columns.append(tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocabulary)) for feature_name in NUMERIC_COLUMNS: feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32)) # + [markdown] colab_type="text" id="Gt8HMtwOh9lJ" # `input_function`은 입력 파이프라인을 스트리밍으로 공급하는 `tf.data.Dataset`으로 데이터를 변환하는 방법을 명시합니다. `tf.data.Dataset`은 데이터 프레임, CSV 형식 파일 등과 같은 여러 소스를 사용합니다. # + colab_type="code" id="qVtrIHFnAe7w" colab={} def make_input_fn(data_df, label_df, num_epochs=10, shuffle=True, batch_size=32): def input_function(): ds = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df)) if shuffle: ds = ds.shuffle(1000) ds = ds.batch(batch_size).repeat(num_epochs) return ds return input_function train_input_fn = make_input_fn(dftrain, y_train) eval_input_fn = make_input_fn(dfeval, y_eval, num_epochs=1, shuffle=False) # + [markdown] colab_type="text" id="P7UMVkQnkrgb" # 다음과 같이 데이터셋을 점검할 수 있습니다: # + colab_type="code" id="8ZcG_3KiCb1M" colab={} ds = make_input_fn(dftrain, y_train, batch_size=10)() for feature_batch, label_batch in ds.take(1): print('특성 키:', list(feature_batch.keys())) print() print('클래스 배치:', feature_batch['class'].numpy()) print() print('레이블 배치:', label_batch.numpy()) # + [markdown] colab_type="text" id="lMNBMyodjlW3" # 또한 `tf.keras.layers.DenseFeatures` 층을 사용하여 특정한 특성 열의 결과를 점검할 수 있습니다: # + colab_type="code" id="IMjlmbPlDmkB" colab={} age_column = feature_columns[7] tf.keras.layers.DenseFeatures([age_column])(feature_batch).numpy() # + [markdown] colab_type="text" id="f4zrAdCIjr3s" # `DenseFeatures`는 조밀한(dense) 텐서만 허용합니다. 
범주형 데이터를 점검하려면 우선 범주형 열에 indicator_column 함수를 적용해야 합니다: # + colab_type="code" id="1VXmXFTSFEvv" colab={} gender_column = feature_columns[0] tf.keras.layers.DenseFeatures([tf.feature_column.indicator_column(gender_column)])(feature_batch).numpy() # + [markdown] colab_type="text" id="MEp59g5UkHYY" # 모든 기본 특성을 모델에 추가한 다음에 모델을 훈련해 봅시다. 모델을 훈련하려면 `tf.estimator` API를 이용한 메서드 호출 한번이면 충분합니다: # + colab_type="code" id="aGXjdnqqdgIs" colab={} linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns) linear_est.train(train_input_fn) result = linear_est.evaluate(eval_input_fn) clear_output() print(result) # + [markdown] colab_type="text" id="3tOan4hDsG6d" # ### 도출된 특성 열 # + [markdown] colab_type="text" id="NOG2FSTHlAMu" # 이제 정확도 75%에 도달했습니다. 별도로 각 기본 특성 열을 사용하면 데이터를 설명하기에는 충분치 않을 수 있습니다. 예를 들면, 성별과 레이블간의 상관관계는 성별에 따라 다를 수 있습니다. 따라서 `gender="Male"`과 'gender="Female"`의 단일 모델가중치만 배우면 모든 나이-성별 조합(이를테면 `gender="Male" 그리고 'age="30"` 그리고 `gender="Male"` 그리고 `age="40"`을 구별하는 것)을 포함시킬 수 없습니다. # # 서로 다른 특성 조합들 간의 차이를 학습하기 위해서 모델에 *교차 특성 열*을 추가할 수 있습니다(또한 교차 열 이전에 나이 열을 버킷화할 수 있습니다): # + colab_type="code" id="AM-RsDzNfGlu" colab={} age_x_gender = tf.feature_column.crossed_column(['age', 'sex'], hash_bucket_size=100) # + [markdown] colab_type="text" id="DqDFyPKQmGTN" # 조합 특성을 모델에 추가하고 모델을 다시 훈련합니다: # + colab_type="code" id="s8FV9oPQfS-g" colab={} derived_feature_columns = [age_x_gender] linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns+derived_feature_columns) linear_est.train(train_input_fn) result = linear_est.evaluate(eval_input_fn) clear_output() print(result) # + [markdown] colab_type="text" id="rwfdZj7ImLwb" # 이제 정확도 77.6%에 도달했습니다. 기본 특성만 이용한 학습보다는 약간 더 좋았습니다. 더 많은 특성과 변환을 사용해서 더 잘할 수 있다는 것을 보여주세요! # + [markdown] colab_type="text" id="8_eyb9d-ncjH" # 이제 훈련 모델을 이용해서 평가셋에서 승객에 대해 예측을 할 수 있습니다. 텐서플로 모델은 한번에 샘플의 배치 또는 일부에 대한 예측을 하도록 최적화되어있습니다. 앞서, `eval_input_fn`은 모든 평가셋을 사용하도록 정의되어 있었습니다. 
# + colab_type="code" id="wiScyBcef6Dq" colab={} pred_dicts = list(linear_est.predict(eval_input_fn)) probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts]) probs.plot(kind='hist', bins=20, title='예측 확률') # + [markdown] colab_type="text" id="UEHRCd4sqrLs" # 마지막으로, 수신자 조작 특성(receiver operating characteristic, ROC)을 살펴보면 정탐률(true positive rate)과 오탐률(false positive rate)의 상충관계에 대해 더 잘 이해할 수 있습니다. # + colab_type="code" id="kqEjsezIokIe" colab={} from sklearn.metrics import roc_curve from matplotlib import pyplot as plt fpr, tpr, _ = roc_curve(y_eval, probs) plt.plot(fpr, tpr) plt.title('ROC curve') plt.xlabel('오탐률(false positive rate)') plt.ylabel('정탐률(true positive rate)') plt.xlim(0,) plt.ylim(0,)
site/ko/tutorials/estimator/linear.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>[![Kaggle](https://kaggle.com/static/images/open-in-kaggle.svg)](https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial4.ipynb) # - # # Tutorial 4: From Reinforcement Learning to Planning # **Week 3, Day 4: Reinforcement Learning** # # **By Neuromatch Academy** # # __Content creators:__ <NAME> and <NAME> with help from <NAME> # # __Content reviewers:__ <NAME> and <NAME> # + [markdown] colab_type="text" # **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** # # <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p> # - # --- # # # Tutorial Objectives # # In this tutorial you will implement one of the simplest model-based Reinforcement Learning algorithms, Dyna-Q. You will understand what a world model is, how it can improve the agent's policy, and the situations in which model-based algorithms are more advantagenous than their model-free counterparts. # # * You will implement a model-based RL agent, Dyna-Q, that can solve a simple task; # * You will investigate the effect of planning on the agent's behavior; # * You will compare the behaviors of a model-based and model-free agent in light of an environmental change. 
# --- # # Setup # # Imports import numpy as np import matplotlib.pyplot as plt from scipy.signal import convolve as conv # + cellView="form" #@title Figure settings # %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle") # + cellView="form" #@title Helper functions def epsilon_greedy(q, epsilon): """Epsilon-greedy policy: selects the maximum value action with probabilty (1-epsilon) and selects randomly with epsilon probability. Args: q (ndarray): an array of action values epsilon (float): probability of selecting an action randomly Returns: int: the chosen action """ be_greedy = np.random.random() > epsilon if be_greedy: action = np.argmax(q) else: action = np.random.choice(len(q)) return action def q_learning(state, action, reward, next_state, value, params): """Q-learning: updates the value function and returns it. Args: state (int): the current state identifier action (int): the action taken reward (float): the reward received next_state (int): the transitioned to state identifier value (ndarray): current value function of shape (n_states, n_actions) params (dict): a dictionary containing the default parameters Returns: ndarray: the updated value function of shape (n_states, n_actions) """ # value of previous state-action pair prev_value = value[int(state), int(action)] # maximum Q-value at current state if next_state is None or np.isnan(next_state): max_value = 0 else: max_value = np.max(value[int(next_state)]) # reward prediction error delta = reward + params['gamma'] * max_value - prev_value # update value of previous state-action pair value[int(state), int(action)] = prev_value + params['alpha'] * delta return value def learn_environment(env, model_updater, planner, params, max_steps, n_episodes, shortcut_episode=None): # Start with a uniform value function value = np.ones((env.n_states, env.n_actions)) # Run learning reward_sums = np.zeros(n_episodes) 
episode_steps = np.zeros(n_episodes) # Dyna-Q state model = np.nan*np.zeros((env.n_states, env.n_actions, 2)) # Loop over episodes for episode in range(n_episodes): if shortcut_episode is not None and episode == shortcut_episode: env.toggle_shortcut() state = 64 action = 1 next_state, reward = env.get_outcome(state, action) model[state, action] = reward, next_state value = q_learning(state, action, reward, next_state, value, params) state = env.init_state # initialize state reward_sum = 0 for t in range(max_steps): # choose next action action = epsilon_greedy(value[state], params['epsilon']) # observe outcome of action on environment next_state, reward = env.get_outcome(state, action) # sum rewards obtained reward_sum += reward # update value function value = q_learning(state, action, reward, next_state, value, params) # update model model = model_updater(model, state, action, reward, next_state) # execute planner value = planner(model, value, params) if next_state is None: break # episode ends state = next_state reward_sums[episode] = reward_sum episode_steps[episode] = t+1 return value, reward_sums, episode_steps class world(object): def __init__(self): return def get_outcome(self): print("Abstract method, not implemented") return def get_all_outcomes(self): outcomes = {} for state in range(self.n_states): for action in range(self.n_actions): next_state, reward = self.get_outcome(state, action) outcomes[state, action] = [(1, next_state, reward)] return outcomes class QuentinsWorld(world): """ World: Quentin's world. 100 states (10-by-10 grid world). The mapping from state to the grid is as follows: 90 ... 99 ... 40 ... 49 30 ... 39 20 21 22 ... 29 10 11 12 ... 19 0 1 2 ... 9 54 is the start state. Actions 0, 1, 2, 3 correspond to right, up, left, down. Moving anywhere from state 99 (goal state) will end the session. Landing in red states incurs a reward of -1. Landing in the goal state (99) gets a reward of 1. 
Going towards the border when already at the border will stay in the same place. """ def __init__(self): self.name = "QuentinsWorld" self.n_states = 100 self.n_actions = 4 self.dim_x = 10 self.dim_y = 10 self.init_state = 54 self.shortcut_state = 64 def toggle_shortcut(self): if self.shortcut_state == 64: self.shortcut_state = 2 else: self.shortcut_state = 64 def get_outcome(self, state, action): if state == 99: # goal state reward = 0 next_state = None return next_state, reward reward = 0 # default reward value if action == 0: # move right next_state = state + 1 if state == 98: # next state is goal state reward = 1 elif state % 10 == 9: # right border next_state = state elif state in [11, 21, 31, 41, 51, 61, 71, 12, 72, 73, 14, 74, 15, 25, 35, 45, 55, 65, 75]: # next state is red reward = -1 elif action == 1: # move up next_state = state + 10 if state == 89: # next state is goal state reward = 1 if state >= 90: # top border next_state = state elif state in [2, 12, 22, 32, 42, 52, 62, 3, 63, self.shortcut_state, 5, 65, 6, 16, 26, 36, 46, 56, 66]: # next state is red reward = -1 elif action == 2: # move left next_state = state - 1 if state % 10 == 0: # left border next_state = state elif state in [17, 27, 37, 47, 57, 67, 77, 16, 76, 75, 14, 74, 13, 23, 33, 43, 53, 63, 73]: # next state is red reward = -1 elif action == 3: # move down next_state = state - 10 if state <= 9: # bottom border next_state = state elif state in [22, 32, 42, 52, 62, 72, 82, 23, 83, 84, 25, 85, 26, 36, 46, 56, 66, 76, 86]: # next state is red reward = -1 else: print("Action must be between 0 and 3.") next_state = None reward = None return int(next_state) if next_state is not None else None, reward # HELPER FUNCTIONS FOR PLOTTING def plot_state_action_values(env, value, ax=None): """ Generate plot showing value of each action at each state. 
""" if ax is None: fig, ax = plt.subplots() for a in range(env.n_actions): ax.plot(range(env.n_states), value[:, a], marker='o', linestyle='--') ax.set(xlabel='States', ylabel='Values') ax.legend(['R','U','L','D'], loc='lower right') def plot_quiver_max_action(env, value, ax=None): """ Generate plot showing action of maximum value or maximum probability at each state (not for n-armed bandit or cheese_world). """ if ax is None: fig, ax = plt.subplots() X = np.tile(np.arange(env.dim_x), [env.dim_y,1]) + 0.5 Y = np.tile(np.arange(env.dim_y)[::-1][:,np.newaxis], [1,env.dim_x]) + 0.5 which_max = np.reshape(value.argmax(axis=1), (env.dim_y,env.dim_x)) which_max = which_max[::-1,:] U = np.zeros(X.shape) V = np.zeros(X.shape) U[which_max == 0] = 1 V[which_max == 1] = 1 U[which_max == 2] = -1 V[which_max == 3] = -1 ax.quiver(X, Y, U, V) ax.set( title='Maximum value/probability actions', xlim=[-0.5, env.dim_x+0.5], ylim=[-0.5, env.dim_y+0.5], ) ax.set_xticks(np.linspace(0.5, env.dim_x-0.5, num=env.dim_x)) ax.set_xticklabels(["%d" % x for x in np.arange(env.dim_x)]) ax.set_xticks(np.arange(env.dim_x+1), minor=True) ax.set_yticks(np.linspace(0.5, env.dim_y-0.5, num=env.dim_y)) ax.set_yticklabels(["%d" % y for y in np.arange(0, env.dim_y*env.dim_x, env.dim_x)]) ax.set_yticks(np.arange(env.dim_y+1), minor=True) ax.grid(which='minor',linestyle='-') def plot_heatmap_max_val(env, value, ax=None): """ Generate heatmap showing maximum value at each state """ if ax is None: fig, ax = plt.subplots() if value.ndim == 1: value_max = np.reshape(value, (env.dim_y,env.dim_x)) else: value_max = np.reshape(value.max(axis=1), (env.dim_y,env.dim_x)) value_max = value_max[::-1,:] im = ax.imshow(value_max, aspect='auto', interpolation='none', cmap='afmhot') ax.set(title='Maximum value per state') ax.set_xticks(np.linspace(0, env.dim_x-1, num=env.dim_x)) ax.set_xticklabels(["%d" % x for x in np.arange(env.dim_x)]) ax.set_yticks(np.linspace(0, env.dim_y-1, num=env.dim_y)) if env.name != 
'windy_cliff_grid': ax.set_yticklabels( ["%d" % y for y in np.arange( 0, env.dim_y*env.dim_x, env.dim_x)][::-1]) return im def plot_rewards(n_episodes, rewards, average_range=10, ax=None): """ Generate plot showing total reward accumulated in each episode. """ if ax is None: fig, ax = plt.subplots() smoothed_rewards = (conv(rewards, np.ones(average_range), mode='same') / average_range) ax.plot(range(0, n_episodes, average_range), smoothed_rewards[0:n_episodes:average_range], marker='o', linestyle='--') ax.set(xlabel='Episodes', ylabel='Total reward') def plot_performance(env, value, reward_sums): fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(16, 12)) plot_state_action_values(env, value, ax=axes[0,0]) plot_quiver_max_action(env, value, ax=axes[0,1]) plot_rewards(n_episodes, reward_sums, ax=axes[1,0]) im = plot_heatmap_max_val(env, value, ax=axes[1,1]) fig.colorbar(im) # - # --- # # # Section 1: Model-based RL # + cellView="form" # @title Video 1: Model-based RL from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="zT_legTotF0", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # The algorithms introduced in the previous tutorials are all *model-free*, as they do not require a model to use or control behavior. 
In this section, we will study a different class of algorithms called model-based. As we will see next, in contrast to model-free RL, model-based methods use a model to build a policy. # # But what is a model? A model (sometimes called a world model or internal model) is a representation of how the world will respond to the agent's actions. You can think of it as a representation of how the world *works*. With such a representation, the agent can simulate new experiences and learn from these simulations. This is advantageous for two reasons. First, acting in the real world can be costly and sometimes even dangerous: remember Cliff World from Tutorial 3? Learning from simulated experience can avoid some of these costs or risks. Second, simulations make fuller use of one's limited experience. To see why, imagine an agent interacting with the real world. The information acquired with each individual action can only be assimilated at the moment of the interaction. In contrast, the experiences simulated from a model can be simulated multiple times -- and whenever desired -- allowing for the information to be more fully assimilated. # ## Section 1.1 Quentin's World Environment # # In this tutorial, our RL agent will act in the Quentin's world, a 10x10 grid world. # # <img alt="QuentinsWorld" width="560" height="560" src="https://github.com/NeuromatchAcademy/course-content/blob/master/tutorials/W2D5_ReinforcementLearning/static/W2D5_Tutorial4_QuentinsWorld.png?raw=true"> # # In this environment, there are 100 states and 4 possible actions: right, up, left, and down. The goal of the agent is to move, via a series of steps, from the start (green) location to the goal (yellow) region, while avoiding the red walls. 
More specifically: # * The agent starts in the green state, # * Moving into one of the red states incurs a reward of -1, # * Moving into the world borders stays in the same place, # * Moving into the goal state (yellow square in the upper right corner) gives you a reward of 1, and # * Moving anywhere from the goal state ends the episode. # # Now that we have our environment and task defined, how can we solve this using a model-based RL agent? # --- # # Section 2: Dyna-Q # # In this section, we will implement Dyna-Q, one of the simplest model-based reinforcement learning algorithms. A Dyna-Q agent combines acting, learning, and planning. The first two components -- acting and learning -- are just like what we have studied previously. Q-learning, for example, learns by acting in the world, and therefore combines acting and learning. But a Dyna-Q agent also implements planning, or simulating experiences from a model--and learns from them. # # In theory, one can think of a Dyna-Q agent as implementing acting, learning, and planning simultaneously, at all times. But, in practice, one needs to specify the algorithm as a sequence of steps. The most common way in which the Dyna-Q agent is implemented is by adding a planning routine to a Q-learning agent: after the agent acts in the real world and learns from the observed experience, the agent is allowed a series of $k$ *planning steps*. At each one of those $k$ planning steps, the model generates a simulated experience by randomly sampling from the history of all previously experienced state-action pairs. The agent then learns from this simulated experience, again using the same Q-learning rule that you implemented for learning from real experience. This simulated experience is simply a one-step transition, i.e., a state, an action, and the resulting state and reward. 
So, in practice, a Dyna-Q agent learns (via Q-learning) from one step of **real** experience during acting, and then from k steps of **simulated** experience during planning. # # There's one final detail about this algorithm: where does the simulated experiences come from or, in other words, what is the "model"? In Dyna-Q, as the agent interacts with the environment, the agent also learns the model. For simplicity, Dyna-Q implements model-learning in an almost trivial way, as simply caching the results of each transition. Thus, after each one-step transition in the environment, the agent saves the results of this transition in a big matrix, and consults that matrix during each of the planning steps. Obviously, this model-learning strategy only makes sense if the world is deterministic (so that each state-action pair always leads to the same state and reward), and this is the setting of the exercise below. However, even this simple setting can already highlight one of Dyna-Q major strengths: the fact that the planning is done at the same time as the agent interacts with the environment, which means that new information gained from the interaction may change the model and thereby interact with planning in potentially interesting ways. # # # Since you already implemented Q-learning in the previous tutorial, we will focus here on the extensions new to Dyna-Q: the model update step and the planning step. For reference, here's the Dyna-Q algorithm that you will help implement: # --- # **TABULAR DYNA-Q** # # Initialize $Q(s,a)$ and $Model(s,a)$ for all $s \in S$ and $a \in A$. 
# # Loop forever:
#
# > (a) $S$ &larr; current (nonterminal) state <br>
# > (b) $A$ &larr; $\epsilon$-greedy$(S,Q)$ <br>
# > (c) Take action $A$; observe resultant reward, $R$, and state, $S'$ <br>
# > (d) $Q(S,A)$ &larr; $Q(S,A) + \alpha \left[R + \gamma \max_{a} Q(S',a) - Q(S,A)\right]$ <br>
# > (e) $Model(S,A)$ &larr; $R,S'$ (assuming deterministic environment) <br>
# > (f) Loop repeat $k$ times: <br>
# >> $S$ &larr; random previously observed state <br>
# >> $A$ &larr; random action previously taken in $S$ <br>
# >> $R,S'$ &larr; $Model(S,A)$ <br>
# >> $Q(S,A)$ &larr; $Q(S,A) + \alpha \left[R + \gamma \max_{a} Q(S',a) - Q(S,A)\right]$ <br>
#
#
# ---

# ## Exercise 1: Dyna-Q Model Update
#
# In this exercise you will implement the model update portion of the Dyna-Q algorithm. More specifically, after each action that the agent executes in the world, we need to update our model to remember what reward and next state we last experienced for the given state-action pair.

def dyna_q_model_update(model, state, action, reward, next_state):
  """ Dyna-Q model update.

  Cache the observed one-step transition so that planning can later replay
  it. Because the environment is assumed deterministic, remembering the last
  observed (reward, next_state) for each (state, action) pair is a complete
  model of the world.

  Args:
    model (ndarray): An array of shape (n_states, n_actions, 2) that represents
                     the model of the world i.e. what reward and next state do
                     we expect from taking an action in a state.
    state (int): the current state identifier
    action (int): the action taken
    reward (float): the reward received
    next_state (int): the transitioned to state identifier

  Returns:
    ndarray: the updated model
  """
  # Update our model with the observed reward and next state; slot 0 holds
  # the reward, slot 1 the successor state (see the planning step below).
  model[state, action] = reward, next_state

  return model


# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_ReinforcementLearning/solutions/W3D4_Tutorial4_Solution_2b244095.py)
#
#
# -

# Now that we have a way to update our model, we can use it in the planning phase of Dyna-Q to simulate past experiences.

# ## Exercise 2: Dyna-Q Planning
#
# In this exercise you will implement the other key part of Dyna-Q: planning. We will sample a random state-action pair from those we've experienced, use our model to simulate the experience of taking that action in that state, and update our value function using Q-learning with these simulated state, action, reward, and next state outcomes. Furthermore, we want to run this planning step $k$ times, which can be obtained from `params['k']`.
#
# For this exercise, you may use the `q_learning` function to handle the Q-learning value function update. Recall that the method signature is `q_learning(state, action, reward, next_state, value, params)` and it returns the updated `value` table.

def dyna_q_planning(model, value, params):
  """ Dyna-Q planning.

  Perform ``params['k']`` simulated (planning) updates: sample a previously
  experienced state-action pair uniformly at random, look up its cached
  outcome in the model, and apply the same Q-learning update used for real
  experience.

  Args:
    model (ndarray): An array of shape (n_states, n_actions, 2) that represents
                     the model of the world i.e. what reward and next state do
                     we expect from taking an action in a state.
    value (ndarray): current value function of shape (n_states, n_actions)
    params (dict): a dictionary containing learning parameters; must contain
                   'k', the number of planning steps, plus whatever
                   `q_learning` requires (e.g. 'alpha', 'gamma')

  Returns:
    ndarray: the updated value function of shape (n_states, n_actions)
  """
  # Perform k additional updates at random (planning)
  for _ in range(params['k']):
    # Find state-action combinations for which we've experienced a reward i.e.
    # the reward value is not NaN. The outcome of this expression is an Nx2
    # matrix, where each row is a state and action value, respectively.
    candidates = np.array(np.where(~np.isnan(model[:, :, 0]))).T

    # Select a random row index from our candidates
    idx = np.random.choice(len(candidates))

    # Obtain the randomly selected state and action values from the candidates
    state, action = candidates[idx]

    # Obtain the expected reward and next state from the model
    reward, next_state = model[state, action]

    # Update the value function using Q-learning on the simulated transition
    value = q_learning(state, action, reward, next_state, value, params)

  return value


# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_ReinforcementLearning/solutions/W3D4_Tutorial4_Solution_8d78a96b.py)
#
#
# -

# With a way to update our model and a means to use it in planning, it is time to see it in action. The following code sets up our agent parameters and learning environment, then passes your model update and planning methods to the agent to try and solve Quentin's World. Notice that we set the number of planning steps $k=10$.

# +
# set for reproducibility, comment out / change seed value for different results
np.random.seed(1)

# parameters needed by our policy and learning rule
params = {
  'epsilon': 0.05,  # epsilon-greedy policy
  'alpha': 0.5,  # learning rate
  'gamma': 0.8,  # temporal discount factor
  'k': 10,  # number of Dyna-Q planning steps
}

# episodes/trials
n_episodes = 500
max_steps = 1000

# environment initialization
env = QuentinsWorld()

# solve Quentin's World using Dyna-Q
results = learn_environment(env, dyna_q_model_update, dyna_q_planning,
                            params, max_steps, n_episodes)
value, reward_sums, episode_steps = results

plot_performance(env, value, reward_sums)
# -

# Upon completion, we should see that our Dyna-Q agent is able to solve the task quite quickly, achieving a consistent positive reward after only a limited number of episodes (bottom left).

# ---
# # Section 3: How much to plan?
# # Now that you implemented a Dyna-Q agent with $k=10$, we will try to understand the effect of planning on performance. How does changing the value of $k$ impact our agent's ability to learn? # # The following code is similar to what we just ran, only this time we run several experiments over several different values of $k$ to see how their average performance compares. In particular, we will choose $k \in \{0, 1, 10, 100\}$. Pay special attention to the case where $k = 0$ which corresponds to no planning. This is, in effect, just regular Q-learning. # # The following code will take a bit of time to complete. To speed things up, try lowering the number of experiments or the number of $k$ values to compare. # + # set for reproducibility, comment out / change seed value for different results np.random.seed(1) # parameters needed by our policy and learning rule params = { 'epsilon': 0.05, # epsilon-greedy policy 'alpha': 0.5, # learning rate 'gamma': 0.8, # temporal discount factor } # episodes/trials n_experiments = 10 n_episodes = 100 max_steps = 1000 # number of planning steps planning_steps = np.array([0, 1, 10, 100]) # environment initialization env = QuentinsWorld() steps_per_episode = np.zeros((len(planning_steps), n_experiments, n_episodes)) for i, k in enumerate(planning_steps): params['k'] = k for experiment in range(n_experiments): results = learn_environment(env, dyna_q_model_update, dyna_q_planning, params, max_steps, n_episodes) steps_per_episode[i, experiment] = results[2] # Average across experiments steps_per_episode = np.mean(steps_per_episode, axis=1) # Plot results fig, ax = plt.subplots() ax.plot(steps_per_episode.T) ax.set(xlabel='Episodes', ylabel='Steps per episode', xlim=[20, None], ylim=[0, 160]) ax.legend(planning_steps, loc='upper right', title="Planning steps"); # - # After an initial warm-up phase of the first 20 episodes, we should see that the number of planning steps has a noticable impact on our agent's ability to rapidly solve the 
environment. We should also notice that after a certain value of $k$ our relative utility goes down, so it's important to balance a large enough value of $k$ that helps us learn quickly without wasting too much time in planning. # --- # # Section 4: When the world changes... # # In addition to speeding up learning about a new environment, planning can also help the agent to quickly incorporate new information about the environment into its policy. Thus, if the environment changes (e.g. the rules governing the transitions between states, or the rewards associated with each state/action), the agent doesn't need to experience that change *repeatedly* (as would be required in a Q-learning agent) in real experience. Instead, planning allows that change to be incorporated quickly into the agent's policy, without the need to experience the change more than once. # # In this final section, we will again have our agents attempt to solve Quentin's World. However, after 200 episodes, a shortcut will appear in the environment. We will test how a model-free agent using Q-learning and a Dyna-Q agent adapt to this change in the environment. # # <img alt="QuentinsWorldShortcut" width="560" height="560" src="https://github.com/NeuromatchAcademy/course-content/blob/master/tutorials/W2D5_ReinforcementLearning/static/W2D5_Tutorial4_QuentinsWorldShortcut.png?raw=true"> # # # The following code again looks similar to what we've run previously. Just as above we will have multiple values for $k$, with $k=0$ representing our Q-learning agent and $k=10$ for our Dyna-Q agent with 10 planning steps. The main difference is we now add in an indicator as to when the shortcut appears. In particular, we will run the agents for 400 episodes, with the shortcut appearing in the middle after episode #200. # # When this shortcut appears we will also let each agent experience this change once i.e. we will evaluate the act of moving upwards when in the state that is below the now-open shortcut. 
# After this single demonstration, the agents will continue interacting in the environment.
#

# +
# set for reproducibility, comment out / change seed value for different results
np.random.seed(1)

# parameters needed by our policy and learning rule
params = {
  'epsilon': 0.05,  # epsilon-greedy policy
  'alpha': 0.5,  # learning rate
  'gamma': 0.8,  # temporal discount factor
}

# episodes/trials
n_episodes = 400
max_steps = 1000
shortcut_episode = 200  # when we introduce the shortcut

# number of planning steps
planning_steps = np.array([0, 10])  # Q-learning, Dyna-Q (k=10)

# environment initialization
# one steps-per-episode curve per agent (row 0: Q-learning, row 1: Dyna-Q)
steps_per_episode = np.zeros((len(planning_steps), n_episodes))

# Solve Quentin's World using Q-learning and Dyna-Q
for i, k in enumerate(planning_steps):
  # fresh environment per agent so the shortcut appears at the same episode
  env = QuentinsWorld()
  params['k'] = k
  results = learn_environment(env, dyna_q_model_update, dyna_q_planning,
                              params, max_steps, n_episodes,
                              shortcut_episode=shortcut_episode)
  # results[2] holds the per-episode step counts for this run
  steps_per_episode[i] = results[2]

# Plot results
fig, ax = plt.subplots()
ax.plot(steps_per_episode.T)
ax.set(xlabel='Episode', ylabel='Steps per Episode',
       xlim=[20,None], ylim=[0, 160])
ax.axvline(shortcut_episode, linestyle="--", color='gray', label="Shortcut appears")
ax.legend(('Q-learning', 'Dyna-Q', 'Shortcut appears'), loc='upper right');
# -

# If all went well, we should see the Dyna-Q agent having already achieved near optimal performance before the appearance of the shortcut and then immediately incorporating this new information to further improve. In this case, the Q-learning agent takes much longer to fully incorporate the new shortcut.

# ---
# # Summary
#
# In this notebook, you have learned about model-based reinforcement learning and implemented one of the simplest architectures of this type, Dyna-Q. Dyna-Q is very much like Q-learning, but instead of learning only from real experience, you also learn from **simulated** experience. This small difference, however, can have huge benefits!
Planning *frees* the agent from the limitation of its own environment, and this in turn allows the agent to speed-up learning -- for instance, effectively incorporating environmental changes into one's policy. # # Not surprisingly, model-based RL is an active area of research in machine learning. Some of the exciting topics in the frontier of the field involve (i) learning and representing a complex world model (i.e., beyond the tabular and deterministic case above), and (ii) what to simulate -- also known as search control -- (i.e., beyond the random selection of experiences implemented above). # # The framework above has also been used in neuroscience to explain various phenomena such as planning, memory sampling, memory consolidation, and even dreaming!
tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Optimization for Grover's Algorithm

# +
from qiskit import QuantumRegister, ClassicalRegister
import numpy as np
import matplotlib.pyplot as plt
import math
from qiskit.tools.visualization import circuit_drawer
from qiskit.providers.aer import noise
from qiskit import *
from qiskit import(
    QuantumCircuit,
    execute,
    Aer)
from qiskit.visualization import plot_histogram
import pytket
from pytket.qiskit import qiskit_to_tk
from pytket.qiskit import tk_to_qiskit
# -

# ## Target Algorithm is QHT (Quantum Hough Transform).

# +
# The triple-quoted block below is the original (disabled) import/setup cell,
# kept for reference as a no-op string literal.
"""
#Import necessary libraries
import qiskit
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute, Aer, IBMQ, transpile, execute
from qiskit.visualization import plot_state_hinton, plot_histogram, plot_bloch_vector, plot_state_qsphere
from qiskit.providers.ibmq import least_busy
import numpy as np
%config InlineBackend.figure_format = 'svg' # Makes the images look nice
%matplotlib inline
"""

#Configuration of Initial Parameters
n = 4                # number of qubits (search space size N = 2**n)
sample/QuatumHoughTransform_sample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 (seaborn-dev) # language: python # name: seaborn-dev # --- # + active="" # .. _introduction: # # .. currentmodule:: seaborn # # An introduction to seaborn # ========================== # # .. raw:: html # # <div class=col-md-9> # # Seaborn is a library for making statistical graphics in Python. It is built on top of `matplotlib <https://matplotlib.org/>`_ and closely integrated with `pandas <https://pandas.pydata.org/>`_ data structures. # # Here is some of the functionality that seaborn offers: # # - A dataset-oriented API for examining :ref:`relationships <scatter_bubbles>` between :ref:`multiple variables <faceted_lineplot>` # - Specialized support for using categorical variables to show :ref:`observations <jitter_stripplot>` or :ref:`aggregate statistics <pointplot_anova>` # - Options for visualizing :ref:`univariate <distplot_options>` or :ref:`bivariate <joint_kde>` distributions and for :ref:`comparing <horizontal_boxplot>` them between subsets of data # - Automatic estimation and plotting of :ref:`linear regression <anscombes_quartet>` models for different kinds :ref:`dependent <logistic_regression>` variables # - Convenient views onto the overall :ref:`structure <scatterplot_matrix>` of complex datasets # - High-level abstractions for structuring :ref:`multi-plot grids <faceted_histogram>` that let you easily build :ref:`complex <pair_grid_with_kde>` visualizations # - Concise control over matplotlib figure styling with several :ref:`built-in themes <aesthetics_tutorial>` # - Tools for choosing :ref:`color palettes <palette_tutorial>` that faithfully reveal patterns in your data # # Seaborn aims to make visualization a central part of exploring and understanding data. 
Its dataset-oriented plotting functions operate on dataframes and arrays containing whole datasets and internally perform the necessary semantic mapping and statistical aggregation to produce informative plots. # # Here's an example of what this means: # + tags=["hide"] # %matplotlib inline # - import seaborn as sns sns.set() tips = sns.load_dataset("tips") sns.relplot(x="total_bill", y="tip", col="time", hue="smoker", style="smoker", size="size", data=tips); # + active="" # A few things have happened here. Let's go through them one by one: # # 1. We import seaborn, which is the only library necessary for this simple example. # + tags=["hide-output"] import seaborn as sns # + active="" # Behind the scenes, seaborn uses matplotlib to draw plots. Many tasks can be accomplished with only seaborn functions, but further customization might require using matplotlib directly. This is explained in more detail :ref:`below <intro_plot_customization>`. For interactive work, it's recommended to use a Jupyter/IPython interface in `matplotlib mode <https://ipython.readthedocs.io/en/stable/interactive/plotting.html>`_, or else you'll have to call :ref:`matplotlib.pyplot.show` when you want to see the plot. # # 2. We apply the default default seaborn theme, scaling, and color palette. # + tags=["hide-output"] sns.set() # + active="" # This uses the `matplotlib rcParam system <https://matplotlib.org/users/customizing.html>`_ and will affect how all matplotlib plots look, even if you don't make them with seaborn. Beyond the default theme, there are :ref:`several other options <aesthetics_tutorial>`, and you can independently control the style and scaling of the plot to quickly translate your work between presentation contexts (e.g., making a plot that will have readable fonts when projected during a talk). If you like the matplotlib defaults or prefer a different theme, you can skip this step and still use the seaborn plotting functions. # # 3. We load one of the example datasets. 
# + tags=["hide-output"] tips = sns.load_dataset("tips") # + active="" # Most code in the docs will use the :func:`load_dataset` function to get quick access to an example dataset. There's nothing particularly special about these datasets; they are just pandas dataframes, and we could have loaded them with :ref:`pandas.read_csv` or build them by hand. Many examples use the "tips" dataset, which is very boring but quite useful for demonstration. The tips dataset illustrates the "tidy" approach to organizing a dataset. You'll get the most out of seaborn if your datasets are organized this way, and it is explained in more detail :ref:`below <intro_tidy_data>`. # # 4. We draw a faceted scatter plot with multiple semantic variables. # + tags=["hide-output"] sns.relplot(x="total_bill", y="tip", col="time", hue="smoker", style="smoker", size="size", data=tips) # + active="" # This particular plot shows the relationship between five variables in the tips dataset. Three are numeric, and two are categorical. Two numeric variables (``total_bill`` and ``tip``) determined the position of each point on the axes, and the third (``size``) determined the size of each point. One categorical variable split the dataset onto two different axes (facets), and the other determined the color and shape of each point. # # All of this was accomplished using a single call to the seaborn function :func:`relplot`. Notice how we only provided the names of the variables in the dataset and the roles that we wanted them to play in the plot. Unlike when using matplotlib directly, it wasn't necessary to translate the variables into parameters of the visualization (e.g., the specific color or marker to use for each category). That translation was done automatically by seaborn. This lets the user stay focused on the question they want the plot to answer. # # .. 
_intro_api_abstraction: # # API abstraction across visualizations # ------------------------------------- # # There is no universal best way to visualize data. Different questions are best answered by different kinds of visualizations. Seaborn tries to make it easy to switch between different visual representations that can be parameterized with the same dataset-oriented API. # # The function :func:`relplot` is named that way because it is designed to visualize many different statistical *relationships*. While scatter plots are a highly effective way of doing this, relationships where one variable represents a measure of time are better represented by a line. The :func:`relplot` function has a convenient ``kind`` parameter to let you easily switch to this alternate representation: # - dots = sns.load_dataset("dots") sns.relplot(x="time", y="firing_rate", col="align", hue="choice", size="coherence", style="choice", facet_kws=dict(sharex=False), kind="line", legend="full", data=dots); # + active="" # Notice how the ``size`` and ``style`` parameters are shared across the scatter and line plots, but they affect the two visualizations differently (changing marker area and symbol vs line width and dashing). We did not need to keep those details in mind, letting us focus on the overall structure of the plot and the information we want it to convey. # # .. _intro_stat_estimation: # # Statistical estimation and error bars # ------------------------------------- # # Often we are interested in the average value of one variable as a function of other variables. 
Many seaborn functions can automatically perform the statistical estimation that is necessary to answer these questions: # - fmri = sns.load_dataset("fmri") sns.relplot(x="timepoint", y="signal", col="region", hue="event", style="event", kind="line", data=fmri); # + active="" # When statistical values are estimated, seaborn will use bootstrapping to compute confidence intervals and draw error bars representing the uncertainty of the estimate. # # Statistical estimation in seaborn goes beyond descriptive statistics. For example, it is also possible to enhance a scatterplot to include a linear regression model (and its uncertainty) using :func:`lmplot`: # - sns.lmplot(x="total_bill", y="tip", col="time", hue="smoker", data=tips); # + active="" # .. _intro_categorical: # # Specialized categorical plots # ----------------------------- # # Standard scatter and line plots visualize relationships between numerical variables, but many data analyses involve categorical variables. There are several specialized plot types in seaborn that are optimized for visualizing this kind of data. They can be accessed through :func:`catplot`. Similar to :func:`relplot`, the idea of :func:`catplot` is that it exposes a common dataset-oriented API that generalizes over different representations of the relationship between one numeric variable and one (or more) categorical variables. # # These representations offer different levels of granularity in their presentation of the underlying data. 
At the finest level, you may wish to see every observation by drawing a scatter plot that adjusts the positions of the points along the categorical axis so that they don't overlap: # - sns.catplot(x="day", y="total_bill", hue="smoker", kind="swarm", data=tips); # + active="" # Alternately, you could use kernel density estimation to represent the underlying distribution that the points are sampled from: # - sns.catplot(x="day", y="total_bill", hue="smoker", kind="violin", split=True, data=tips); # + active="" # Or you could show the only mean value and its confidence interval within each nested category: # - sns.catplot(x="day", y="total_bill", hue="smoker", kind="bar", data=tips); # + active="" # .. _intro_func_types: # # Figure-level and axes-level functions # ------------------------------------- # # How do these tools work? It's important to know about a major distinction between seaborn plotting functions. All of the plots shown so far have been made with "figure-level" functions. These are optimized for exploratory analysis because they set up the matplotlib figure containing the plot(s) and make it easy to spread out the visualization across multiple axes. They also handle some tricky business like putting the legend outside the axes. To do these things, they use a seaborn :class:`FacetGrid`. # # Each different figure-level plot ``kind`` combines a particular "axes-level" function with the :class:`FacetGrid` object. For example, the scatter plots are drawn using the :func:`scatterplot` function, and the bar plots are drawn using the :func:`barplot` function. These functions are called "axes-level" because they draw onto a single matplotlib axes and don't otherwise affect the rest of the figure. 
# # The upshot is that the figure-level function needs to control the figure it lives in, while axes-level functions can be combined into a more complex matplotlib figure with other axes that may or may not have seaborn plots on them: # - import matplotlib.pyplot as plt f, axes = plt.subplots(1, 2, sharey=True, figsize=(6, 4)) sns.boxplot(x="day", y="tip", data=tips, ax=axes[0]) sns.scatterplot(x="total_bill", y="tip", hue="day", data=tips, ax=axes[1]); # + active="" # Controlling the size of the figure-level functions works a little bit differently than it does for other matplotlib figures. Instead of setting the overall figure size, the figure-level functions are parameterized by the size of each facet. And instead of setting the height and width of each facet, you control the height and *aspect* ratio (ratio of width to height). This parameterization makes it easy to control the size of the graphic without thinking about exactly how many rows and columns it will have, although it can be a source of confusion: # - sns.relplot(x="time", y="firing_rate", col="align", hue="choice", size="coherence", style="choice", height=4.5, aspect=2 / 3, facet_kws=dict(sharex=False), kind="line", legend="full", data=dots); # + active="" # The way you can tell whether a function is "figure-level" or "axes-level" is whether it takes an ``ax=`` parameter. You can also distinguish the two classes by their output type: axes-level functions return the matplotlib ``axes``, while figure-level functions return the :class:`FacetGrid`. # # # .. _intro_dataset_funcs: # # Visualizing dataset structure # ----------------------------- # # There are two other kinds of figure-level functions in seaborn that can be used to make visualizations with multiple plots. They are each oriented towards illuminating the structure of a dataset. 
One, :func:`jointplot`, focuses on a single relationship: # - iris = sns.load_dataset("iris") sns.jointplot(x="sepal_length", y="petal_length", data=iris); # + active="" # The other, :func:`pairplot`, takes a broader view, showing all pairwise relationships and the marginal distributions, optionally conditioned on a categorical variable : # - sns.pairplot(data=iris, hue="species"); # + active="" # Both :func:`jointplot` and :func:`pairplot` have a few different options for visual representation, and they are built on top of classes that allow more thoroughly customized multi-plot figures (:class:`JointGrid` and :class:`PairGrid`, respectively). # # .. _intro_plot_customization: # # Customizing plot appearance # --------------------------- # # The plotting functions try to use good default aesthetics and add informative labels so that their output is immediately useful. But defaults can only go so far, and creating a fully-polished custom plot will require additional steps. Several levels of additional customization are possible. # # The first way is to use one of the alternate seaborn themes to give your plots a different look. Setting a different theme or color palette will make it take effect for all plots: # - sns.set(style="ticks", palette="muted") sns.relplot(x="total_bill", y="tip", col="time", hue="smoker", style="smoker", size="size", data=tips); # + active="" # For figure-specific customization, all seaborn functions accept a number of optional parameters for switching to non-default semantic mappings, such as different colors. (Appropriate use of color is critical for effective data visualization, and seaborn has :ref:`extensive support <palette_tutorial>` for customizing color palettes). 
# # Finally, where there is a direct correspondence with an underlying matplotlib function (like :func:`scatterplot` and ``plt.scatter``), additional keyword arguments will be passed through to the matplotlib layer: # - sns.relplot(x="total_bill", y="tip", col="time", hue="size", style="smoker", size="size", palette="YlGnBu", markers=["D", "o"], sizes=(10, 125), edgecolor=".2", linewidth=.5, alpha=.75, data=tips); # + active="" # In the case of :func:`relplot` and other figure-level functions, that means there are a few levels of indirection because :func:`relplot` passes its exta keyword arguments to the underlying seaborn axes-level function, which passes *its* extra keyword arguments to the underlying matplotlib function. So it might take some effort to find the right documentation for the parameters you'll need to use, but in principle an extremely detailed level of customization is possible. # # Some customization of figure-level functions can be accomplished through additional parameters that get passed to :class:`FacetGrid`, and you can use the methods on that object to control many other properties of the figure. For even more tweaking, you can access the matplotlib objects that the plot is drawn onto, which are stored as attributes: # - g = sns.catplot(x="total_bill", y="day", hue="time", height=3.5, aspect=1.5, kind="box", legend=False, data=tips); g.add_legend(title="Meal") g.set_axis_labels("Total bill ($)", "") g.set(xlim=(0, 60), yticklabels=["Thursday", "Friday", "Saturday", "Sunday"]) g.despine(trim=True) g.fig.set_size_inches(6.5, 3.5) g.ax.set_xticks([5, 15, 25, 35, 45, 55], minor=True); plt.setp(g.ax.get_yticklabels(), rotation=30); # + active="" # Because the figure-level functions are oriented towards efficient exploration, using them to manage a figure that you need to be precisely sized and organized may take more effort than setting up the figure directly in matplotlib and using the corresponding axes-level seaborn function. 
Matplotlib has a comprehensive and powerful API; just about any attribute of the figure can be changed to your liking. The hope is that a combination of seaborn's high-level interface and matplotlib's deep customizability will allow you to quickly explore your data and create graphics that can be tailored into a `publication quality <https://github.com/wagnerlabpapers/Waskom_PNAS_2017>`_ final product. # # .. _intro_tidy_data: # # Organizing datasets # ------------------- # # As mentioned above, seaborn will be most powerful when your datasets have a particular organization. This format is alternately called "long-form" or "tidy" data and is described in detail by <NAME> in this `academic paper <http://vita.had.co.nz/papers/tidy-data.html>`_. The rules can be simply stated: # # 1. Each variable is a column # 2. Each observation is a row # # A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a "variable" is something that will be assigned a role in the plot. It may be useful to look at the example datasets and see how they are structured. For example, the first five rows of the "tips" dataset look like this: # - tips.head() # + active="" # In some domains, the tidy format might feel awkward at first. Timeseries data, for example, are sometimes stored with every timepoint as part of the same observational unit and appearing in the columns. The "fmri" dataset that we used :ref:`above <intro_stat_estimation>` illustrates how a tidy timeseries dataset has each timepoint in a different row: # - fmri.head() # + active="" # Many seaborn functions can plot wide-form data, but only with limited functionality. To take advantage of the features that depend on tidy-formatted data, you'll likely find the ``pandas.melt`` function useful for "un-pivoting" a wide-form dataframe. 
More information and useful examples can be found `in this blog post <https://tomaugspurger.github.io/modern-5-tidy.html>`_ by one of the pandas developers. # # .. _intro_next_steps: # # Next steps # ---------- # # You have a few options for where to go next. You might first want to learn how to :ref:`install seaborn <installing>`. Once that's done, you can browse the :ref:`example gallery <example_gallery>` to get a broader sense for what kind of graphics seaborn can produce. Or you can read through the :ref:`official tutorial <tutorial>` for a deeper discussion of the different tools and what they are designed to accomplish. If you have a specific plot in mind and want to know how to make it, you could check out the :ref:`API reference <api_ref>`, which documents each function's parameters and shows many examples to illustrate usage. # + active="" # .. raw:: html # # </div>
doc/introduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Setup # # ## Importing libraries for analysis # # In this example, we'll run through some examples of mining publicly available NF drug screening data for interesting features. First, we'll setup our environment. Then, we'll get the data, and then we'll explore it. # # To explore the data, we will use a dimensionality reduction technique called umap. This is available both as an R and Python package. In this markdown, we'll use the Python implementation `umap`. # # + import synapseclient syn = synapseclient.Synapse() import os os.environ['KMP_DUPLICATE_LIB_OK']='TRUE' ##required to keep umap from crashing notebook import numpy as np import pandas as pd import seaborn as sns import umap.umap_ as umap import matplotlib.pyplot as plt syn.login() # - import feather # ## Prepare the drug-target dataset # # We've already done the work of mapping NF drug screening IDs together with target data, so we just need to filter down to get a list of targets. We'll assume everything with a pChEMBL >6 is a "real" target. First, we'll get the drug IDs and targets for those drugs. This will also map in human-readable drug names. If you'd like other synonyms, check out [this Synapse table](https://www.synapse.org/#!Synapse:syn17090820/tables/). # + targetspath = syn.get('syn20700199') targets = feather.read_dataframe(targetspath.path) # + targets_filt = (targets .query('mean_pchembl > 6') .filter(["internal_id", "hugo_gene", "std_name"]) .drop_duplicates()) pd.DataFrame.head(targets_filt) # - # # # ## Prepare the drug screening data # # In these experiments, researchers treat tumors (or cell lines) with drugs or experimental molecules to figure out whether these tumors are sensitive to these drugs. 
These compounds have molecular _targets_ that the drug binds to and activates or inactivates to affect a biological pathway in the tumors. We'll take a systematic look at all of these to see if a single NF tumor type - plexiform neurofibroma - is sensitive to any particular drug targets. # # This data is standardized across all public NF studies, and a variety of drug metrics were calculated each experiment. These metrics, like IC50, summarize the effect of a drug on a cell line. You can read more about dose-response metrics like IC50s [here](https://www.graphpad.com/support/faq/50-of-what-how-exactly-are-ic50-and-ec50-defined/). # # There are a _lot_ of diverse measurements in this dataset, so we should do a bit to clean up the data and narrow the scope, and eliminate metrics that might not be that accurate. # # Specificially, we will: # # -get the dataset # # -filter for Simpson AUC. AUC is one measure of cell viability. The lower the cell viability across a range of doses, the lower the AUC. We can use this value because all of the dose-ranges are the same. Note that comparing across different doses can be more difficult. # # -filter for NF1 deficient pNF cell lines # # -calculate the median AUC for each drug across all cell lines, when we have an AUC for at least three cell lines for a given drug. # # -select only the information we need # # -`inner_join` on the target dataset to filter out any drugs that we don't have target info for. 
# # - binarize the target dataset after it has been reduced to only the drugs we can evaluate

# +
# Fetch the harmonized drug-screening table from Synapse and take a peek.
drug_data_path = syn.get("syn20700260").path
drug_data = pd.read_csv(drug_data_path, low_memory=False)
pd.DataFrame.head(drug_data)

# +
# NF1-deficient plexiform neurofibroma (pNF) cell lines of interest.
pnf = ["ipNF05.5", "ipNF06.2A", "ipNF95.11b C/T", "ipnNF95.11C",
       "ipNF95.6", "ipNF05.5 (mixed clone)", "ipNF95.11b C"]

# Keep Simpson-AUC rows for the pNF lines, keep only drug screens with a
# single AUC value, and cap responses at 100.
# NOTE(review): `model_name == @pnf` relies on pandas query() treating `==`
# against a list as membership (isin) — confirm this is intentional rather
# than `in @pnf`.
drug_data_pnf = (drug_data
    .query('response_type == "AUC_Simpson"')
    .query('model_name == @pnf')
    .groupby('drug_screen_id').filter(lambda x: len(x)==1)
    .assign(response = lambda x: [100 if a >= 100 else a for a in x['response']]))

pd.DataFrame.head(drug_data_pnf)

# Distribution of AUCs per cell line.
# BUG FIX: the original re-assigned `ax` to the return value of
# set_xticklabels() (a list of Text objects), so the trailing `ax`
# expression displayed that list instead of the Axes/plot.
ax = sns.boxplot(x = "model_name", y = "response", data = drug_data_pnf)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
ax

# +
# Median AUC per drug/cell-line pair, for drugs screened more than 3 times.
drug_data_filt = (drug_data_pnf
    .groupby('DT_explorer_internal_id').filter(lambda x: len(x)>3)
    .filter(['DT_explorer_internal_id', 'response','model_name'])
    .groupby(['DT_explorer_internal_id','model_name'],as_index = False).median())

# Binarized drug-by-target matrix (1 = drug annotated to hit the gene,
# 0 = no evidence), restricted via inner join to drugs with target info.
target_data_filt = (drug_data_pnf
    .merge(targets, left_on = "DT_explorer_internal_id",
           right_on = "internal_id", how = "inner")
    .filter(["DT_explorer_internal_id", "hugo_gene"])
    .assign(foo = 1)
    .pivot_table(values = 'foo', index = 'DT_explorer_internal_id',
                 columns = "hugo_gene", fill_value = 0))
# -

pd.DataFrame.head(target_data_filt)

pd.DataFrame.head(drug_data_filt)

# This dataset has many columns, here's some info about each:
# model_name: the name of the cells tested in the experiment
# model_type: always a cell line for this data
# cellosaurus_id: if cell line is known to cellosaurus, the ID is here
# organism_name: the organism of the model_name
# disease_name: the disease modeled by the model
# disease_efo_id: the experimental factor ontology ID for the disease
# symptom_name: the symptom modeled by the model
# symptom_efo_id: the experimental factor ontology ID for the symptom
# experiment_synapse_id: the source synapse ID for this observation
# study_synapse_id: the source synapse
project for this observation # funder: who funded the work # drug_name: the source's name/ID for the drug # DT_explorer_internal_id: maps to the internal_id at synapse.org/dtexplorer - can be used to map targets and synonyms of names # dosage_unit: the unit of the dosage range # drug_screen_id: a unique ID for each dose-response curve # dosage: the range of doses tested # response_type: the summary metric this row captures # response: the value of the summary metric # response_unit: if applicable, a unit for the response value # # # Dose-response visualization # # To get a sense of what this data usually looks like, we can take a look at some specific drugs. mTOR inhibitors are a class of drugs known to inhibit plexiform neurofibromas. KIF11 inhibitors are not associated with this (as far as we know). # # Let's plot a heatmap of the AUCs of all of the MTOR and KIF11 inhibitors in our dataset: # # + mtor = (targets .query('hugo_gene == "MTOR"') .filter(["internal_id", "std_name"]) .drop_duplicates() .assign(target = "mTOR")) kif11 = (targets .query('hugo_gene == "KIF11"') .filter(["internal_id", "std_name"]) .drop_duplicates() .assign(target = "KIF11")) mtor_kif11 = (pd.concat([mtor, kif11])) mtor_kif11 = mtor_kif11[mtor_kif11['internal_id'].isin(drug_data_filt['DT_explorer_internal_id'])] drug_data_filt_heatmap = drug_data_filt[drug_data_filt['DT_explorer_internal_id'].isin(mtor_kif11['internal_id'])] drug_data_filt_heatmap = (drug_data_filt_heatmap .filter(['model_name', 'response', 'DT_explorer_internal_id']) .join(mtor_kif11.set_index('internal_id'), on='DT_explorer_internal_id') .drop(['DT_explorer_internal_id'], axis = 1) .query('std_name != "TACROLIMUS"') .pivot_table(values = 'response', index = 'std_name', columns = "model_name", fill_value = np.nan)) sns.heatmap(drug_data_filt_heatmap, square = True, robust = True, cmap = sns.cm.vlag_r) # - # It looks like KIF11 targeting drugs and MTOR targeting drugs have a mixture of effects based on the molecule and 
cell line. Most of these molecules have many targets, not just one, and there's lots of overlap between molecules. It might be better to map the space of all targets of a drug rather than just look at one at a time. One approach to doing this is to use a dimensionality reduction technique. # # ## Dimensionality Reduction # # To do this, we can take the binarized target matrix (`target_data_filt`) and use UMAP (`umap`) to reduce the dimensionality of the matrix. This allows us to visualize the grouping. You can learn more about how umap works in the [umap-learn documentation](https://umap-learn.readthedocs.io/en/latest/how_umap_works.html), but it's sufficient for this markdown to simply understand that we are taking a high-dimension space (drugs with many possible targets) and representing this in two dimensions, similarly to principal components analysis (PCA) or t-stochastic neighbor embedding (t-SNE). # # + fit = umap.UMAP( n_neighbors=3, n_components=2, metric='euclidean' ) u = fit.fit_transform(target_data_filt) # - # Here's a quick plot of the results: plt.scatter(u[:, 0], u[:, 1]) plt.gca().set_aspect('equal', 'datalim') plt.title('UMAP projection of the compound-target dataset', fontsize=15); # ## Plot results # # That plot is helpful, but let's layer some more information in to make it more informative. 
# # First, prep and map together data so we have median AUC and standard names available to plot: # + df = pd.DataFrame(u) drugs = target_data_filt.index df.columns = ['x', 'y'] df['internal_id'] = drugs std_names = (targets .filter(['internal_id', 'std_name']) .drop_duplicates()) drug_data_pnf_median = (drug_data_pnf .query('model_name != "ipNF95.11b C"') .groupby('DT_explorer_internal_id').filter(lambda x: len(x)>3) .filter(['DT_explorer_internal_id', 'response']) .groupby(['DT_explorer_internal_id'],as_index = False).median()) df = (df .merge(std_names, how = 'left') .merge(drug_data_pnf_median, left_on = 'internal_id', right_on = "DT_explorer_internal_id", how = "left") .sort_values(['response'], ascending = True)) # - # Then, plot using `matplotlib`. Here, we're looking at the 2 dimensions generated by UMAP. Each point is a drug, it's position is determined by it's target profile, and we've colored each by the AUC. Also, let's label the some of the most potent drugs (AUC < 30). # + from adjustText import adjust_text plt.figure(figsize=(20,20)) plt.scatter(df.x, df.y, c = df.response, cmap = 'PuOr') plt.gca().set_aspect('equal', 'datalim') plt.title('UMAP projection of the compound-target dataset', fontsize=15); df_short = (df .query('response < 30') .reset_index(drop = True)) texts = [plt.text(df_short.x[i], df_short.y[i], df_short.std_name[i], ha='center', va='center') for i in range(len(df_short.x))] adjust_text(texts, arrowprops=dict(arrowstyle='->', color='red')) # - # ## Clustering # # What if instead of single drugs, we want to identify groups of drugs with a low AUC? # # Well, we can use `umap` to reduce the dimensionality of the drug-target data, and then cluster these reduced dimensions using a method like HDBSCAN. So let's do that. I've taken the suggestion of the umap-learn documentation and doubled the number of nearest neighbors from before, (3 to 6). I've also moved from 2 components for nicer visualization, to 3 for some more separation. 
I tried several numbers of components between 3 and 50, and found that anything above three resulted in too many small clusters to be useful. # # Then, simply cluster the x/y data with `hdbscan`. # + fit = umap.UMAP( n_neighbors=6, n_components=3, metric='euclidean' ) u = fit.fit_transform(target_data_filt) import hdbscan import sklearn.cluster as cluster from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score df = pd.DataFrame(u) df.columns = ['x', 'y', 'z'] labels = hdbscan.HDBSCAN( min_cluster_size=3 ).fit(df) # - # Here's a dendrogram showing the breakdown of the various clusters identified: labels.single_linkage_tree_.plot() # We can also color the individual clusters as defined by HDBSCAN. There are lots of very close clusters, so this isn't the most straightforward visualization, but you can get a general sense that the color of the cluster maps closely to the magnitude of the components for each point - i.e. clusters are spatially visible. # + df['labels'] = labels.labels_ plt.figure(figsize=(20,20)) plt.scatter(df.x, df.y, c = df.labels, cmap = 'gist_rainbow') plt.gca().set_aspect('equal', 'datalim') plt.title('UMAP projection of the compound-target dataset', fontsize=15); # - # Finally, we can select for only the best clusters and plot some labels see what drugs are in them Here, I'm filtering only for clusters that have a median AUC < 40 across all cell lines and drugs for that cluster. An important note - this is the _median AUC_ for each cluster, so some of these drugs may have had no effect, and that could be masked by the overall cluster average. 
# + active="" # df['internal_id'] = drugs # # drug_data_pnf_clustered = (drug_data_pnf # .merge(df, right_on = 'internal_id', left_on = "DT_explorer_internal_id", how = "right") # .filter(['labels','response']) # .groupby(['labels'],as_index = False).median()) # # # df_short = (df # .merge(drug_data_pnf_clustered, how = "left") # .sort_values(['response'], ascending = True) # .query('response < 40') # .merge(std_names, how = 'left') # .reset_index(drop = True)) # # plt.figure(figsize=(20,20)) # plt.scatter(df_short.x, df_short.y, c = df_short.labels, cmap = 'PuOr') # plt.gca().set_aspect('equal', 'datalim') # plt.title('UMAP projection of the compound-target dataset', fontsize=15); # # texts = [plt.text(df_short.x[i], df_short.y[i], df_short.std_name[i], ha='center', va='center') for i in range(len(df_short.x))] # adjust_text(texts, arrowprops=dict(arrowstyle='-', color='red')) # -
py_demos/2-drug-screening.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: cap_env # language: python # name: cap_env # --- # # In this notebook a simple Q learner will be trained and evaluated. The Q learner recommends when to buy or sell shares of one particular stock, and in which quantity (in fact it determines the desired fraction of shares in the total portfolio value). One initial attempt was made to train the Q-learner with multiple processes, but it was unsuccessful. # + # Basic imports import os import pandas as pd import matplotlib.pyplot as plt import numpy as np import datetime as dt import scipy.optimize as spo import sys from time import time from sklearn.metrics import r2_score, median_absolute_error from multiprocessing import Pool # %matplotlib inline # %pylab inline pylab.rcParams['figure.figsize'] = (20.0, 10.0) # %load_ext autoreload # %autoreload 2 sys.path.append('../../') import recommender.simulator as sim from utils.analysis import value_eval from recommender.agent import Agent from functools import partial # + NUM_THREADS = 1 LOOKBACK = 252*2 + 28 STARTING_DAYS_AHEAD = 20 POSSIBLE_FRACTIONS = [0.0, 1.0] # Get the data SYMBOL = 'SPY' total_data_train_df = pd.read_pickle('../../data/data_train_val_df.pkl').stack(level='feature') data_train_df = total_data_train_df[SYMBOL].unstack() total_data_test_df = pd.read_pickle('../../data/data_test_df.pkl').stack(level='feature') data_test_df = total_data_test_df[SYMBOL].unstack() if LOOKBACK == -1: total_data_in_df = total_data_train_df data_in_df = data_train_df else: data_in_df = data_train_df.iloc[-LOOKBACK:] total_data_in_df = total_data_train_df.loc[data_in_df.index[0]:] # Create many agents index = np.arange(NUM_THREADS).tolist() env, num_states, num_actions = sim.initialize_env(total_data_train_df, SYMBOL, starting_days_ahead=STARTING_DAYS_AHEAD, possible_fractions=POSSIBLE_FRACTIONS) agents = 
[Agent(num_states=num_states, num_actions=num_actions, random_actions_rate=0.98, random_actions_decrease=0.9999, dyna_iterations=0, name='Agent_{}'.format(i)) for i in index] # - def show_results(results_list, data_in_df, graph=False): for values in results_list: total_value = values.sum(axis=1) print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(total_value)))) print('-'*100) initial_date = total_value.index[0] compare_results = data_in_df.loc[initial_date:, 'Close'].copy() compare_results.name = SYMBOL compare_results_df = pd.DataFrame(compare_results) compare_results_df['portfolio'] = total_value std_comp_df = compare_results_df / compare_results_df.iloc[0] if graph: plt.figure() std_comp_df.plot() # ## Let's show the symbols data, to see how good the recommender has to be. print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_in_df['Close'].iloc[STARTING_DAYS_AHEAD:])))) # + # Simulate (with new envs, each time) n_epochs = 15 for i in range(n_epochs): tic = time() results_list = sim.simulate_period(total_data_in_df, SYMBOL, agents[0], starting_days_ahead=STARTING_DAYS_AHEAD, possible_fractions=POSSIBLE_FRACTIONS, verbose=False) toc = time() print('Epoch: {}'.format(i)) print('Elapsed time: {} seconds.'.format((toc-tic))) print('Random Actions Rate: {}'.format(agents[0].random_actions_rate)) show_results([results_list], data_in_df) # - results_list = sim.simulate_period(total_data_in_df, SYMBOL, agents[0], learn=False, starting_days_ahead=STARTING_DAYS_AHEAD, possible_fractions=POSSIBLE_FRACTIONS,) show_results([results_list], data_in_df, graph=True) # ## Let's run the trained agent, with the test set # ### First a non-learning test: this scenario would be worse than what is possible (in fact, the q-learner can learn from past samples in the test set without compromising the causality). 
# Evaluate the trained agent on the held-out test set. The two runs below
# are identical except for the `learn` flag, so the shared logic lives in
# one helper.
def _evaluate_on_test(learn):
    """Simulate the test period with agents[0] and report/plot the results.

    learn=False: evaluate the frozen policy (pessimistic scenario).
    learn=True: the agent keeps learning from past test samples (still
        causal), with some residual random exploration.
    """
    tic = time()
    results_list = sim.simulate_period(total_data_test_df,
                                       SYMBOL,
                                       agents[0],
                                       learn=learn,
                                       starting_days_ahead=STARTING_DAYS_AHEAD,
                                       possible_fractions=POSSIBLE_FRACTIONS,
                                       verbose=False)
    toc = time()
    # BUG FIX: the original printed 'Epoch: {}'.format(i) here, where `i`
    # was a stale loop index left over from the training cell.
    print('Test run (learn={})'.format(learn))
    print('Elapsed time: {} seconds.'.format((toc-tic)))
    print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
    show_results([results_list], data_test_df, graph=True)

env, num_states, num_actions = sim.initialize_env(total_data_test_df,
                                                  SYMBOL,
                                                  starting_days_ahead=STARTING_DAYS_AHEAD,
                                                  possible_fractions=POSSIBLE_FRACTIONS)
_evaluate_on_test(learn=False)

# ### And now a "realistic" test, in which the learner continues to learn from past samples in the test set (it even makes some random moves, though very few).

env, num_states, num_actions = sim.initialize_env(total_data_test_df,
                                                  SYMBOL,
                                                  starting_days_ahead=STARTING_DAYS_AHEAD,
                                                  possible_fractions=POSSIBLE_FRACTIONS)
_evaluate_on_test(learn=True)

# ## What are the metrics for "holding the position"?

# Buy-and-hold baseline over the same test window.
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_test_df['Close'].iloc[STARTING_DAYS_AHEAD:]))))

# Persist the trained agent for later reuse.
import pickle
with open('../../data/simple_q_learner.pkl', 'wb') as best_agent:
    pickle.dump(agents[0], best_agent)
notebooks/prod/.ipynb_checkpoints/n08_simple_q_learner-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gabrieldepaiva/Meus_Projetos/blob/master/Instru%C3%A7%C3%B5es_Numpy.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown]
# ## How to use the statistics in the Numpy module and what they are for:

# +
import numpy as np

# +
# Ten random integers in [0, 100) used in this module's examples.
array_exemplo = np.random.randint(0,100,10)
array_exemplo

# + [markdown]
# Statistics used in this module:

# +
# Each statistic is computed once, inside the f-string.
# (FIX: the original computed every statistic twice — once as a bare,
# discarded expression and once again inside the print.)
# Mean:
print(f'Média: {np.mean(array_exemplo):.2f}')
# Standard deviation:
print(f'Desvio Padrão: {np.std(array_exemplo):.2f}')
# Median:
print(f'Mediana: {np.median(array_exemplo):.2f}')
# Maximum:
print(f'Máximo: {np.max(array_exemplo):.2f}')
# Minimum:
print(f'Mínimo: {np.min(array_exemplo):.2f}')
# Covariance (for 1-D input np.cov returns the sample variance, ddof=1):
print(f'Covariância: {np.cov(array_exemplo):.2f}')
# Variance (np.var defaults to ddof=0, hence it differs from np.cov):
print(f'Variância: {np.var(array_exemplo):.2f}')

# + [markdown]
# Other useful utilities:

# +
# Cumulative sum:
array_exemplo.cumsum()
Módulos_Analise_Dados/Instruções_Numpy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/dggasque/DS-Unit-2-Linear-Models/blob/master/module3-ridge-regression/LS_DS_213_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="cHYPA5n_ZdBO" colab_type="text" # Lambda School Data Science # # *Unit 2, Sprint 1, Module 3* # # --- # + [markdown] colab_type="text" id="7IXUfiQ2UKj6" # # Ridge Regression # # ## Assignment # # We're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices. # # But not just for condos in Tribeca... # # - [ ] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million. # - [ ] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test. # - [ ] Do one-hot encoding of categorical features. # - [ ] Do feature selection with `SelectKBest`. # - [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html). Use the scaler's `fit_transform` method with the train set. Use the scaler's `transform` method with the test set. # - [ ] Fit a ridge regression model with multiple features. # - [ ] Get mean absolute error for the test set. # - [ ] As always, commit your notebook to your fork of the GitHub repo. # # The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. 
The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal. # # # ## Stretch Goals # # Don't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from. # # - [ ] Add your own stretch goal(s) ! # - [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥 # - [ ] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html). # - [ ] Learn more about feature selection: # - ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance) # - [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html) # - [mlxtend](http://rasbt.github.io/mlxtend/) library # - scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection) # - [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson. # - [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients. # - [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way. # - [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html). 
# +
# %%capture
import sys

# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
    # !pip install category_encoders==2.*

# If you're working locally:
else:
    DATA_PATH = '../data/'

# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')

# +
import pandas as pd
import pandas_profiling

# Read New York City property sales data
df = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv')

# Change column names: replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]

# SALE_PRICE was read as strings.
# Remove symbols, convert to integer
df['SALE_PRICE'] = (
    df['SALE_PRICE']
    .str.replace('$','')
    .str.replace('-','')
    .str.replace(',','')
    .astype(int)
)

# +
# BOROUGH is a numeric column, but arguably should be a categorical feature,
# so convert it from a number to a string
df['BOROUGH'] = df['BOROUGH'].astype(str)

# +
# Reduce cardinality for NEIGHBORHOOD feature

# Get a list of the top 10 neighborhoods
top10 = df['NEIGHBORHOOD'].value_counts()[:10].index

# At locations where the neighborhood is NOT in the top 10,
# replace the neighborhood with 'OTHER'
df.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'

# +
# Keep only single-family dwellings, per the assignment.
cond = df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS'
df = df[cond]
df.dtypes

# +
# LAND_SQUARE_FEET was read as strings with thousands separators.
df['LAND_SQUARE_FEET'] = df['LAND_SQUARE_FEET'].str.replace(',','').astype(float)

# +
df['LAND_SQUARE_FEET'].dtypes

# +
df.head()

# +
df.isnull().sum()

# +
df['SALE_DATE'] = pd.to_datetime(df['SALE_DATE'], infer_datetime_format=True)

# +
df['SALE_DATE'].dt.month.value_counts()

# +
# Keep sales priced between $100k and $2M, per the assignment.
# BUG FIX: the original used SALE_PRICE > 0, but the assignment (above)
# asks for "more than 100 thousand and less than 2 million".
cond = (df['SALE_PRICE'] > 100000) & (df['SALE_PRICE'] < 2000000)
df = df[cond]

# +
# Train on January-March 2019, test on April 2019.
train = df[df['SALE_DATE'].dt.month < 4]
test = df[df['SALE_DATE'].dt.month == 4]

# +
train.describe().T

# +
train.describe(exclude='number').T

# +
target = 'SALE_PRICE'
high_cardinality = ['ADDRESS', 'SALE_DATE']
remove_columns = ['EASE-MENT', 'APARTMENT_NUMBER', 'BUILDING_CLASS_CATEGORY', 'TAX_CLASS_AT_TIME_OF_SALE']
# Columns are primarily NaN values or zero variation
features = train.columns.drop([target] + high_cardinality + remove_columns)
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]

# +
# One-hot encode categoricals; fit on train only, then apply to test.
import category_encoders as ce

encoder = ce.OneHotEncoder(use_cat_names=True)
X_train = encoder.fit_transform(X_train)
X_test = encoder.transform(X_test)

# +
X_train.describe()

# +
# Sweep k for SelectKBest and report test MAE of a plain linear model.
from sklearn.feature_selection import f_regression, SelectKBest
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error

for k in range(1, len(X_train.columns)+1):
    print(f'{k} features')
    selector = SelectKBest(score_func=f_regression, k=k)
    X_train_selected = selector.fit_transform(X_train, y_train)
    X_test_selected = selector.transform(X_test)
    model = LinearRegression()
    model.fit(X_train_selected, y_train)
    y_pred = model.predict(X_test_selected)
    mae = mean_absolute_error(y_test, y_pred)
    print(f'Test Mean Absolute Error: ${mae:.0f}')

# +
# Keep the best-performing feature subset from the sweep above.
from sklearn.feature_selection import f_regression, SelectKBest

selector = SelectKBest(score_func=f_regression, k=21)
X_train_selected = selector.fit_transform(X_train, y_train)
X_test_selected = selector.transform(X_test)

# +
# Ridge regression on ALL (scaled) features.
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error

scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
model = Ridge(alpha=100)
model.fit(X_train_scaled, y_train)
y_pred = model.predict(X_test_scaled)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Mean Absolute Error: ${mae:,.0f}')

# +
# RidgeCV picks alpha by cross-validation on the same (all-feature) matrix.
model = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1e0])
model.fit(X_train_scaled, y_train)
y_pred = model.predict(X_test_scaled)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Mean Absolute Error: ${mae:,.0f}')

# +
# Ridge on the SelectKBest-selected (scaled) features.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_selected)
X_test_scaled = scaler.transform(X_test_selected)
model = Ridge(alpha=100)
model.fit(X_train_scaled, y_train)
y_pred = model.predict(X_test_scaled)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Mean Absolute Error: ${mae:,.0f}')

# +
# RidgeCV (default alphas) on the selected, scaled features.
model = RidgeCV()
model.fit(X_train_scaled, y_train)
y_pred = model.predict(X_test_scaled)
mae = mean_absolute_error(y_test, y_pred)
print(f'Test Mean Absolute Error: ${mae:,.0f}')
module3-ridge-regression/LS_DS_213_assignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # COLIN API Utils for getting filings

# +
import os
from http import HTTPStatus

import requests
from colin_api.models.filing import Filing as ColinFiling
from dotenv import find_dotenv, load_dotenv
from flask import Flask
from legal_api import db
from legal_api.config import get_named_config
from legal_api.models import (
    Business,
    Filing,
)
from legal_api.services import queue
from legal_api.models.colin_event_id import ColinEventId
# -

COLIN_API = os.getenv('COLIN_API')
# Fix: os.getenv returns a *string* whenever TIMEOUT is set in the
# environment, and requests expects a numeric timeout — cast explicitly.
TIMEOUT = int(os.getenv('TIMEOUT', 30))


# +
def echo_colin_api():
    """Print the configured COLIN API base URL (sanity check)."""
    print(f'{COLIN_API}')


def create_filing(filing_type, colin_filing, colin_event_id, corp_num):
    """Create legal api filing using colin filing as base."""
    effective_date = colin_filing['filing']['business']['foundingDate']
    colin_filing['filing']['business']['identifier'] = corp_num
    filing = Filing(effective_date=effective_date, filing_json=colin_filing)
    filing._filing_type = filing_type
    filing.filing_date = effective_date
    colin_event = ColinEventId()
    colin_event.colin_event_id = colin_event_id
    filing.colin_event_ids.append(colin_event)
    # Override the state setting mechanism
    filing.skip_status_listener = True
    filing._status = 'PENDING'
    filing.source = Filing.Source.COLIN.value
    return filing


def get_data_load_required_filing_event(legal_type, corp_num):
    """Determine whether corp has required filings types (incorp app or conversion ledger)."""
    events = get_filing_events_for_corp(legal_type, corp_num)
    # Fix: the events fetch returns None on a failed request; previously this
    # raised TypeError when iterated. Treat "no events" as "no match".
    if not events:
        return None
    match = next((x for x in events
                  if x.get('filing_typ_cd') in ['OTINC', 'BEINC', 'ICORP', 'ICORU', 'ICORC', 'CONVL']),
                 None)
    return match


def get_filing_events_for_corp(legal_type, corp_num):
    """Retrieve filing events for a given corp.

    Returns the list of events, or None when the request fails or the
    response body is empty.
    """
    colin_corp_num = corp_num
    # Strip the 2-letter prefix for BC companies — COLIN stores the bare
    # 7-digit number (assumes corp_num is prefix + 7 digits; confirm with caller).
    if legal_type == Business.LegalTypes.COMP.value:
        colin_corp_num = corp_num[-7:]
    r = requests.get(f'{COLIN_API}/api/v1/businesses/event/corp_num/{colin_corp_num}',
                     timeout=TIMEOUT)
    if r.status_code != HTTPStatus.OK or not r.json():
        return None
    events = dict(r.json()).get('events', [])
    return events


def get_filing(colin_filing_type, legal_type, event_info: dict = None):  # pylint: disable=redefined-outer-name
    """Get filing for a given event from colin."""
    identifier = event_info['corp_num']
    event_id = event_info['event_id']
    url = f'{COLIN_API}/api/v1/businesses/{legal_type}/{identifier}/filings/{colin_filing_type}?eventId={event_id}'
    print(url)
    # Fix: this request previously had no timeout and could hang forever;
    # use the same TIMEOUT as the other COLIN calls.
    response = requests.get(url, timeout=TIMEOUT)
    print(response.json())
    filing = dict(response.json())
    return filing


def get_filing_type(filing_typ_cd):
    """Get generic filing type."""
    filing_types = ColinFiling.FILING_TYPES.keys()
    match = next((x for x in filing_types
                  if filing_typ_cd in ColinFiling.FILING_TYPES.get(x).get('type_code_list')),
                 None)
    return match
tests/data/common/colin_api_utils.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import struct
import numpy as np
import matplotlib as mpl
import scipy as sp
import scipy.sparse
from matplotlib import pyplot


# Function to read from MNIST data
def read(dataset="training", path="."):
    """Load MNIST labels and images from the idx files under *path*.

    Returns (labels, images) as numpy arrays.
    """
    # Fix: compare strings with '==', not 'is'. Identity comparison only
    # worked by accident of small-string interning and raises SyntaxWarning
    # on modern CPython.
    if dataset == "training":
        fname_img = os.path.join(path, 'train-images.idx3-ubyte')
        fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')
    elif dataset == "testing":
        fname_img = os.path.join(path, 't10k-images.idx3-ubyte')
        fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')
    else:
        raise ValueError("dataset must be 'testing' or 'training'")

    # Load everything in some numpy arrays
    with open(fname_lbl, 'rb') as flbl:
        _, _ = struct.unpack(">II", flbl.read(8))
        lbl = np.fromfile(flbl, dtype=np.int8)

    with open(fname_img, 'rb') as fimg:
        _, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)

    return (lbl, img)


def show(image):
    """
    Render a given numpy.uint8 2D array of pixel data.
    """
    fig = pyplot.figure()
    ax = fig.add_subplot(1, 1, 1)
    imgplot = ax.imshow(image, cmap=mpl.cm.Greys)
    imgplot.set_interpolation('nearest')
    ax.xaxis.set_ticks_position('top')
    ax.yaxis.set_ticks_position('left')
    pyplot.show()
# -

# Reading Training Data and Test Data
[trainLabels, trainSet] = read(dataset="training", path=".")
[testLabels, testSet] = read(dataset="testing", path=".")

show(trainSet[1])
print(trainLabels[1])

# +
# Preprocessing Training Data and Test Data:
#  1- Changing shape of each sample data from 28*28 to 1*784
#  2- Dividing each pixel magnitude in sample by 255
trainVec = np.zeros(shape=(len(trainSet), 784), dtype=float)
testVec = np.zeros(shape=(len(testSet), 784), dtype=float)
for i in range(len(trainSet)):
    trainVec[i] = np.reshape(trainSet[i] / 255, (1, 784))
for i in range(len(testSet)):
    testVec[i] = np.reshape(testSet[i] / 255, (1, 784))


# Function to predict Class probabilities given weight vector and Sample Data
# P(Y|Xi)=e^(W'Xi)/(1+sum(e^(W'Xi))) where i=0,8; P(Y|Xi)=1/(1+sum(e^(W'Xi))) where i=9
def predict(weightVectors, featureVector):
    fV = featureVector.reshape(1, 784)
    classProb = np.zeros(shape=(10, 1), dtype=float)
    classProb[0:-1] = np.exp(np.matmul(weightVectors, fV.transpose()))
    classProb[-1] = 1  # reference class 9 has an implicit zero weight vector
    classProb = classProb / sum(classProb)
    return classProb


# Initializing weight vector(9x784) to all 0s
weightVectors = np.zeros(shape=(9, 784), dtype=float)
weightUpdate = np.zeros(shape=(9, 784), dtype=float)
runs = 100
learningRate = 0.4

# Y contains class labels 0 to 8 (class 9 is the reference class), which will
# be compared with the sample class label to return 1 or 0
Y = np.zeros(shape=(9, 1), dtype=float)
for i in range(0, 9):
    Y[i] = i

Accuracy = list()
while runs > 0:
    runs = runs - 1
    # weightUpdate=np.zeros(shape=(9,784),dtype=float)
    count = 0
    for i in range(len(trainSet)):
        # update per sample = alpha*Xl = n*P(Yl=1 | Xl,w)*Xl
        alpha = learningRate * (np.equal(Y, trainLabels[i]) - predict(weightVectors, trainVec[i])[0:-1]) / len(trainLabels)
        weightUpdate = weightUpdate + alpha * trainVec[i].reshape(1, 784).repeat(9, axis=0)
    # weightVectors=weightVectors+weightUpdate
    # NOTE(review): weightUpdate accumulates across epochs (it is never
    # reset inside the loop), so this assignment is equivalent to the
    # commented-out incremental form — confirm this is intentional.
    weightVectors = weightUpdate
    for i in range(len(testSet)):
        if predict(weightVectors, testVec[i]).argmax(axis=0) == testLabels[i]:
            count = count + 1
    Accuracy.append(count / len(testLabels))
    print("Run: " + str(100 - runs) + "; Accuracy: " + str(count / len(testLabels)))
# -

Run = list()
for i in range(100):
    Run.append(i + 1)

pyplot.plot(Run, Accuracy)
pyplot.xlabel('No of iterations')
pyplot.ylabel('Accuracy')
logisticRegression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline

# plotting
import matplotlib as mpl
mpl.style.use('ggplot')
import matplotlib.pyplot as plt

# math and data manipulation
import numpy as np
import pandas as pd

# set random seeds
from numpy.random import seed
from dateutil.parser import parse
import seaborn as sns

# modeling
import keras
from keras.models import Sequential
from keras.layers import LSTM, Dense, Input, Bidirectional, GRU, Flatten, Dropout

# progress bar
from tqdm import tqdm
# -

train = pd.read_csv('../data/consumption_train.csv', parse_dates=['timestamp'], index_col=0)
train.head(5)

print(train.shape)
print(train.series_id.nunique())

test = pd.read_csv('../data/cold_start_test.csv', index_col=0, parse_dates=['timestamp'])
test.head(5)

print(test.shape)
print(test.series_id.nunique())
print(test.series_id.value_counts().describe())

print(train.timestamp.min(), train.timestamp.max(), test.timestamp.min(), test.timestamp.max())

# test.temperature = np.log(test.temperature + test.temperature.min() + .00001)
# Fix: pass x/y as keywords — they are keyword-only since seaborn 0.12.
sns.jointplot(x="temperature", y="consumption", data=test)
plt.show()

submission = pd.read_csv('../data/submission_format.csv', index_col=0, parse_dates=['timestamp'])
submission.head(5)

print(submission.shape)
print(submission.series_id.nunique())
print(submission.series_id.value_counts().describe())

submission.groupby('series_id').prediction_window.first().value_counts()

sns.distplot(test[pd.notnull(test.temperature) == True].temperature, kde=True, label='test')
sns.distplot(submission[pd.notnull(submission.temperature) == True].temperature, kde=True, label='submission')
plt.legend()
plt.show()

test.groupby('series_id').count().timestamp.divide(24).value_counts().sort_index().plot.bar()
plt.show()

test.groupby('series_id').count().timestamp.value_counts().sort_index().plot.bar()
plt.show()


def create_lagged_features(df, lag=1):
    """Append `lag` shifted copies of the consumption column as features.

    Rows containing NaNs introduced by the shifting are dropped.
    """
    if not isinstance(df, pd.DataFrame):
        df = pd.DataFrame(df, columns=['consumption'])

    def _rename_lag(ser, j):
        ser.name = ser.name + f'_{j}'
        return ser

    # add a column lagged by `i` steps
    for i in range(1, lag + 1):
        df = df.join(df.consumption.shift(i).pipe(_rename_lag, i))

    df.dropna(inplace=True)
    return df


# +
from sklearn.preprocessing import MinMaxScaler


def prepare_training_data(consumption_series, lag):
    """ Converts a series of consumption data into a lagged, scaled sample.
    """
    # scale training data
    scaler = MinMaxScaler(feature_range=(0, 1))
    consumption_vals = scaler.fit_transform(consumption_series.values.reshape(-1, 1))

    # convert consumption series to lagged features
    consumption_lagged = create_lagged_features(consumption_vals, lag=lag)

    cols = list(consumption_lagged.columns)
    cols.remove('consumption')

    # X, y format taking the first column (original time series) to be the y
    X = consumption_lagged.drop('consumption', axis=1).values
    y = consumption_lagged.consumption.values

    # keras expects 3 dimensional X
    X = X.reshape(X.shape[0], 1, X.shape[1])
    # X = X.reshape(X.shape[0], X.shape[1])

    return X, y, scaler
# -


def prepare_training_data_without_scaling(consumption_series, lag):
    """ Converts a series of consumption data into a lagged, *unscaled* sample.
    """
    # convert consumption series to lagged features
    consumption_lagged = create_lagged_features(consumption_series.values, lag=lag)

    cols = list(consumption_lagged.columns)
    cols.remove('consumption')

    # X, y format taking the first column (original time series) to be the y
    X = consumption_lagged.drop('consumption', axis=1).values
    y = consumption_lagged.consumption.values

    # 2-dimensional X here (no time-step axis)
    X = X.reshape(X.shape[0], X.shape[1])

    # Fix: the original returned a name `scaler` that is never defined in
    # this function (NameError, or silent capture of a global). No scaling
    # happens here, so return None in that slot, keeping the 4-tuple shape.
    return X, y, None, cols


# +
lag = 24

# model parameters
batch_size = 1  # this forces the lstm to step through each time-step one at a time
batch_input_shape = (batch_size, 1, lag)

# instantiate a sequential model
model = Sequential()

# add GRU layers - stateful MUST be true here in
# order to learn the patterns within a series
model.add(GRU(lag, stateful=True, batch_input_shape=batch_input_shape, return_sequences=True))
model.add(Dropout(.5))
model.add(GRU(lag // 2, stateful=True, return_sequences=True))
model.add(Dropout(.5))
model.add(GRU(lag // 4, stateful=True, return_sequences=True))
model.add(Flatten())

# followed by a dense layer with a single output for regression
model.add(Dense(1))

# compile
model.compile(loss='mean_absolute_error', optimizer='adam')
# -

model.summary()

# +
num_training_series = train.series_id.nunique()
epoch = 1

for i in tqdm(range(epoch), total=epoch, desc='Learning Consumption Trends - Epoch'):
    count = 0
    # reset the LSTM state for training on each series
    for ser_id, ser_data in train.groupby('series_id'):
        count += 1
        if count % 100 == 0:
            print("count = {}".format(count))
        # prepare the data
        X, y, scaler = prepare_training_data(ser_data.consumption, lag)

        # fit the model: note that we don't shuffle batches (it would ruin the sequence)
        # and that we reset states only after an entire X has been fit, instead of after
        # each (size 1) batch, as is the case when stateful=False
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
        model.reset_states()
# -


def generate_hourly_forecast(num_pred_hours, consumption, model, scaler, lag):
    """ Uses last hour's prediction to generate next for num_pred_hours,
        initialized by most recent cold start prediction. Inverts scale of
        predictions before return.
    """
    # allocate prediction frame
    preds_scaled = np.zeros(num_pred_hours)

    # initial X is last lag values from the cold start
    X = scaler.transform(consumption.values.reshape(-1, 1))[-lag:]

    # forecast
    for i in range(num_pred_hours):
        # predict scaled value for next time step
        yhat = model.predict(X.reshape(1, 1, lag), batch_size=1)[0][0]
        preds_scaled[i] = yhat

        # update X to be latest data plus prediction
        X = pd.Series(X.ravel()).shift(-1).fillna(yhat).values

    # revert scale back to original range
    hourly_preds = scaler.inverse_transform(preds_scaled.reshape(-1, 1)).ravel()
    return hourly_preds


my_submission = submission.copy()

# +
# %%time
pred_window_to_num_preds = {'hourly': 24, 'daily': 7, 'weekly': 2}
pred_window_to_num_pred_hours = {'hourly': 24, 'daily': 7 * 24, 'weekly': 2 * 7 * 24}

num_test_series = my_submission.series_id.nunique()

model.reset_states()

for ser_id, pred_df in tqdm(my_submission.groupby('series_id'),
                            total=num_test_series,
                            desc="Forecasting from Cold Start Data"):
    # get info about this series' prediction window
    pred_window = pred_df.prediction_window.unique()[0]
    num_preds = pred_window_to_num_preds[pred_window]
    num_pred_hours = pred_window_to_num_pred_hours[pred_window]

    # prepare cold start data
    series_data = test[test.series_id == ser_id].consumption
    cold_X, cold_y, scaler = prepare_training_data(series_data, lag)

    # fine tune our lstm model to this site using cold start data
    model.fit(cold_X, cold_y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)

    # make hourly forecasts for duration of pred window
    preds = generate_hourly_forecast(num_pred_hours, series_data, model, scaler, lag)

    # reduce by taking sum over each sub window in pred window
    reduced_preds = [pred.sum() for pred in np.split(preds, num_preds)]

    # store result in submission DataFrame
    ser_id_mask = my_submission.series_id == ser_id
    my_submission.loc[ser_id_mask, 'consumption'] = reduced_preds
# -

my_submission.tail(5)

my_submission.to_csv("../data/submmission2.csv", index=False)
Energy Coldstart Forecasting/notebooks/model_bilstm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>

# + [markdown]
# # WorldBank - Gini index
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/WorldBank/WorldBank_Gini_index.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>

# + [markdown]
# **Tags:** #worldbank #opendata #snippet #plotly

# + [markdown]
# **Author:** [<NAME>](https://www.linkedin.com/in/ACoAAAJHE7sB5OxuKHuzguZ9L6lfDHqw--cdnJg/)

# + [markdown]
# **Objective**
#
# The Gini index measures the equality per country. The closest to 0 the
# index is, the most equal the country is. The world average Gini index is
# at 38.
#
# **Data**
#
# GINI INDEX
#
# **Source**
#
# World Bank, Development Research Group.
#
# **Pitch**
#
# https://drive.google.com/file/d/10PXCX0Czck8QJwhinVEoKV3MZGvlAVDC/view

# + [markdown]
# ## Input

# + [markdown]
# ### Import library

# +
import pandas as pd
from pandas_datareader import wb
import plotly.graph_objects as go
import plotly.express as px
# -

# + [markdown]
# ## Model

# + [markdown]
# ### Get the association between the country and the ISO code

# +
# Keep only the country name and its 3-letter ISO code.
countries = wb.get_countries()
countries = countries[['name', 'iso3c']]
countries.columns = ['country', 'iso3c']
countries
# -

# + [markdown]
# ### Get gini index indicator per country

# +
# SI.POV.GINI is the World Bank series code for the Gini index.
indicators = wb.download(indicator=['SI.POV.GINI'], country='all', start=1967, end=2018)
indicators.columns = ['GINI_INDEX']
indicators
# -

# + [markdown]
# ### Merge previous tables

# +
master_table = pd.merge(indicators.reset_index(), countries,
                        left_on='country', right_on='country')
master_table = master_table.set_index(['country', 'iso3c', 'year'])
master_table
# -

# + [markdown]
# ### Pivot previous table and fill in undefined values with values from previous years

# +
pivoted_table = pd.pivot_table(master_table, index=['country', 'iso3c'],
                               columns='year', values='GINI_INDEX')
# Forward-fill across years so gaps inherit the most recent known value.
pivoted_table = pivoted_table.ffill(axis=1)
pivoted_table
# -

# + [markdown]
# ### Show a map of gini index per country over the years (from 1969 to 2018)

# +
pivoted_table = pd.pivot_table(master_table, index=['country', 'iso3c'],
                               columns='year', values='GINI_INDEX')
pivoted_table = pivoted_table.ffill(axis=1)

country_names = list(pivoted_table.index.get_level_values(0))
iso_codes = list(pivoted_table.index.get_level_values(1))

# One choropleth trace per year, plus one slider step that toggles exactly
# that trace visible. Only the latest year starts visible.
data = []
steps = []
for idx, year in enumerate(pivoted_table.columns):
    data.append(dict(
        type='choropleth',
        name='',
        locations=iso_codes,
        z=pivoted_table[year],
        hovertext=country_names,
        colorscale=px.colors.sequential.Reds,
        visible=year == '2018'
    ))
    step = dict(
        method='restyle',
        args=['visible', [False] * len(pivoted_table.columns)],
        label=year)
    step['args'][1][idx] = True
    steps.append(step)

layout = go.Layout(
    title=dict(
        text='Evolution of the gini index from 1969 to 2018',
        x=0.5,
        font=dict(
            size=21,
        )
    ),
    sliders=[dict(steps=steps, active=len(data) - 1)],
    annotations=[dict(
        text='Updated in 2018 from The World Bank',
        showarrow=False,
        x=1,
        y=-0.05
    )],
    autosize=True,
    height=800
)

fig = go.Figure(data, layout)
fig
# -

# + [markdown]
# ## Output

# + [markdown]
# ### Export HTML

# +
fig.write_html("file.html")
WorldBank/WorldBank_Gini_index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### _Speech Processing Labs 2020: Signals: Module 1_ # # # 1 Sounds and Signals # # ### Learning Objectives: # * Identify a periodic signals from a time vs amplitude (time domain) plot # * Identify vocal pulses in a time vs amplitude graph, and how this relates to the concepts of period and frequency. # * Identify differences in speech sounds based on a spectrogram # # ### Need to know: # * Topic Videos: Time domain, Sound source, Periodic signal, Pitch # * [How to use Praat](../phon/phon-0-getPraat.ipynb) # * How to open and view a recording, pitch and spectrum plots and create spectral slices # # ## 1.1 Visualizing speech in the time domain # # ### Exercises # # * Record the following sentences in praat: # * 'Say writer for me' # * 'Say rider for me' # # * From the time vs amplitude graph, what differences are there between: # * the 's' and the 'a' in 'say' # * the 's' in say and the 'f' in 'for' # # * Looking at recordings of 'writer' and 'rider': are there any differences in your tutorial group in how you pronounce the following? Can you see evidence from this from the speech wave? # * 't' vs 'd' # * 'r' # * 'i' # ### Notes # + #### Add your notes here. Press Esc-m to change this code cell to a markdown cell (or select Markdown in the menu at the top) # - # ## 1.2 Periodicity and Pitch # # <div class="alert alert-success" role="alert"> # <strong>Note:</strong> The audio links below are to the sound files on the github server as this gets around some issues playing the sounds from the notebook in the chrome browser. You can download the sounds using those links. You can also click on the three dots on the right side of the audio player (if visible!) to download from there. 
Otherwise, you should be able to find the audio files in your copy of this repository in signals/audio. # # </div> # # # * Listen to some recordings from different instruments: # * [Violin](https://laic.github.io/uoe_speech_processing_course/signals/sounds/violin_A4_05_forte_arco-normal.mp3) # # <audio controls # src="sounds/violin_A4_05_forte_arco-normal.mp3"> # Your browser does not support the audio element # </audio> # # * [Oboe](https://laic.github.io/uoe_speech_processing_course/signals/sounds/oboe_A4_15_forte_normal.mp3) # # <audio controls # src="sounds/oboe_A4_15_forte_normal.mp3"> # Your browser does not support the audio element # </audio> # # * [Flute](https://laic.github.io/uoe_speech_processing_course/signals/sounds/flute_A4_15_forte_normal.mp3) # # <audio controls # src="sounds/flute_A4_15_forte_normal.mp3"> # Your browser does not support the audio element # </audio> # # * [Bass drum](https://laic.github.io/uoe_speech_processing_course/signals/sounds/bass-drum__025_forte_bass-drum-mallet.mp3) # # <audio controls # src="sounds/bass-drum__025_forte_bass-drum-mallet.mp3"> # Your browser does not support the audio element # </audio> # # # # * Load the recordings into Praat and measure some pitch periods manually # * Do all of the instruments recordings have the same fundamental frequency? 
# # # #### Optional: # * Look at the violin at different registers: # # * [Violin A4](https://laic.github.io/uoe_speech_processing_course/signals/sounds/violin_A4_05_forte_arco-normal.mp3) # # <audio controls # src="sounds/violin_A4_05_forte_arco-normal.mp3"> # Your browser does not support the audio element # </audio> # # * [Violin A3](https://laic.github.io/uoe_speech_processing_course/signals/sounds/violin_A3_15_forte_arco-normal.mp3) # # <audio controls # src="sounds/violin_A3_15_forte_arco-normal.mp3"> # Your browser does not support the audio element # </audio> # # * Measure the pitch using praat # * What's the difference in Hz # * What's the difference in semitones? (Here's a [semitone calculator](http://www.homepages.ucl.ac.uk/~sslyjjt/speech/semitone.html)) # # # * Can you record your own voice with the same pitch # * Is it exactly the same fundamental frequency? # # # * Turn on the spectrogram view in praat. What sort of differences are there between instruments? # # ### Notes # ## 1.3 Examine the spectrum of a square wave # # # * Load one of the square waves into praat, select a section from the middle of the waveform about 1 second in length, and generate a spectral slice of this portion: # # * [square_100Hz.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/square_100Hz.wav) # # <audio controls # src="sounds/square_100Hz.wav"> # Your browser does not support the audio element # </audio> # # * [square_200Hz.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/square_200Hz.wav) # # <audio controls # src="sounds/square_200Hz.wav"> # Your browser does not support the audio element # </audio> # # * [square_300Hz.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/square_300Hz.wav) # # <audio controls # src="sounds/square_300Hz.wav"> # Your browser does not support the audio element # </audio> # # # You should see a spectrum showing the component frequencies for the waveform. 
Try playing individual peaks or groups of peaks. Compare the pitch and timbre of your selections. # # # * Try the same thing, playing back just a range of frequencies, but this time use the sine waveforms: # # * [sine_100Hz.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/sine_100Hz.wav) # # <audio controls # src="sounds/sine_100Hz.wav"> # Your browser does not support the audio element # </audio> # # * [sine_200Hz.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/sine_200Hz.wav) # # <audio controls # src="sounds/sine_200Hz.wav"> # Your browser does not support the audio element # </audio> # # * [sine_300Hz.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/sine_300Hz.wav) # # <audio controls # src="sounds/sine_300Hz.wav"> # Your browser does not support the audio element # </audio> # # #### Optional: # * Can you create a sound like a square wave, starting from a sine wave? # * Can you create a sound like a sine wave, starting from a square wave? # ### Notes # ## 1.4 Filter by frequency # * Load the file [sweep.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/sweep.wav) and examine spectral slices at different points in the file. # # <audio controls # src="sounds/sweep.wav"> # Your browser does not support the audio element # </audio> # # * How is a spectral slice from the beginning of the file different from one at the end? # # # * In the praat objects window select the sound sweep object and click on the filter button and select filter one formant. In the box that opens, set a frequency of 2500Hz and a bandwidth of 300Hz, and click OK. You should now have a new object in the list called Sound sweep_filt. # # # * How is the waveform and spectrum of this filtered object different the original sound? 
# #
#
# * Try the above filtering process on a speech waveform, with filter frequencies in the typical range of speech formants, using narrow bandwidths of about 50Hz (You can use the recording you made previously)
#
# ### Notes

# ## 1.5 Time vs Frequency Tradeoff
# * Look at/listen to sentences you recorded again in praat
#
#
# * Identify the part of the recording where a speaker says the "i" in writer: this is actually a diphthong /ai/
#     * What happens to the spectral slice if you select everything between the surrounding consonants?
#     * What happens if you only select the first half? The second half?
#     * Compare this to /a/ and /i/ separately. Can you now characterize what a diphthong is?
#     * What happens if you include the surrounding consonants?
#
#
# * What you see on the spectrogram view of praat is based on a default **frame window** size of 5ms. What happens when you change this?
#
# ### Notes

# ## 1.6 (Extras) Seeing the environment in a sound
#
#
# * Recording conditions: record close to the microphone, or far away (try it in a reverberant space, such as the bathroom!) – what changes? why?
# # # * Analyse some of the synthetic speech signals: # # # * [diphone.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/diphone.wav) # # <audio controls # src="sounds/diphone.wav"> # Your browser does not support the audio element # </audio> # # # * [dnn_parametric.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/dnn_parametric.wav) # # <audio controls # src="sounds/dnn_parametric.wav"> # Your browser does not support the audio element # </audio> # # * [hmm_parametric.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/hmm_parametric.wav) # # <audio controls # src="sounds/hmm_parametric.wav"> # Your browser does not support the audio element # </audio> # # # * [unit_selection1.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/unit_selection1.wav) # # <audio controls # src="sounds/unit_selection1.wav"> # Your browser does not support the audio element # </audio> # # # * [unit_selection2.wav](https://laic.github.io/uoe_speech_processing_course/signals/sounds/unit_selection2.wav) # # <audio controls # src="sounds/unit_selection2.wav"> # Your browser does not support the audio element # </audio> # # # * What are the differences between them? use different tools (your ears, the waveform, the spectrogram) # * In what ways are they similar or different to natural speech? # * Could you tell they are synthetic just by looking at the spectrogram? # ### Notes
signals/sp-m1-1-sounds-signals.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Digitize a Batch Record # Critical process data shouldn’t stay trapped in PDFs. But PDFs are messy (like the documents below). They have a mix of handwriting and text. They can be skewed and blurry. # # Digitizing batch records allows you to start to analyze the data from a batch record PDFs in less than five minutes. #install fathomdata in this environment # !pip install fathomdata import fathomdata as fd # # Add Your API Key # # You'll need a temporary API key to use this tutorial. If you don't have one already, head [here](https://docs.fathom.one/batch-record-digitization.html#get-an-api-key). Then, set your apikey using the code below. # fd.set_api_key('your-api-key-goes-here') # ### Tip # # We prefer to keep our api as an environment variable to keep to safe, and to prevent us from accidentally checking it into source control. Here's what your code might look like # # import os # os.environ["FATHOM_API_KEY"] = "your-api-key-here" # # You can also add the environment variable to your virtual environment. Edit activate file of your virtual environment (`venv/bin/activate`) and add this line to the end: # # export FATHOM_API_KEY=your-api-key-here # # Then when you need to set the api-key within your code you can do something like this: # # import os # fd.set_api_key(os.environ["FATHOM_API_KEY"]) # # Get a Sample Batch Record # # Use the code below to download a sample batch record into the Colab environment. # with open("batch3.pdf", "wb") as f: pdf = fd.get_sample_batch_record("batch3") f.write(pdf) # Take a moment to look at the example batch record PDF. 
You can view the batch record source at this link: [See PDF on GitHub](https://github.com/fathom-data/fathom-sample-data/blob/main/batch3.pdf) # # This batch record contains many different types of data from raw material sources to process metrics. There is a mix of handwritten and typed text and the formatting varies throughout the record. What a mess! # # *Try to find the text in the raw pdf bytes above? What happens?* # # For this tutorial, we’ll focus on extracting and cleaning any type of data stored in a table (but this is just the start!). # # Ingest the Batch Record # new_doc_id = fd.ingest_document("batch3.pdf") #update path to download location if you changed it print(f"Ingested document with ID {new_doc_id}") # That’s it! Check that the ingest was successful by listing the available records. This might take a moment. df = fd.available_documents() df.head() # If the df syntax look familiar, that’s because fathomdata is built on top of [pandas](https://pandas.pydata.org/). You can interact with this record dataframe using all the pandas [slicing and indexing tools](https://pandas.pydata.org/docs/user_guide/indexing.html). # # # Digitize a second sample batch record # # Download another sample batch record by adapting the code above. (Source file is here: [batch4.pdf](https://github.com/fathom-data/fathom-sample-data/blob/main/batch4.pdf)) # # Change the code above by replacing `batch3` with `batch4`. Make sure you get them all. There are 3 instances that need to be replaced. Run the code for a second time. When you are successful, both records will be listed in the available documents dataframe. fd.available_documents() # # Use the Digitized Data # # The extracted data is also returned in a pandas dataframe so it’s quickly available for custom analysis. 
doc = fd.get_document(new_doc_id) materials = doc.get_materials_df() materials.head() # Next you can try some [statistical process control analytics](https://docs.fathom.one/process-validation.html) using this data.
digitize.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Assignment 16 Solutions # #### Q1. What is the benefit of regular expressions? # **Ans:** Regular Expressions, also known as **`regex`** or **`regexp`**, are used to match strings of text such as particular characters, words, or patterns of characters. It means that we can match and extract any string pattern from the text with the help of regular expressions. it helps the programmers to Write less and cleaner code. it also avoids multiple use of **`if/else`** statements. # #### Q2. Describe the difference between the effects of "(ab)c+" and "a(bc)+" Which of these, if any, is the unqualified pattern "abc+"? # **Ans:** Both **`(ab)c+`** and **`a(bc)+`** are valid patterns. the difference between both these patterns is in **`(ab)c+`** **`ab`** is group whereas in **`a(bc)+`** **`bc`** is a group. # #### Q3. How much do you need to use the following sentence while using regular expressions? # **`import re`** # # **Ans:** **`import re`** statement always has to be imported before using regular expressions. # #### Q4. Which characters have special significance in square brackets when expressing a range, and under what circumstances? # **Ans:** The Characters **`.`,`*`,`?`,`^`**,or,**`()`**, have a special signiface when used with square brackets. They need not be be explicitly escaped by **`\`** as in case of pattern texts in a raw string. # #### Q5. How does compiling a regular-expression object benefit you? # **Ans:** We can Combine a regular expression pattern into pattern Objects.Which can be used for pattern matching. it also helps to search a pattern again without rewritting it. # #### Q6. What are some examples of how to use the match object returned by re.match and re.search? 
# **Ans:** Both **`re.search()`** and **`re.match()`** are functions of Python's `re` module. Each scans a string for a pattern and returns a match object when one is found, or `None` otherwise.
#
# The difference is where they look: **`re.match()`** only tries to match at the very beginning of the string, so a substring that occurs later in the string yields `None`. **`re.search()`** scans the whole string (even a multi-line one) and returns the first match wherever it occurs.

import re

Substring = 'string'

String1 = 'We are learning regex with geeksforgeeks regex is very useful for string matching. It is fast too.'

String2 = 'string We are learning regex with geeksforgeeks regex is very useful for string matching. It is fast too.'

# 'string' occurs mid-sentence in String1 (search hits, match misses) and at
# the very start of String2 (both hit). Print search then match for each.
for text in (String1, String2):
    print(re.search(Substring, text, re.IGNORECASE))
    print(re.match(Substring, text, re.IGNORECASE))

# #### Q7. What is the difference between using a vertical bar (|) as an alteration and using square brackets as a character set?

# **Ans:** When **`|`** is used, the pattern matches alternatives: **`<pattern_1>|<pattern_2>`** matches either **`<pattern_1>`** or **`<pattern_2>`** in the searched string, and the first occurrence of a matching substring is returned as the Match object.
# A character set in square brackets matches any single character listed inside the brackets, and returns a match when one is found.

# #### Q8. In regular-expression search patterns, why is it necessary to use the raw-string indicator (r)? In replacement strings?
# **Ans:** Raw strings are used in regular-expression search patterns (and in replacement strings) so that backslashes do not have to be escaped.
Python Advance/16.Assignment_16.ipynb
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .cs
//       format_name: light
//       format_version: '1.5'
//       jupytext_version: 1.14.4
//   kernelspec:
//     display_name: .NET (C#)
//     language: C#
//     name: .net-csharp
// ---

// # Learning .NET Interactive Notebooks.

// As in other languages, there are existing APIs that you can use in the C# language and to be able to reference the classes and methods defined in those APIs you need to make your program aware of those APIs. To do this in C# you add the `using` directive at the top of your file. That directive is equivalent to the `import` in Go.
//
// In the example below we make a combination of the `using` directive and the `static` keyword. The `using` directive brings in the `System` namespace, while the `static` keyword allows you to specify a static class so you can call its methods directly in the current file.

using static System.Console;

WriteLine("Hello Rob");

// The next example shows how to use the `foreach` keyword to iterate over a list of names:

// FIX: the array previously held the single string "Gandalf, Bilbo and Beorn",
// so the loop printed one line instead of iterating over three names.
foreach (string name in new string[] {"Gandalf", "Bilbo", "Beorn"})
{
    WriteLine(name);
}
docs/LanguagesAndFrameworks/.NET/.NET Interactive Notebooks/Learning.NETInteractiveNotebooks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Machine Learning # ## Lab \#4: Support Vector Machine Classifier # ### Textbook is available @ [https://www.github.com/a-mhamdi/isetbz](https://www.github.com/a-mhamdi/isetbz) # --- # Support vector machine or **SVM** for short is also known as a discriminative classifier. The concepts are relatively simple. Using a hyperplane with the largest possible margin, the classifier separates data points. The **SVM** classifier finds an optimal hyperplane which allows classifying new data points much more accurately compared to other classifiers such as logistic regression, knn, etc. # + import numpy as np import matplotlib.pyplot as plt plt.rcParams['figure.dpi'] = 300 # - # Load the dataset `make_blobs` from the `datasets` module of the `sklearn` library. from sklearn.datasets import make_blobs # + X, y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60) xfit = np.linspace(-1, 3.5) plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring') # Draw three lines that couple separate the data for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]: yfit = m * xfit + b plt.plot(xfit, yfit, '-k') plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none', color='#AAAAAA', alpha=0.4) plt.xlim(-1, 3.5); plt.grid() # - # Import the **Support Vector Machine Classifier**. from sklearn.svm import SVC # A full description of the available methods can be retrieved from the official website of [scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html). 
#
# |Syntax|Description|
# |---------:|:------------|
# |`decision_function(X)`| Evaluates the decision function for the samples in X.|
# |`fit(X, y[, sample_weight])`| Fit the SVM model according to the given training data.|
# |`get_params([deep])`| Get parameters for this estimator.|
# |`predict(X)`| Perform classification on samples in X.|
# |`score(X, y[, sample_weight])`| Return the mean accuracy on the given test data and labels.|
# |`set_params(**params)`| Set the parameters of this estimator.|

# We will choose a linear kernel as the distribution of the points can be linearly separated.

clf = SVC(kernel='linear')

# Fit to the data the **SVM** classifier, denoted here by `clf`.

clf.fit(X, y)


# Define a function `plot_svc_decision_function` that will plot **SVM** decision boundaries.

def plot_svc_decision_function(clf, ax=None):
    """Plot the decision function for a 2D SVC.

    Draws the decision boundary (level 0) and the two margin lines
    (levels -1 and +1) of a fitted classifier over the current axis limits.

    Parameters
    ----------
    clf : fitted estimator exposing ``decision_function``
    ax : matplotlib axes, optional
        Defaults to the current axes (``plt.gca()``).
    """
    if ax is None:
        ax = plt.gca()
    x = np.linspace(plt.xlim()[0], plt.xlim()[1], 30)
    y = np.linspace(plt.ylim()[0], plt.ylim()[1], 30)
    Y, X = np.meshgrid(y, x)
    # Evaluate the decision function in ONE vectorized call on the whole
    # 30x30 grid instead of 900 per-point calls.  This also fixes the
    # per-element assignment `P[i, j] = clf.decision_function([[xi, yj]])`,
    # which stored a 1-element array into a scalar slot -- deprecated by
    # NumPy and a hard error since NumPy 1.25.
    P = clf.decision_function(np.c_[X.ravel(), Y.ravel()]).reshape(X.shape)
    # plot the margins
    ax.contour(X, Y, P, colors='k',
               levels=[-1, 0, 1], alpha=0.5,
               linestyles=['--', '-', '--'])


plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
            s=200, facecolors='none');
plt.grid()
Python/ml/clf-svm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Playing with trying to get hovering tool time with density plots.

import matplotlib.pyplot as plt
# %matplotlib inline
import mpld3
import numpy as np
import pandas as pd

# +
# Synthetic test data: three 1000-sample normal draws with shifted means.
dat = pd.DataFrame({'one': np.random.normal(size=1000),
                    'two': np.random.normal(loc=0.1, size=1000),
                    'three': np.random.normal(loc=-0.1, size=1000)
                   })
dat.head(3)
# -

dat.plot(kind='kde')

# +
fig, ax = plt.subplots(figsize=(8, 8))
density = dat.plot(kind='kde', ax=ax)

# Attach one hover tooltip per density curve, labelled with the curve's
# own label, then hand the figure to mpld3 for interactive rendering.
tooltips = [mpld3.plugins.LineLabelTooltip(line, line.get_label())
            for line in density.get_lines()]
mpld3.plugins.connect(fig, *tooltips)
mpld3.display()
# -
notebook/.ipynb_checkpoints/mpld3_kde_plot-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: MFE Environment # language: python # name: mfe_env # --- # + import pandas as pd import numpy as np import logging import sys from datetime import datetime # a little hacky, but works if you don't want to actually install the # custom packages sys.path.append('../') from uniswapv3_simulator.pool import Uniswapv3Pool from uniswapv3_simulator.utils import pool_init_price, solve_for_liquidity_delta from uniswapv3_simulator.math import tick_to_sqrt_price, sqrt_price_to_tick from utils import amount_to_float # + POOL = 'WETH-USDT-500' MAX_DATE = '2022-01-28' timestamp = datetime.now().strftime('%y%m%d%H%M%S') logging.basicConfig(level=logging.INFO, filename=f'./logs/{POOL}_{timestamp}.log') logging.getLogger('uniswap-v3').setLevel(logging.DEBUG) # - data = pd.read_pickle('../data/pool_data_clean.pickle') swaps = data[POOL]['swaps'] liquidity = data[POOL]['liquidity'] swaps.info() swaps.head() liquidity.info() liquidity.head() adds = liquidity.loc[liquidity['liquidity_event'] == 'ADD_LIQUIDITY', :].copy() adds = adds.sort_values('txn_time').reset_index(drop=True) adds.head() first_add_hash = adds.at[0, 'tx_hash'] print(f'First liquidity add hash: {first_add_hash}') # + # from https://etherscan.io/tx/0x5399bd3a8fa539a1899af6b3c10a526d07e5c371a0c0bbcfde7c6cbffe88a59f#eventlog liquidity_delta = amount_to_float('172485837547939', 18) # belive all liquidity amounts use 18 decimals assert liquidity_delta == adds.at[0, 'liquidity'] token0 = adds.at[0, 'token_0_amount'] token1 = adds.at[0, 'token_1_amount'] tick_lower = adds.at[0, 'price_tick_lower'] tick_upper = adds.at[0, 'price_tick_upper'] token0_decimals = adds.at[0, 'contract_decimals_token_0'] token1_decimals = adds.at[0, 'contract_decimals_token_1'] init_price = pool_init_price(token0, token1, tick_upper, tick_lower, liquidity_delta, token0_decimals, 
token1_decimals) token0_symb = liquidity.at[0, 'contract_ticker_symbol_token_0'] token1_symb = liquidity.at[0, 'contract_ticker_symbol_token_1'] print(f'Pool initial price ({token1_symb}/{token0_symb}): {init_price:,.12e}') print(f'Pool initial price ({token0_symb}/{token1_symb}): {1 / init_price:,.2e}') # - sqrt_price_x96 = 4648870407266953854345730 etherscan_price = sqrt_price_x96 ** 2 / 2 ** 192 print(f"Calculated initial price: {init_price:.12e}") print(f"Price per Etherscan: {etherscan_price:.12e}") # + cols = ['tx_hash', 'txn_time', 'liquidity_event'] liqu_txn = liquidity.loc[:, cols].copy() liqu_txn.reset_index(drop=False, inplace=True) liqu_txn.rename(columns={'liquidity_event': 'event', 'index': 'orig_idx'}, inplace=True) cols = ['tx_hash', 'swap_time'] swap_txn = swaps.loc[:, cols].copy() swap_txn.reset_index(drop=False, inplace=True) swap_txn.rename(columns={'swap_time': 'txn_time', 'index': 'orig_idx'}, inplace=True) swap_txn['event'] = 'SWAP' all_txn = pd.concat([liqu_txn, swap_txn], axis=0) all_txn = all_txn.sort_values('txn_time').reset_index(drop=True) all_txn.drop(all_txn.index[all_txn['txn_time'] > MAX_DATE], axis=0, inplace=True) all_txn # - CHECKS_ON = True # need to think about appropriate error tolerances # TODO: maybe base these tolerances on the average transaction size? 
TOKEN0_TOLS = {'atol': 1e-6, 'rtol': 1e-4} TOKEN1_TOLS = {'atol': 1e-6, 'rtol': 1e-4} LIQUIDITY_TOLS = {'atol': 1e-6, 'rtol': 1e-4} all_txn = all_txn.iloc[:50].copy() # + fee = liquidity.at[0, 'pool_fee'] / 1e+6 tick_spacing = liquidity.at[0, 'pool_tick_spacing'] pool = Uniswapv3Pool(fee, tick_spacing, init_price, token0_decimals=token0_decimals, token1_decimals=token1_decimals) print(f'{pool}') position_id = 'LP123' results = [] for i, row in all_txn.iterrows(): logging.info(f'Transaction {i}.') txn = row['event'] idx = row['orig_idx'] if 'LIQUIDITY' in txn: token0 = liquidity.at[idx, 'token_0_amount'] token1 = liquidity.at[idx, 'token_1_amount'] if txn == 'REMOVE_LIQUIDITY': token0 = -1 * token0 token1 = -1 * token1 tick_lower = liquidity.at[idx, 'price_tick_lower'] tick_upper = liquidity.at[idx, 'price_tick_upper'] liquidity_delta = liquidity.at[idx, 'liquidity'] if pd.isnull(liquidity_delta): liquidity_delta = solve_for_liquidity_delta( token0, token1, tick_lower, tick_upper, pool.sqrt_price, token0_decimals, token1_decimals ) elif CHECKS_ON: ld_calc = solve_for_liquidity_delta( token0, token1, tick_lower, tick_upper, pool.sqrt_price, token0_decimals, token1_decimals ) assert np.isclose(liquidity_delta, ld_calc, **LIQUIDITY_TOLS), ( f'Calculated liquidity_delta {ld_calc:,.12e} does ' f'not match liquidity_delta per the data {liquidity_delta:,.12e}.' 
) position = pool.position_map[(position_id, tick_lower, tick_upper)] if liquidity_delta < 0: # If the liquidity_delta is very, very close to the position's total # liquidity, set liquidity_delta to the total liquidity to completely # close out the position if np.isclose(-position.liquidity, liquidity_delta): liquidity_delta = -position.liquidity # we also make sure that liquidity_delta cannot be less than the # position's total liquidity if liquidity_delta < 0: liquidity_delta = max(liquidity_delta, -position.liquidity) token0_calc, token1_calc = pool.set_position( position_id, tick_lower, tick_upper, liquidity_delta ) elif txn == 'SWAP': token0 = swaps.at[idx, 'token_0_amount'] token1 = swaps.at[idx, 'token_1_amount'] token = 0 if token0 > 0 else 1 tokens_in = token0 if token == 0 else token1 token0_calc, token1_calc = pool.swap(token, tokens_in) else: raise ValueError(f'{txn} is not a valid transaction type.') if CHECKS_ON: assert np.isclose(token0, -token0_calc, **TOKEN0_TOLS), ( f'Transaction {i:,}: token0 output {-token0_calc:,.12e} does not ' f'match token0 in the data {token0:,.12e}.' ) assert np.isclose(token1, -token1_calc, **TOKEN1_TOLS), ( f'Transaction {i:,}: token1 output {-token1_calc:,.12e} does not ' f'match token1 in the data {token1:,.12e}.' ) results.append({ 'sqrt_price': pool.sqrt_price, 'liquidity': pool.liquidity }) print(f'Completed transaction {i}.') # -
eda_notebooks/WETH-USDT-500 Pool - Testing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.10 64-bit # name: python3 # --- # # Downloading and parsing GenBank files from Python # # ## Installation # 1. Fork git repo into local machine (click on fork) and clone, or simply clone main branch with # ``` # git clone https://github.com/Robaina/GenBankpy.git # ``` # 2. CD to project directory and set conda environment if not already set: # ``` # conda env create -n ncbi -f environment.yml # ``` # # 3. Activate environment: # ``` # conda activate ncbi # ``` # + # conda activate ncbi from genbankpy.parser import GenBankFastaWriter, GBK """ This package requires: pip install ncbi-acc-download """ # First we need to define the NCBI entry ids to download the data entry_ids = [ 'AE001863.1', 'AF000579.1', 'AF242489.1', 'AP003593.1', 'NC_000911.1', 'NC_007288.1' ] gbkwriter = GenBankFastaWriter.fromAccessionIDs(entry_ids=entry_ids) # gbkwriter = GenBankFastaWriter.fromGBKdirectory('gbk_data') # + # Write fasta containing all peptide sequences of these two organisms gbkwriter.writeSequencesInFasta( gene_keywords={'product': ['any']}, output_fasta='results/allPeptides.faa', sequence='protein', entry_ids=['AE001863.1', 'AP003593.1'] ) # Write fasta containing all nucleotide sequences of these two organisms gbkwriter.writeSequencesInFasta( gene_keywords={'product': ['any']}, output_fasta='results/allNucleotides.fasta', sequence='nucleotide', entry_ids=['AE001863.1', 'AP003593.1'] ) # Write fasta containing nucleotide sequences of the two organisms corresponding to Urease alpha gbkwriter.writeSequencesInFasta( gene_keywords={'product': ['urease', 'alpha']}, output_fasta='results/ureC.fasta', sequence='nucleotide' ) # Write fasta containing peptide sequences of the two organisms corresponding to Urease alpha gbkwriter.writeSequencesInFasta( gene_keywords={'product': ['urease', 'alpha']}, 
output_fasta='results/ureC.faa', sequence='protein', entry_ids=['AE001863.1', 'AP003593.1'] ) # Write fasta containing nucleotide sequences of all five corresponding to 16S gbkwriter.writeSequencesInFasta( gene_keywords={'product': ['16S']}, output_fasta='results/16s.fasta', sequence='nucleotide', entry_ids=None ) # - # # Initializing from list of species names # Checking if there are available genomes to download before actually downloading them, thus avoiding consequent error messages: # + available_genomes = gbkwriter.listNCBIfilesToDownload('Semidanus canariensis') if available_genomes: pass # + sp_list = ['Emiliania huxleyi'] gbkwriter = GenBankFastaWriter.fromSpecies(species_list=sp_list, only_representatives=True) gbkwriter.writeSequencesInFasta( gene_keywords={'product': ['any']}, output_fasta='results/allPeptidesEmiliania.faa', sequence='protein' ) # - # # Parsing GenBank files gbk = GBK('gbk_data/AE001863.1.gbk') gbk.cds.get_by_gene_id('DRA0303')
README.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Spark with MLRun example # This example notebook demonstrates how to execute a spark job with MLRun. # # Our spark job is a generic ETL job which pulls data from user-defined data sources, applies a SQL query on top of them, and writes the result to a user defined destination. # # The definition of the input-sources should be according to : https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrameReader # # The definition of the output destination should be according to : # https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrameWriter # + import os from os.path import isfile, join from mlrun import new_function, NewTask, mlconf #Set the mlrun database/api mlconf.dbpath = 'http://mlrun-api:8080' #Set the pyspark script path V3IO_WORKING_DIR = os.getcwd().replace('/User','/v3io/'+os.getenv('V3IO_HOME')) V3IO_SCRIPT_PATH = V3IO_WORKING_DIR+'/spark-function.py' # - # ## Define a task (job parameters) # + #Define a dict of input data sources DATA_SOURCES = {'family' : {'format': 'jdbc', 'url': 'jdbc:mysql://mysql-rfam-public.ebi.ac.uk:4497/Rfam', 'dbtable': 'Rfam.family', 'user': 'rfamro', 'password': '', 'driver': 'com.mysql.jdbc.Driver'}, 'full_region': {'format': 'jdbc', 'url': 'jdbc:mysql://mysql-rfam-public.ebi.ac.uk:4497/Rfam', 'dbtable': 'Rfam.full_region', 'user': 'rfamro', 'password': '', 'driver': 'com.mysql.jdbc.Driver'} } #Define a query to execute on the input data sources QUERY = 'SELECT family.*, full_region.evalue_score from family INNER JOIN full_region ON family.rfam_acc = full_region.rfam_acc LIMIT 10' #Define the output destination WRITE_OPTIONS = {'format': 'io.iguaz.v3io.spark.sql.kv', 'mode': 'overwrite', 'key': 'rfam_id', 'path': 'v3io://users/admin/frommysql'} 
#Create a task execution with parameters PARAMS = {'data_sources': DATA_SOURCES, 'query': QUERY, 'write_options': WRITE_OPTIONS} SPARK_TASK = NewTask(params=PARAMS) # - # #### Download mysql driver # !wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.19/mysql-connector-java-8.0.19.jar # ## Run locally (in the notebook or attched Spark service) # + #Get the list of the dpendency jars V3IO_JARS_PATH = '/igz/java/libs/' DEPS_JARS_LIST = [join(V3IO_JARS_PATH, f) for f in os.listdir(V3IO_JARS_PATH) if isfile(join(V3IO_JARS_PATH, f)) and f.startswith('v3io-') and f.endswith('.jar')] DEPS_JARS_LIST.append(V3IO_WORKING_DIR + '/mysql-connector-java-8.0.19.jar') # + #Create MLRun function which runs locally in a passthrough mode (since we use spark-submit) local_spark_fn = new_function(kind='local', mode = 'pass', command= f"spark-submit --jars {','.join(DEPS_JARS_LIST)} {V3IO_SCRIPT_PATH}") #Run the function with a task local_spark_fn.run(SPARK_TASK) # - # ## Run as a job on the Kubernetes cluster # + #Create MLRun function to run the spark-job on the kubernetes cluster serverless_spark_fn = new_function(kind='spark', command=V3IO_SCRIPT_PATH, name='my-spark-func') serverless_spark_fn.with_driver_limits(cpu="1300m") serverless_spark_fn.with_driver_requests(cpu=1, mem="4g") # gpu_type & gpus=<number_of_gpus> are supported too serverless_spark_fn.with_executor_limits(cpu="1400m") serverless_spark_fn.with_executor_requests(cpu=1, mem="4g") # gpu_type & gpus=<number_of_gpus> are supported too serverless_spark_fn.with_igz_spark() serverless_spark_fn.spec.deps['jars'] += [V3IO_WORKING_DIR+'/mysql-connector-java-8.0.19.jar'] #Set number of executors serverless_spark_fn.spec.replicas = 2 #Deploy function and install MLRun in the spark image serverless_spark_fn.deploy() run = serverless_spark_fn.run(SPARK_TASK, watch=False) # + pycharm={"name": "#%%\n"} # Get the spark job's UI URL: run.ui_url
examples/mlrun_sparkk8s.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %load_ext autoreload
# %autoreload 2

import json, os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# +
# Load trial results and splice the unconstrained baseline in as the
# '0 constraints' entry so it flows through the same aggregation code.
with open("../results/rate_constraints/adult-income/trials/results.json", 'r') as in_file:
    results = json.load(in_file)

with open("../results/rate_constraints/adult-income/trials/baseline.json", 'r') as in_file:
    baseline = json.load(in_file)

results['0'] = baseline['0']
# -

print("Protected group counts: ", list(results.keys()))


# +
def _violations_pct(d):
    # Violation dict -> percentage array; shared by the four summaries below.
    return np.array(list(d.values())) * 100

def compute_score_mean(d):
    # compute mean of violations
    return np.mean(_violations_pct(d))

def compute_score_median(d):
    # compute median of violations
    return np.median(_violations_pct(d))

def compute_score_max(d):
    # compute max of violations
    return np.max(_violations_pct(d))

def compute_score_std(d):
    # compute standard deviation of violations
    return np.std(_violations_pct(d))
# -

# Constraint counts present in the results files; '0' is the baseline run.
CONSTRAINT_SET = ['0', '1', '5', '10', '15', '22']

# x-axis for every plot: number of constraints in the constrained runs.
ACTIVE_CONSTRAINT_SET = [1, 5, 10, 15, 22]


def _aggregate_trials(all_scores):
    # First entry is the unconstrained baseline (returned raw, as the list of
    # trial scores); the rest are summarized as (min, mean, max) over trials.
    baseline = all_scores[0] if all_scores else 0
    rest = all_scores[1:]
    min_scores = np.array([min(s) for s in rest])
    mean_scores = np.array([np.mean(s) for s in rest])
    max_scores = np.array([max(s) for s in rest])
    return min_scores, mean_scores, max_scores, baseline


def get_scores(results, key, func, filter_keys=False):
    """Aggregate per-trial constraint-violation summaries.

    Parameters
    ----------
    results : dict mapping constraint-count -> trial dicts
    key : which violation dict to read from each trial
        (e.g. 'train_violation', 'test_rates')
    func : summary function applied to each trial's violation dict
        (one of the compute_score_* helpers above)
    filter_keys : if True, restrict each trial's dict to its
        'active_constraints' before summarizing

    Returns (min_scores, mean_scores, max_scores, baseline); the arrays cover
    the constrained runs in CONSTRAINT_SET order, baseline is the list of
    trial scores for the unconstrained ('0') run.
    """
    all_scores = []
    for num_constraints in CONSTRAINT_SET:
        trials = results[num_constraints]
        trial_scores = []
        for trial_num in trials.keys():
            trial_nums = trials[trial_num][key]
            if filter_keys:
                # Keep only the constraints that were active in this trial.
                selected_keys = trials[trial_num]['active_constraints']
                trial_nums = {k: v for k, v in trial_nums.items() if k in selected_keys}
            trial_scores.append(func(trial_nums))
        all_scores.append(trial_scores)
    return _aggregate_trials(all_scores)


def get_performance(results, key, func):
    """Aggregate per-trial model performance (e.g. 'accuracy', 'tpr') as
    percentages.  `func` is unused but kept for signature compatibility
    with the get_scores call sites.  Same return shape as get_scores."""
    all_scores = []
    for num_constraints in CONSTRAINT_SET:
        trials = results[num_constraints]
        trial_scores = [trials[trial_num]['scores'][key] * 100
                        for trial_num in trials.keys()]
        all_scores.append(trial_scores)
    return _aggregate_trials(all_scores)


def _plot_train_test(train_scores, test_scores, train_baseline, test_baseline,
                     title, ylabel, train_label, test_label):
    # Shared train/test curve plot.  Baselines are drawn as dashed hlines
    # and skipped when passed as None (e.g. the active-constraints-only
    # plot, where the baseline run has no active-constraint set).
    if train_baseline is not None:
        plt.hlines(y=train_baseline, xmin=1, xmax=22,
                   label="Baseline (Train)", color="red", linestyle='dashed')
    plt.plot(ACTIVE_CONSTRAINT_SET, train_scores, label=train_label, color="red")
    if test_baseline is not None:
        plt.hlines(y=test_baseline, xmin=1, xmax=22,
                   label="Baseline (Test)", color="blue", linestyle='dashed')
    plt.plot(ACTIVE_CONSTRAINT_SET, test_scores, label=test_label, color="blue")
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel("Number of constraints")
    plt.legend()
    plt.show()


# # Result 1: Overall constraint satisfaction score

# +
_, train_scores, _, train_baseline = get_scores(results, 'train_violation', compute_score_mean)
_, test_scores, _, test_baseline = get_scores(results, 'test_violation', compute_score_mean)
_plot_train_test(train_scores, test_scores, train_baseline, test_baseline,
                 "Constraint satisfaction (Mean)", "Violation magnitude",
                 "Train violation error", "Test violation error")
# -

# +
_, train_scores, _, train_baseline = get_scores(results, 'train_violation', compute_score_max)
_, test_scores, _, test_baseline = get_scores(results, 'test_violation', compute_score_max)
_plot_train_test(train_scores, test_scores, train_baseline, test_baseline,
                 "Constraint satisfaction (Max)", "Violation magnitude",
                 "Train violation error", "Test violation error")
# -

# +
_, train_scores, _, train_baseline = get_scores(results, 'train_violation', compute_score_median, False)
_, test_scores, _, test_baseline = get_scores(results, 'test_violation', compute_score_median, False)
_plot_train_test(train_scores, test_scores, train_baseline, test_baseline,
                 "Constraint satisfaction (Median)", "Violation magnitude",
                 "Train violation error", "Test violation error")
# -

# +
_, train_scores, _, train_baseline = get_scores(results, 'train_violation', compute_score_std)
_, test_scores, _, test_baseline = get_scores(results, 'test_violation', compute_score_std)
_plot_train_test(train_scores, test_scores, train_baseline, test_baseline,
                 "Constraint satisfaction (Standard Deviation)", "Violation magnitude",
                 "Train violation variance", "Test violation variance")
# -

# ## Accuracy

# +
_, test_scores, _, test_baseline = get_performance(results, 'accuracy', None)
plt.hlines(y=test_baseline, xmin=1, xmax=22, label="Baseline", color="blue", linestyle='dashed')
plt.plot(ACTIVE_CONSTRAINT_SET, test_scores, label="Test", color="blue")
plt.title("Test Accuracy")
plt.legend()
plt.show()
# -

# ## TPR

# +
_, test_scores, _, test_baseline = get_performance(results, 'tpr', None)
plt.hlines(y=test_baseline, xmin=1, xmax=22, label="Baseline", color="blue", linestyle='dashed')
plt.plot(ACTIVE_CONSTRAINT_SET, test_scores, label="Test", color="blue")
plt.title("TPR")
plt.legend()
plt.show()
# -

# ## Group-wise performance variance

# +
_, train_scores, _, train_baseline = get_scores(results, 'train_rates', compute_score_std)
_, test_scores, _, test_baseline = get_scores(results, 'test_rates', compute_score_std)
_plot_train_test(train_scores, test_scores, train_baseline, test_baseline,
                 "Group TPR Standard Deviation", "Standard Deviation of group TPR",
                 "Train Group STD", "Test Group STD")
# -

# Spot-check: raw std of one run's group-wise test rates.
np.std(list(results['1']['0']['test_rates'].values()))

# +
_, train_scores, _, train_baseline = get_scores(results, 'train_violation', compute_score_mean, True)
_, test_scores, _, test_baseline = get_scores(results, 'test_violation', compute_score_mean, True)
_plot_train_test(train_scores, test_scores, None, None,
                 "Constraint satisfaction (Mean) - Active only", "Violation magnitude",
                 "Train violation error", "Test violation error")
# -
Process Trial Results (Adult Income).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # A collection of random Pandas problems from stackoverflow, solved using sqltables. import sqltables # + # https://stackoverflow.com/questions/64793902/python-merge-several-columns-of-a-dataframe-without-having-duplicates-of-data Name = ['Lolo', 'Mike', 'Tobias','Luke','Sam'] Age = [19, 34, 13, 45, 52] Info_1 = ['Tall', 'Large', 'Small', 'Small',''] Info_2 = ['New York', 'Paris', 'Lisbon', '', 'Berlin'] Info_3 = ['Tall', 'Paris', 'Hi', 'Small', 'Thanks'] Data = [123,268,76,909,87] Sex = ['F', 'M', 'M','M','M'] columns = {'Name' : Name, 'Age' : Age, 'Info_1' : Info_1, 'Info_2' : Info_2, 'Info_3' : Info_3, 'Data' : Data, 'Sex' : Sex} # - db = sqltables.Database() tab = db.create_table(rows=zip(*columns.values()), column_names=columns.keys()) tab info_tab = tab \ .view("select Name, Info_1 as Info from _ union select Name, Info_2 as Info from _ union select Name, Info_3 as Info from _") \ .view("select distinct * from _") \ .view("select Name, group_concat(Info, ' ') as Info from _ where Info != '' group by Name") tab.view("select _.*, coalesce(info.Info, '') as Info from _ left join info on _.Name = info.Name", bindings={"info": info_tab}) # https://stackoverflow.com/questions/64793653/how-to-convert-a-dataframe-to-ndarray-of-0s-and-1s example = """ col_1 col_2 a 4 a 3 b 2 c 2 d 1 b 4 c 1 """ data = [row.split() for row in example.split("\n") if row != ""][1:] db = sqltables.Database() tab = db.create_table(rows=data, column_names=["col_1", "col_2"]) tab crosstab = tab.view("select * from (select distinct col_1 from _), (select distinct col_2 from _)") counts = tab.view(""" select crosstab.col_1, crosstab.col_2, count(_.col_1) as count from crosstab left join _ using (col_1, col_2) group by crosstab.col_1, crosstab.col_2 """, bindings={"crosstab": 
crosstab}) counts # + import json class Json_group_array: def __init__(self): self.array = [] def step(self, x): self.array.append(x) def finalize(self): return json.dumps(self.array) db._conn.create_aggregate("json_group_array", 1, Json_group_array) # - [json.loads(x) for [x] in counts.view("select json_group_array(count) from _ group by col_1 order by col_1, col_2")] # https://stackoverflow.com/questions/64793652/count-top-most-frequent-phrases-in-a-text-column-in-pandas import re example = """ Andy | max min | tea | pal no limit | toy 2011 | hess | mix Andy | Andy | toy 2011| pal """ db = sqltables.Database() tab = db.create_table(rows=([x] for x in example.split("\n")[1:-1]), column_names=["text"]) tab tab2 = db.load_values(([x] for [text] in tab for x in re.split(r"\s*\|\s*", text)), column_names=["text1"]) tab2.view("select text1, count(*) as count from _ group by text1 order by count desc limit 3") # https://stackoverflow.com/questions/64809403/change-the-value-of-a-column-based-on-finding-characters-in-another-column-with example = """ City - Country Saddle(Canada) - Other Dublin - Other Detroit - USA Vancouver - Canada NYC: US - Other """ [header, *data] = example.split("\n")[1:-1] column_names = header.split(" - ") rows = [x.split(" - ") for x in data] db = sqltables.Database() import re, sqlite3 sqlite3.enable_callback_tracebacks(True) db._conn.create_function("regexp", 2, lambda x, y: bool(re.search(x, y))) tab = db.create_table(rows=rows, column_names=column_names) tab tab.view("select City, regexp('US', City) from _") overrides = db.load_values([ [0, 'Canada', 'Canada'], [1, 'US', 'USA'], [2, None, None] ], column_names=["priority", "regex", "Country"]) overrides tab.view("select min(priority), City, coalesce(overrides.Country, _.Country) as Country from _, overrides where (overrides.regex is not null and regexp(overrides.regex, _.City)) or overrides.regex is null group by City order by priority", bindings={"overrides": overrides})
examples/Stackoverflow Pandas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Observations and Insights # Based on the analysis conducted below, we can reach the following observations: # # 1. Based on the summary analysis of the tumor growth for all the mice in each drug regimen, the following four drugs appear to be the most promising in decreasing or minizing the increase of tumor growth: Capomulin, Ramicane, Propriva, Ceftamin. The first two regimens show a decrease in tumor growth and the last two have the smallest growth compared to the other drug regimens. # # 2. There appears to be a strong correlation between a mouse's weight and tumor size when looking at the Capomulin drug regimen data. # # 3. Based on the summary data of all drug regimens, it appears as though the drug Ketapril led to worse outcomes than a mouse who was given a placebo. It appears as though there was a slightly larger increase in the tumor volume when compared to mice in the placebo group but further analysis is needed here. # # Code # %matplotlib inline # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import scipy.stats as stats # Study data files mouse_metadata_path = "data/Mouse_metadata.csv" study_results_path = "data/Study_results.csv" # Read the mouse data and the study results mouse_metadata = pd.read_csv(mouse_metadata_path) study_results = pd.read_csv(study_results_path) # Combine the data into a single dataset combined_mice_df = pd.merge(study_results, mouse_metadata, how="outer", on="Mouse ID") # - mice_sorted_df = combined_mice_df.sort_values(by=["Mouse ID", "Timepoint"]) mice_sorted_df # Checking the number of mice in the DataFrame. 
number_of_mice = len(mice_sorted_df["Mouse ID"].unique()) number_of_mice # ### Assumption: It is more valuable to de-duplicate the size of the tumor in the last timepoint for each mouse because the size must have been impacted by the drug regimen. # Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. # Create a clean DataFrame by dropping the duplicate mouse by its ID. de_duped_mice_df = mice_sorted_df.drop_duplicates("Mouse ID", "last") de_duped_mice_df # adds new column showcasing the growth or decrease in tumor size from the first measurement of 45 mm3 de_duped_mice_df["Tumor Growth"] = de_duped_mice_df["Tumor Volume (mm3)"] - 45.0 de_duped_mice_df mice_sorted_df["Tumor Growth"] = de_duped_mice_df["Tumor Growth"] # Checking the number of mice in the clean DataFrame. assert (de_duped_mice_df["Mouse ID"].count()) == number_of_mice mice_sorted_df["Drug Regimen"].unique() # ## Summary Statistics # # Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen # # + # find mean of tumor volume grouped by drug regimen and creating series for tumor volume drug_regimen_group = mice_sorted_df.groupby(by="Drug Regimen") tumor_series_group = drug_regimen_group["Tumor Growth"] # - tumor_mean = tumor_series_group.mean() tumor_median = tumor_series_group.median() tumor_std = tumor_series_group.std() tumor_variance = tumor_series_group.var() tumor_sem = tumor_series_group.sem() # + # creating summary table summary_df = pd.DataFrame(data={"Mean":tumor_mean, "Median":tumor_median, "Standard Deviation":tumor_std, "Variance":tumor_variance, "SEM":tumor_sem}) summary_df # - # ## Bar Plots mice_sorted_df.columns # + # Generate a bar plot showing the number of mice per time point for each treatment throughout the course of the study using pandas. 
#finding the unique timepoints: timepoint_labels = mice_sorted_df["Timepoint"].unique().tolist() number_of_mice_per_timepoint = mice_sorted_df["Timepoint"].value_counts().tolist() mice_per_timepoint_df = pd.DataFrame(mice_sorted_df["Timepoint"].value_counts()) # tick_locations = [value for value in timepoint_labels] # - #Plotting using pandas mice_per_timepoint_df.plot(kind="bar", title="Number of Mice per Timepoint", xlabel="Timepoint", ylabel="Number of Mice", rot=0) plt.savefig("../Images/MiceTimepointBar_Pandas.png") plt.show() # + #Plotting using pyplot plt.bar(timepoint_labels, number_of_mice_per_timepoint, color="r", align="center", tick_label=timepoint_labels) # titles and axis labels plt.title("Number of Mice per Timepoint") plt.xlabel("Timepoint") plt.ylabel("Number of Mice") plt.savefig("../Images/MiceTimepointBar_Pyplot.png") plt.show() # - # ## Pie Plots # + mice_sorted_df.columns mice_sex_distribution_series = mice_sorted_df["Sex"].value_counts() mice_sex_distribution_list = mice_sex_distribution_series.tolist() # + # Generate a pie plot showing the distribution of female versus male mice using pandas mice_sex_distribution_series.plot(kind="pie", title="Mice Sex Distribution", legend=True, table=True, ylabel="") plt.savefig("../Images/MiceSexDistribution_Pandas.png") plt.show() # + # Generate a pie plot showing the distribution of female versus male mice using pyplot plt.pie( x=mice_sex_distribution_list, labels=["Male", "Female"], colors=["Green", "Purple"], shadow=5, startangle=90, radius=2 ) plt.title("Mice Sex Distribution") plt.axis("equal") plt.savefig("../Images/MiceSexDistribution_Pyplot.png") plt.show() # - # ## Quartiles, Outliers and Boxplots # ### Using summary_df we identified that the four most promising treatment regimens are: # 1. Capomulin # 2. Ramicane # 3. Propriva # 4. Ceftamin # # The first two regimens show a decrease in tumor growth and the last two have the smallest growth compared to the other drug regimens. 
# + # Calculate the final tumor volume of each mouse across four of the most promising treatment regimens. final_tumor_volume = de_duped_mice_df["Tumor Volume (mm3)"] # creating a list and dataframe to pull specific drug regimen data from chosen_drug_regimens = ["Capomulin", "Ramicane", "Propriva", "Ceftamin"] final_tumor_volume_regimens = de_duped_mice_df[["Tumor Volume (mm3)", "Drug Regimen"]] # + # creating dataframes for tumor volumes based on four most promising regimens capo_final_tumor_volume = final_tumor_volume_regimens.loc[(final_tumor_volume_regimens["Drug Regimen"] == "Capomulin")] rami_final_tumor_volume = final_tumor_volume_regimens.loc[(final_tumor_volume_regimens["Drug Regimen"] == "Ramicane")] pro_final_tumor_volume = final_tumor_volume_regimens.loc[(final_tumor_volume_regimens["Drug Regimen"] == "Propriva")] ceft_final_tumor_volume = final_tumor_volume_regimens.loc[(final_tumor_volume_regimens["Drug Regimen"] == "Ceftamin")] # + # Calculate the IQR and quantitatively determine if there are any potential outliers. 
- Capomulin capo_quartiles = capo_final_tumor_volume["Tumor Volume (mm3)"].quantile(q=[0.25, 0.5, 0.75]) capo_lowerq = capo_quartiles[0.25] capo_upperq = capo_quartiles[0.75] capo_iqr = capo_upperq - capo_lowerq capo_lower_bound = capo_lowerq - (1.5 * capo_iqr) capo_upper_bound = capo_upperq + (1.5 * capo_iqr) # Ramicane: rami_quartiles = rami_final_tumor_volume["Tumor Volume (mm3)"].quantile(q=[0.25, 0.5, 0.75]) rami_lowerq = rami_quartiles[0.25] rami_upperq = rami_quartiles[0.75] rami_iqr = rami_upperq - rami_lowerq rami_lower_bound = rami_lowerq - (1.5 * rami_iqr) rami_upper_bound = rami_upperq + (1.5 * rami_iqr) # Propriva: pro_quartiles = pro_final_tumor_volume["Tumor Volume (mm3)"].quantile(q=[0.25, 0.5, 0.75]) pro_lowerq = pro_quartiles[0.25] pro_upperq = pro_quartiles[0.75] pro_iqr = pro_upperq - pro_lowerq pro_lower_bound = pro_lowerq - (1.5 * pro_iqr) pro_upper_bound = pro_upperq + (1.5 * pro_iqr) # Ceftamin: ceft_quartiles = ceft_final_tumor_volume["Tumor Volume (mm3)"].quantile(q=[0.25, 0.5, 0.75]) ceft_lowerq = ceft_quartiles[0.25] ceft_upperq = ceft_quartiles[0.75] ceft_iqr = ceft_upperq - ceft_lowerq ceft_lower_bound = ceft_lowerq - (1.5 * ceft_iqr) ceft_upper_bound = ceft_upperq + (1.5 * ceft_iqr) # + print(f"Using iqr, we have deteremined that any Capomulin value below {capo_lower_bound} or above {capo_upper_bound} could potentially be an outlier") print(f"Using iqr, we have deteremined that any Ramicane value below {rami_lower_bound} or above {rami_upper_bound} could potentially be an outlier") print(f"Using iqr, we have deteremined that any Propriva value below {pro_lower_bound} or above {pro_upper_bound} could potentially be an outlier") print(f"Using iqr, we have deteremined that any Propriva value below {ceft_lower_bound} or above {ceft_upper_bound} could potentially be an outlier") # - # ### Plotting box plots for each of the drug regimens side by side data_to_plot = [capo_final_tumor_volume["Tumor Volume (mm3)"], 
rami_final_tumor_volume["Tumor Volume (mm3)"], pro_final_tumor_volume["Tumor Volume (mm3)"], ceft_final_tumor_volume["Tumor Volume (mm3)"]] # + # Generate a box plot of the final tumor volume of each mouse across four regimens of interest plt.figure(figsize=(11, 7)) plt.boxplot(data_to_plot, labels=chosen_drug_regimens) plt.title("Final Tumor Volume (mm3) by Drug Regimen") plt.ylabel("Final Tumor Volume (mm3)") plt.savefig("../Images/FinalTumorVolumeByDrug.png") plt.show() # - # ## Line and Scatter Plots # + # Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin capo_tumor_volume_all_df = mice_sorted_df.loc[(mice_sorted_df["Drug Regimen"] == "Capomulin")] capo_tumor_time_df = capo_tumor_volume_all_df[["Mouse ID", "Timepoint", "Tumor Volume (mm3)", "Weight (g)"]] #selecting individual mouse for line and scatter plots b128_df = capo_tumor_time_df.loc[(capo_tumor_time_df["Mouse ID"] == "b128")] b128_df # + timepoint_x_axis = b128_df["Timepoint"] tumor_volume_y_axis = b128_df["Tumor Volume (mm3)"] plt.plot(timepoint_x_axis, tumor_volume_y_axis, marker="+",color="red", linewidth=1.5) plt.title("B128 Tumor Volume (mm3) by Timepoint") plt.xlabel("Timepoint") plt.ylabel("Tumor Volume (mm3)") plt.savefig("../Images/B128TumorVolumeByTime.png") plt.show() # - average_tumor_volume_by_weight_df = capo_tumor_time_df.groupby("Weight (g)").mean() average_tumor_volume_by_weight_df # + # Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen weight_x_axis = average_tumor_volume_by_weight_df.index weight_y_axis = average_tumor_volume_by_weight_df["Tumor Volume (mm3)"] plt.scatter(weight_x_axis, weight_y_axis, marker="o",color="blue") plt.title("Average Tumor Volume (mm3) by Weight (g)") plt.xlabel("Weight (g)") plt.ylabel("Average Tumor Volume (mm3)") plt.savefig("../Images/WeightByTumorVolume.png") plt.show() # - # ## Correlation and Regression # + # Calculate the correlation coefficient and linear 
regression model for mouse weight and average tumor volume for the Capomulin regimen corr_coeff = stats.pearsonr(weight_x_axis, weight_y_axis) print(f"The correlation between the average tumor size and weight for a mouse on the Capomulin regimen is {round(corr_coeff[0],2)}.") # - # ### Given that the r value for the relationship between average tumor size and weight for a mouse is close to 1, we can say that there is a strong positive correlation between the two. # + # linear regression using scipy (slope, intercept, rvalue, pvalue, stderr) = stats.linregress(weight_x_axis, weight_y_axis) # finding regression values regress_values = weight_x_axis * slope + intercept # finding equation of regression line line_equation = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.scatter(weight_x_axis, weight_y_axis, marker="o",color="blue") plt.plot(weight_x_axis, regress_values, "--") plt.annotate(line_equation, (20,30) ,fontsize=12,color="red") plt.title("Average Tumor Volume (mm3) by Weight (g)") plt.xlabel("Weight (g)") plt.ylabel("Average Tumor Volume (mm3)") plt.savefig("../Images/RegressionWeightByTumorVolume.png") plt.show()
Pymaceuticals/.ipynb_checkpoints/pymaceuticals_starter-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # ## CFADs vs Drosdwosky # # This notebook plots the CFADs in differing Drosdowsky/MJO regimes. from netCDF4 import Dataset from matplotlib import pyplot as plt from datetime import datetime from matplotlib import dates import numpy as np # %matplotlib inline import pandas from scipy import interpolate # Input data path cfad_netcdf_file_path_break14 = '/home/rjackson/data/cfadregime0_dros14.cdf' cfad_netcdf_file_path_monsoon14 = '/home/rjackson/data/cfadregime1_dros04.cdf' cfad_netcdf_file_path_break48 = '/home/rjackson/data/cfadregime0_dros58.cdf' cfad_netcdf_file_path_monsoon48 = '/home/rjackson/data/cfadregime1_dros58.cdf' # Load netCDF variables # + cfad_netcdf = Dataset(cfad_netcdf_file_path_break14, mode='r') cfadb14 = cfad_netcdf.variables['cfad'][:] zb14 = cfad_netcdf.variables['z'][:] bins_zb14 = cfad_netcdf.variables['bins_z'][:] cfadb14_cum = np.cumsum(cfadb14, axis=1) cum_fun = interpolate.interp1d(bins_zb14, cfadb14_cum) ranges = np.arange(1, 50, 0.1) percents = cum_fun(ranges) five_b14 = np.zeros(len(zb14)) ten_b14 = np.zeros(len(zb14)) median_b14 = np.zeros(len(zb14)) ninety_b14 = np.zeros(len(zb14)) ninety_five_b14 = np.zeros(len(zb14)) for i in range(0, len(zb14)): five_index = np.where(percents[i] < 0.05) ten_index = np.where(percents[i] < 0.10) median_index = np.where(percents[i] < 0.5) ninety_index = np.where(percents[i] < 0.9) ninety_five_index = np.where(percents[i] < 0.95) if(len(five_index[0]) == 0): five_b14[i] = 0 else: five_b14[i] = ranges[five_index[0][-1]] if(len(ten_index[0]) == 0): ten_b14[i] = 0 else: ten_b14[i] = ranges[ten_index[0][-1]] if(len(median_index[0]) == 0): median_b14[i] = 0 else: median_b14[i] = ranges[median_index[0][-1]] if(len(ninety_index[0]) == 0): ninety_b14[i] = 0 else: ninety_b14[i] = 
ranges[ninety_index[0][-1]] if(len(ninety_five_index[0]) == 0): ninety_five_b14[i] = 0 else: ninety_five_b14[i] = ranges[ninety_five_index[0][-1]] cfad_netcdf.close() cfad_netcdf = Dataset(cfad_netcdf_file_path_monsoon14, mode='r') cfadm14 = cfad_netcdf.variables['cfad'][:] zm14 = cfad_netcdf.variables['z'][:] bins_zm14 = cfad_netcdf.variables['bins_z'][:] cfadm14_cum = np.cumsum(cfadm14, axis=1) cum_fun = interpolate.interp1d(bins_zm14, cfadm14_cum) ranges = np.arange(1, 50, 0.1) percents = cum_fun(ranges) five_m14 = np.zeros(len(zm14)) ten_m14 = np.zeros(len(zm14)) median_m14 = np.zeros(len(zm14)) ninety_m14 = np.zeros(len(zm14)) ninety_five_m14 = np.zeros(len(zm14)) for i in range(0, len(zb14)): five_index = np.where(percents[i] < 0.05) ten_index = np.where(percents[i] < 0.10) median_index = np.where(percents[i] < 0.5) ninety_index = np.where(percents[i] < 0.9) ninety_five_index = np.where(percents[i] < 0.95) if(len(five_index[0]) == 0): five_m14[i] = 0 else: five_m14[i] = ranges[five_index[0][-1]] if(len(ten_index[0]) == 0): ten_m14[i] = 0 else: ten_m14[i] = ranges[ten_index[0][-1]] if(len(median_index[0]) == 0): median_m14[i] = 0 else: median_m14[i] = ranges[median_index[0][-1]] if(len(ninety_index[0]) == 0): ninety_m14[i] = 0 else: ninety_m14[i] = ranges[ninety_index[0][-1]] if(len(ninety_five_index[0]) == 0): ninety_five_m14[i] = 0 else: ninety_five_m14[i] = ranges[ninety_five_index[0][-1]] cfad_netcdf.close() cfad_netcdf = Dataset(cfad_netcdf_file_path_break48, mode='r') cfadb48 = cfad_netcdf.variables['cfad'][:] zb48 = cfad_netcdf.variables['z'][:] bins_zb48 = cfad_netcdf.variables['bins_z'][:] cfadm14_cum = np.cumsum(cfadb48, axis=1) cum_fun = interpolate.interp1d(bins_zb48, cfadm14_cum) ranges = np.arange(1, 50, 0.1) percents = cum_fun(ranges) one_b48 = np.zeros(len(zm14)) five_b48 = np.zeros(len(zm14)) ten_b48 = np.zeros(len(zm14)) median_b48 = np.zeros(len(zm14)) ninety_b48 = np.zeros(len(zm14)) ninety_five_b48 = np.zeros(len(zm14)) for i in range(0, 
len(zb14)): five_index = np.where(percents[i] < 0.05) ten_index = np.where(percents[i] < 0.10) median_index = np.where(percents[i] < 0.5) ninety_index = np.where(percents[i] < 0.9) ninety_five_index = np.where(percents[i] < 0.95) if(len(five_index[0]) == 0): five_b48[i] = 0 else: five_b48[i] = ranges[five_index[0][-1]] if(len(ten_index[0]) == 0): ten_b48[i] = 0 else: ten_b48[i] = ranges[ten_index[0][-1]] if(len(median_index[0]) == 0): median_b48[i] = 0 else: median_b48[i] = ranges[median_index[0][-1]] if(len(ninety_index[0]) == 0): ninety_b48[i] = 0 else: ninety_b48[i] = ranges[ninety_index[0][-1]] if(len(ninety_five_index[0]) == 0): ninety_five_b48[i] = 0 else: ninety_five_b48[i] = ranges[ninety_five_index[0][-1]] cfad_netcdf.close() cfad_netcdf = Dataset(cfad_netcdf_file_path_monsoon48, mode='r') cfadm48 = cfad_netcdf.variables['cfad'][:] zm48 = cfad_netcdf.variables['z'][:] bins_zm48 = cfad_netcdf.variables['bins_z'][:] cfadm14_cum = np.cumsum(cfadb48, axis=1) ranges = np.arange(1, 50, 0.1) cum_fun = interpolate.interp1d(bins_zm48, cfadm48_cum) percents = cum_fun(ranges) one_m48 = np.zeros(len(zm14)) five_m48 = np.zeros(len(zm14)) ten_m48 = np.zeros(len(zm14)) median_m48 = np.zeros(len(zm14)) ninety_m48 = np.zeros(len(zm14)) ninety_five_m48 = np.zeros(len(zm14)) for i in range(0, len(zb14)): five_index = np.where(percents[i] < 0.05) ten_index = np.where(percents[i] < 0.10) median_index = np.where(percents[i] < 0.5) ninety_index = np.where(percents[i] < 0.9) ninety_five_index = np.where(percents[i] < 0.95) if(len(five_index[0]) == 0): five_m48[i] = 0 else: five_m48[i] = ranges[five_index[0][-1]] if(len(ten_index[0]) == 0): ten_m48[i] = 0 else: ten_m48[i] = ranges[ten_index[0][-1]] if(len(median_index[0]) == 0): median_m48[i] = 0 else: median_m48[i] = ranges[median_index[0][-1]] if(len(ninety_index[0]) == 0): ninety_m48[i] = 0 else: ninety_m48[i] = ranges[ninety_index[0][-1]] if(len(ninety_five_index[0]) == 0): ninety_five_m48[i] = 0 else: ninety_five_m48[i] = 
ranges[ninety_five_index[0][-1]] cfad_netcdf.close() # - # Display statistical coverage product # + Y = z/1e3 X = bins_z cfadb14[cfadb14 < 0.001] = np.nan XX, YY = np.meshgrid(X,Y) plt.figure(figsize=(10,5)) plt.subplot(221) plt.contourf(XX,YY,np.log10(np.squeeze(cfadb14[:,:])), vmin=-2, vmax=1, cmap='Greys') plt.plot(five_b14,z/1e3, linestyle='--', color='k') plt.plot(ten_b14,z/1e3, linewidth=1, color='k') plt.plot(median_b14,z/1e3, linewidth=2, color='k') plt.plot(ninety_b14,z/1e3, linewidth=1, color='k') plt.plot(ninety_five_b14,z/1e3, linestyle='--', color='k') ax = plt.colorbar(ticks=[-3, -2, -1, 0]) plt.clim([-3,0]) ax.ax.set_yticklabels(['0.1%', '1%', '10%','100%']) plt.xlabel('$Z_{c}$ [dBZ]') plt.ylabel('z [km]') plt.title('Break MJO 1-4') plt.subplot(222) Y = zm14/1e3 X = bins_zm14 cfadm14[cfadm14 < 0.001] = np.nan XX, YY = np.meshgrid(X,Y) plt.contourf(XX,YY,np.log10(np.squeeze(cfadm14[:,:])), vmin=-2, vmax=1, cmap='Greys') plt.plot(five_m14,z/1e3, linestyle='--', color='k') plt.plot(ten_m14,z/1e3, linewidth=1, color='k') plt.plot(median_m14,z/1e3, linewidth=2, color='k') plt.plot(ninety_m14,z/1e3, linewidth=1, color='k') plt.plot(ninety_five_m14,z/1e3, linestyle='--', color='k') ax = plt.colorbar(ticks=[-3, -2, -1, 0, 1]) plt.clim([-3,0]) ax.ax.set_yticklabels(['0.1%', '1%', '10%','100%']) plt.xlabel('$Z_{c}$ [dBZ]') plt.ylabel('z [km]') plt.title('Monsoon MJO 1-4') Y = z/1e3 X = bins_z cfadb48[cfadb48 < 0.001] = np.nan XX, YY = np.meshgrid(X,Y) plt.figure(figsize=(10,5)) plt.subplot(223) plt.contourf(XX,YY,np.log10(np.squeeze(cfadb48[:,:])), vmin=-2, vmax=1, cmap='Greys') plt.plot(five_b48,z/1e3, linestyle='--', color='k') plt.plot(ten_b48,z/1e3, linewidth=1, color='k') plt.plot(median_b48,z/1e3, linewidth=2, color='k') plt.plot(ninety_b48,z/1e3, linewidth=1, color='k') plt.plot(ninety_five_b48,z/1e3, linestyle='--', color='k') ax = plt.colorbar(ticks=[-3, -2, -1, 0]) plt.clim([-3,0]) ax.ax.set_yticklabels(['0.1%', '1%', '10%','100%']) 
plt.xlabel('$Z_{c}$ [dBZ]') plt.ylabel('z [km]') plt.title('Break MJO 5-8') plt.subplot(224) Y = zm/1e3 X = bins_zm cfadm48[cfadm48 < 0.001] = np.nan XX, YY = np.meshgrid(X,Y) plt.contourf(XX,YY,np.log10(np.squeeze(cfadm48[:,:])), vmin=-2, vmax=1, cmap='Greys') plt.plot(five_m48,z/1e3, linestyle='--', color='k') plt.plot(ten_m48,z/1e3, linewidth=1, color='k') plt.plot(median_m48,z/1e3, linewidth=2, color='k') plt.plot(ninety_m48,z/1e3, linewidth=1, color='k') plt.plot(ninety_five_m48,z/1e3, linestyle='--', color='k') ax = plt.colorbar(ticks=[-3, -2, -1, 0, 1]) plt.clim([-3,0]) ax.ax.set_yticklabels(['0.1%', '1%', '10%','100%']) plt.xlabel('$Z_{c}$ [dBZ]') plt.ylabel('z [km]') plt.title('Monsoon MJO 5-8')
notebooks/CFAD-DrosMJO.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <table align="left"> # <td> # <a target="_blank" href="https://colab.research.google.com/github/gimseng/99-ML-Learning-Projects/blob/master/002/solution/Linear-regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # </table> # # Hi - Welcome to the Linear Regression excercise # # ## The first half of this notebook is meant for data preprocessing, it's not require but heavily encouraged to go over them and understand what is going on. # # ## The main task of the assignment is in the second half of the notebook # # ### Run cells below which import all required libraries import numpy as np import pandas as pd import sklearn, sklearn.model_selection # ### The cell below retreives the data and splits it into train_x, train_y, valid_x, valid_y, test_x, test_y # + # pick how big the validation/test portion of the data is, currently set to 20% validation, 20% test, and 60% train valid_size = 0.2 test_size = 0.2 # load the data from the .csv file github_url = 'https://raw.githubusercontent.com/gimseng/99-ML-Learning-Projects/' data_source = 'master/002/data/Housing_Prices.csv' data = pd.read_csv(github_url+data_source) def train_valid_test_split(data, valid_size, test_size): # split into train and test train, test = sklearn.model_selection.train_test_split(data, test_size=test_size) # further split train into train and validation. 
(valid_size needs to be recalculated to properly split train) valid_size = valid_size/(1-test_size) train, valid = sklearn.model_selection.train_test_split(train, test_size=valid_size) return train, valid, test train, valid, test = train_valid_test_split(data, valid_size, test_size) train_x, train_y = train.iloc[:, :-1], train.iloc[:, -1:] valid_x, valid_y = valid.iloc[:, :-1], valid.iloc[:, -1:] test_x, test_y = test.iloc[:, :-1], test.iloc[:, -1:] print(f'train size: {len(train_x)}, valid size: {len(valid_x)}, test size: {len(test_x)}') # - # ### The cell below scales the numeric features using a min_max_scaler (scales them between 0 and 1) numeric_columns = train_x.select_dtypes(include='number').columns # scaler = sklearn.preprocessing.MinMaxScaler() # scaler = sklearn.preprocessing.KBinsDiscretizer(n_bins=10, encode='ordinal') scaler = sklearn.preprocessing.StandardScaler() # train on concatenation of train and validation scaler.fit(pd.concat((train_x[numeric_columns], valid_x[numeric_columns]))) # apply on all data train_x[numeric_columns] = scaler.transform(train_x[numeric_columns]) valid_x[numeric_columns] = scaler.transform(valid_x[numeric_columns]) test_x[numeric_columns] = scaler.transform(test_x[numeric_columns]) # ### The cell below one-hot encodes the data # + def get_dummies_column_keys(dataFrames): '''returns a column list of get dummies for concatenated dataframes''' return pd.get_dummies(pd.concat(dataFrames)).columns.tolist() def get_dummies_using_keys(dataFrame, column_keys): '''returns get dummies result with columns matching column_keys''' result = pd.get_dummies(dataFrame) result = result.reindex(columns=column_keys).fillna(0.00) return result # get the keys for the concatenation of all datasets column_keys = get_dummies_column_keys((train_x, valid_x, test_x)) train_x = get_dummies_using_keys(train_x, column_keys) valid_x = get_dummies_using_keys(valid_x, column_keys) test_x = get_dummies_using_keys(test_x, column_keys) # - # ### The cell 
# below appends a '1' to all rows by adding a 'Bias' feature with the value of 1 for all rows<br/> This is to simulate a bias without adding any bias functionality to the Linear Regression algorithm

# A constant "Bias" column lets the weight vector learn an intercept (w_0)
# while prediction stays a single matrix product.
train_x['Bias'] = 1
valid_x['Bias'] = 1
test_x['Bias'] = 1

# ### This last cell gets the numpy arrays that we will work on from the panda dataframes

# +
# store the dataframes in df_* variables (kept around for later inspection)
df_train_x, df_train_y = train_x, train_y
df_valid_x, df_valid_y = valid_x, valid_y
df_test_x, df_test_y = test_x, test_y

# store the numpy arrays in the regular variables
# NOTE: from here on train_x/train_y etc. are numpy arrays, not DataFrames
train_x, train_y = train_x.values, train_y.values
valid_x, valid_y = valid_x.values, valid_y.values
test_x, test_y = test_x.values, test_y.values
# -

# # Proceed with any data analysis in the next cell
#
# ## (Optional but encouraged)

# # This is the main task of the exercise

# ## You need to implement the following functions which <br/> implement Linear Regression and all it's required functions

# ### The first major function is the 'predict' function <br/> Remember the Linear Regression equation is
# $$\hat{Y} = w_0 + w_1 x_1 + w_2 x_2 + ... + w_m x_m$$

# ### hint: try to implement a 'vectorized' version by using a matrix multiplication<br/> which will massively increase performance

# +
def predict(w, x):
    '''
    Return the linear regression prediction from the weights and input features

    Args:
        w: The weight vector, shape: (features, 1)
        x: The input data, shape: (num_samples, features)

    Returns:
        The prediction of the model, shape: (num_samples, 1)
    '''
    # Vectorized form: one matrix product computes y_hat for every sample.
    # The constant "Bias" column in x makes its weight act as the intercept w_0.
    return x @ w


def init_weights(x):
    '''
    Initialize the weights vector, the data is passed in to know how many weights you need

    Args:
        x: The input data, shape: (num_samples, features)

    Returns:
        The initial weight vector of zeros, shape: (features, 1)
    '''
    return np.zeros((x.shape[1], 1))
# -

# ### Next you have to implement the Mean Squared Error Loss calculation following the following formula
# $$LOSS(W)=\frac{1}{2n} \sum_{i=1}^{n} (\hat{y}_i-y_i)^2$$

def loss(y_hat, y):
    '''
    Calculate the Mean Squared Error based on the y_hat (the predicted value) and y (the true values)

    Args:
        y_hat: the predicted values of the input, shape: (num_samples, 1)
        y: the true values of the input, shape: (num_samples, 1)

    Returns:
        Mean Squared Error, shape: scalar
    '''
    n = y.shape[0]
    delta = y_hat - y
    # delta.T @ delta is a (1, 1) matrix; float() collapses it to the true
    # scalar promised by the docstring (the original returned the matrix).
    return float((delta.T @ delta) / (2 * n))

# ### Next you have to implement the gradient calculation for the weights according to the formula
#
# $$\frac{\partial LOSS(W)}{\partial W_j}
# = \frac{\partial}{\partial W_j} \frac{1}{2n} \sum_{i=1}^{n} (\hat{y}_i-y_i)^2
# = \frac{1}{n} \sum_{i=1}^{n} ((\hat{y}_i-y_i)*x_{i,j})$$

# +
def get_gradient(w, x, y_hat, y):
    '''
    Get the gradient of the weights using the parameters passed
    (Note: not all parameters passed have to be used)

    Args:
        w: The weight vector, shape: (features, 1)
        x: The input data, shape: (num_samples, features)
        y_hat: the predicted values of the input, shape: (num_samples, 1)
        y: the true values of the input, shape: (num_samples, 1)

    Returns:
        The gradients of the weight vector, shape: (features, 1)
    '''
    n = y.shape[0]
    delta = y_hat - y
    # d/dW [ (1/2n) * sum (y_hat - y)^2 ] = (1/n) * X^T (y_hat - y)
    return (x.T @ delta) / n
# -

# ### Next you have to implement the gradient descent update for the weights according to the formula
#
# $$w_j:=w_j - \alpha * \frac{\partial LOSS(W)}{\partial W_j} $$

def get_updated_weights(w, w_gradients, learning_rate):
    '''
    Calculate the new value of the weights after applying the gradient descent weight update rule

    Args:
        w: The weight vector, shape: (features, 1)
        w_gradients: The gradients of the weight vector, shape: (features, 1)
        learning_rate: The learning rate of the algorithm, shape: scalar

    Returns:
        The updated value of the weights, shape: (features, 1)
    '''
    return w - learning_rate * w_gradients

# ### Finally implement the training loop, this should simply be<br/> a loop that calls the functions you implemented above

def train(epochs, x, y, learning_rate=1e-3, initial_weights=None):
    '''
    The main train loop for the algorithm. This performs the gradient update step 'epochs' times.

    Args:
        epochs: the number of gradient-descent updates to perform
        x: The input data, shape: (num_samples, features)
        y: the true values of the input, shape: (num_samples, 1)
        learning_rate: The learning rate of the algorithm, shape: scalar
        initial_weights: The initial weight to start training, this should be passed to continue training

    Returns:
        The final weight values after applying 'epochs' number of updates on 'initial_weights', shape: (features, 1)
    '''
    if initial_weights is None:
        weight = init_weights(x)
    else:
        weight = initial_weights
    for epoch in range(epochs):
        # BUG FIX: the original used the globals train_x/train_y here,
        # silently ignoring the x/y arguments — the function could never
        # train on anything but the training set, and broke outside the
        # notebook's global namespace.
        y_hat = predict(weight, x)
        w_grad = get_gradient(weight, x, y_hat, y)
        weight = get_updated_weights(weight, w_grad, learning_rate)
    return weight

# ### The cell below will run your code
#
# ### After 200,000 epochs our results were the following:
# ### Train loss: 2.39e+08<br/> Valid loss: 3.65e+08<br/> Test loss: 3.60e+08

# %%time
w = None
for i in range(20):
    # Train in 10k-epoch chunks, feeding the weights back in to continue.
    w = train(10000, train_x, train_y, learning_rate=5e-3, initial_weights=w)
    print('epoch:', (i+1)*10000, end=' ')
    print('Train loss:', loss(predict(w, train_x), train_y), end=' ')
    print('Valid loss:', loss(predict(w, valid_x), valid_y))
print('Final Model -- Test loss:', loss(predict(w, test_x), test_y))
002/solution/Linear-regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.6 64-bit
#     name: python386jvsc74a57bd0cbed48eac648e81ccf1c681dd9326aaada9743655527812ef1d08301103ee039
# ---

# ## Image pyramids
#
# An image pyramid simply means **representing one picture at several different sizes**.
# In the figure below, the leftmost picture is the original; **moving right, each picture is
# smaller than the previous one until a minimum-size threshold is reached** — no picture
# smaller than that threshold is produced. Such a stack of progressively scaled images is
# called an image pyramid, and each size is one level of the pyramid.
# The purpose of the pyramid is to find targets (objects, animals, ...) that appear at
# different scales in the picture.
#
# ![](../dataset/2_pyramid/图像金字塔演示.jpg)
#
# **Why:** the sliding window has a fixed size, so we change the size of the image
# instead, until the window can enclose the object we are looking for.

import cv2
from matplotlib import pyplot as plt
from IPython import display
# %matplotlib inline

def pyramid(image, top = (128, 128), ratio = 1.2):
    """Generate the levels of an image pyramid.

    Args:
        image: the original image to build the pyramid from
        top (tuple, optional): minimum (height, width) the image may shrink to;
            the first element is the height, the second the width.
            Defaults to (128, 128).
        ratio (float, optional): factor by which the image is shrunk at each
            step. Defaults to 1.2.

    Yields:
        Successively smaller versions of ``image``, the original first.
    """
    yield image  # the original, full-size image first
    while True:
        # new width and height after shrinking by `ratio`
        (w, h) = (int(image.shape[1] / ratio), int(image.shape[0] / ratio))
        image = cv2.resize(image, (w, h), interpolation = cv2.INTER_AREA)  # resize the image
        if w < top[1] or h < top[0]:  # stop once the image falls below the minimum size
            break
        yield image

# Try it out on a sample image
image = cv2.imread("../dataset/2_pyramid/pets.jpg")  # read the sample image (BGR order)
plt.imshow(image[:,:,::-1])  # BGR -> RGB for matplotlib

for i in pyramid(image, ratio = 1.5):
    i = i[:,:,::-1]
    # show each level — watch the axes shrink from level to level
    plt.imshow(i)
    plt.pause(0.3)
    display.clear_output(wait=True)

# ## Combining the image pyramid with a sliding window
# Classic object detection combines an **image pyramid** with a **sliding window**
# to find targets at different positions and scales.
# The rectangle that slides over the picture has a **fixed size**, so a target that is
# much larger or smaller than the rectangle cannot be detected on the original image alone.
#
# Running the sliding window on every level of the pyramid solves this.
# In the left picture below the dog does not fit in the rectangle — it only covers part
# of the dog's face; in the right picture, after shrinking the image, **the rectangle's
# size is unchanged but the dog now fits entirely inside it**.
#
# ![](../dataset/2_pyramid/图像金字塔与滑动窗口例子.jpg)

def sliding_window(image, window, step):
    """Yield the coordinates and contents of each sliding-window position.

    Args:
        image: the input image
        window: the window size, a tuple (window_w, window_h)
        step: how many pixels the window moves per step

    Yields:
        (x, y, crop): the window's top-left corner and the image crop under it
    """
    for y in range(0, image.shape[0] - window[1], step):  # move the window down
        for x in range(0, image.shape[1] - window[0], step):  # move the window right
            yield (x, y, image[y:y + window[1], x:x + window[0]])  # top-left corner + crop

# +
# Combine the two: the pyramid yields each scale, the sliding window scans it
(window_w, window_h) = (128, 128)  # sliding-window size
for i in pyramid(image, ratio = 1.5):  # one image per pyramid level
    for (x, y, window) in sliding_window(i, (window_w, window_h), 100):
        # BUG FIX: window.shape[0] is the crop's HEIGHT and window.shape[1] its
        # WIDTH, so they must be compared to window_h / window_w respectively.
        # The original compared them the other way round — harmless only while
        # the window is square.
        if window.shape[0] != window_h or window.shape[1] != window_w:
            continue
        clone = i.copy()
        cv2.rectangle(clone, (x, y), (x + window_w, y + window_h), (0, 255, 0), 2)
        clone = clone[:,:,::-1]
        plt.imshow(clone)
        plt.pause(0.3)
        display.clear_output(wait=True)
# -

# Below are some of the frames produced by the script. The sliding window runs on every
# pyramid level: the rectangle keeps its size while the picture keeps shrinking, until
# the target gradually fits inside the rectangle.
#
2021_07_12/2_图像金字塔/2_图像金字塔.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Hyperparameter Tuning using HyperDrive
#
# TODO: Import Dependencies. In the cell below, import all the dependencies that you will need to complete the project.

# + gather={"logged": 1610297817568}
from azureml.core import Workspace, Experiment, Dataset, Datastore, Model
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.widgets import RunDetails
from azureml.train.sklearn import SKLearn
from azureml.train.hyperdrive.run import PrimaryMetricGoal
from azureml.train.hyperdrive.policy import BanditPolicy
from azureml.train.hyperdrive.sampling import GridParameterSampling, RandomParameterSampling
from azureml.train.hyperdrive.runconfig import HyperDriveConfig
from azureml.train.hyperdrive.parameter_expressions import choice, uniform
#from azureml.core.environment import Environment
#import pickle

# BUG FIX: `os` is used below (os.listdir / os.mkdir) but was never imported,
# which raised a NameError on the directory-creation cell.
import os

import numpy as np
import pandas as pd
import json
import requests
# -

# + gather={"logged": 1610296102795} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Connect to the workspace (reads .\config.json) and create the experiment.
ws = Workspace.from_config()
experiment_name = 'house-price'
experiment1=Experiment(ws, experiment_name)

# + gather={"logged": 1610296109738}
# Create the output folders used later for training artefacts and the best model.
if "training" not in os.listdir():
    os.mkdir("./training")
if "hyper" not in os.listdir():
    os.mkdir("./hyper")
# -

# ## Create or use a compute target

# + gather={"logged": 1610297829257}
compute_name = "nuria-p3"

try:
    # Reuse the cluster if it already exists in the workspace.
    compute_target = ComputeTarget(workspace=ws, name=compute_name)
    print('Using existing compute target.')
except ComputeTargetException:
    # Otherwise provision a new one (1-5 Standard_D12_V2 nodes).
    # NOTE: an unused `cluster_type = 1` assignment was removed here.
    print('Creating compute target.')
    provisioning_config = AmlCompute.provisioning_configuration(vm_size='Standard_D12_V2',
                                                                min_nodes=1,
                                                                max_nodes=5)
    compute_target = ComputeTarget.create(ws, compute_name, provisioning_config)
    compute_target.wait_for_completion(show_output=True)
# -

# ## Dataset

# + gather={"logged": 1610297853244}
# load data and create datasets for training and testing
from sklearn.datasets import fetch_california_housing
from train import create_datasets

x,y = fetch_california_housing(return_X_y=True)
x_train,x_test,y_train,y_test = create_datasets(x,y)

# +
#datastore=ws.get_default_datastore()
# data = pd.concat([x_train,y_train], axis=1)

# + [markdown] gather={"logged": 1598531923519} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# ## Hyperdrive Configuration
#
# In order to decide which model to use for the hyperparameter tuning section, I fitted several models in the [notebook](sklearn_california_dataset.ipynb) mentioned before. From those models I concluded that the best ones were a vanilla OLS and, slightly better, a Stochastic Gradient Descent Regressor. This last one was the chosen one for this project.
#
# Among the hyperparameters that I did not change were the loss function, left as the default `squared_loss` that recovers a standard OLS; the maximum number of iterations, which I set very high but with early stopping enabled to prevent a failure to converge; the learning rate schedule, left as the default `invscaling`; and the penalty, which I chose to be `elasticnet`. The parameters tuned were:
#
# * **alpha**: Multiplier of the regularization term. The higher the value, the stronger the regularization. I used a uniform distribution in the range (0.0001, 0.01) to cover both the documentation default value and the best result I got in my preparing work.
# * **l1_ratio**: The Elastic Net mixing parameter. ``l1_ratio=0`` corresponds to L2 penalty, `l1_ratio=1` to L1. I used a uniform distribution in the range (0, 1) to cover all possibilities.
# * **eta0**: The initial learning rate. I used a uniform distribution in the range (0.1, 0.9) based on my previous tests.
# * **power_t**: The exponent for inverse scaling learning rate. I used a uniform distribution in the range (0.01, 0.99) also as the result of my previous tests.
#
# For the early termination policy, I chose it to evaluate every iteration after the fifth one, and a slack factor of 0.2, which is the percentage distance allowed with respect to the best performing run; any child run falling outside that range will be terminated. This is used to save resources from being wasted on runs that are not performing well.
#
# I chose a maximum of 50 runs to be able to complete the entire project in the time allowed in the lab. And I used four maximum concurrent runs because they need to be less than the number of nodes in the compute cluster.

# + gather={"logged": 1610297862059} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# TODO: Create an early termination policy. This is not required if you are using Bayesian sampling.
# Bandit policy: from the 5th evaluation on, kill runs whose metric falls more
# than 20% (slack_factor) behind the current best run.
early_termination_policy = BanditPolicy(slack_factor = 0.2, evaluation_interval = 1, delay_evaluation = 5)

# + gather={"logged": 1610298711638}
## Random sampling over the four SGDRegressor hyperparameters described above.
param_sampling = RandomParameterSampling({"alpha": uniform(0.0001,0.01),
                                          "l1_ratio": uniform(0,1),
                                          "eta0": uniform(0.1,0.9),
                                          "power_t": uniform(0.01,0.99)})

# + gather={"logged": 1610298717753}
#TODO: Create your estimator and hyperdrive config
estimator = SKLearn(source_directory='.', compute_target=compute_name, entry_script='./train.py')

# + gather={"logged": 1610298794810}
# NOTE: the primary metric is logged under the name "Accuracy" by train.py,
# but for this regression task it is actually the R2 score.
hyperdrive_run_config = HyperDriveConfig(estimator=estimator,
                                         hyperparameter_sampling=param_sampling,
                                         policy=early_termination_policy,
                                         primary_metric_name="Accuracy",
                                         primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
                                         max_total_runs=50,
                                         max_concurrent_runs=4)

# + gather={"logged": 1610298806046} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#TODO: Submit your experiment
hyperdrive_run = experiment1.submit(hyperdrive_run_config)

# + [markdown] gather={"logged": 1598544898497} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# ## Run Details

# + gather={"logged": 1598546648408} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
RunDetails(hyperdrive_run).show()
hyperdrive_run.wait_for_completion(show_output=False)
# -

# ## Best Model

# +
import joblib

# Pick the child run with the best primary metric and persist its arguments.
best_run = hyperdrive_run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
parameters = best_run.get_details()['runDefinition']['arguments']
joblib.dump(parameters, filename='hyper/best-hyperdrive.joblib')
print('Best Run Id: ', best_run.id)
print('\n R2:', best_run_metrics['Accuracy'])
# -

hyperdrive_run.get_children_sorted_by_primary_metric()

# Collect the per-child metrics into a summary table.
result = hyperdrive_run.get_metrics()

accuracy = []
alpha = []
l1_ratio = []
eta0 = []
power_t = []
iteration = list(result.keys())
colum = ['Iteration','R2 Score','Alpha','L1 Ratio','Eta0','Power t']

for i in range(len(result)):
    accuracy.append(result[iteration[i]]['Accuracy'])
    alpha.append(result[iteration[i]]['Alpha'])
    l1_ratio.append(result[iteration[i]]['L1 Ratio'])
    eta0.append(result[iteration[i]]['Eta0'])
    power_t.append(result[iteration[i]]['Power t'])

out = pd.DataFrame(np.column_stack((iteration, accuracy,alpha,l1_ratio,eta0,power_t)),columns=colum)
# BUG FIX: np.column_stack coerces everything to strings, so sorting 'R2 Score'
# was lexicographic; cast the metric columns back to float for a numeric sort.
out[colum[1:]] = out[colum[1:]].astype(float)
# BUG FIX: with inplace=True, sort_values returns None — the original assigned
# that None to `out_sorted`. Sort in place and use `out` directly.
out.sort_values(by=['R2 Score'], ascending=False, inplace=True, kind='quicksort', na_position='last')
out.head(10)

# + gather={"logged": 1598546657829} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#TODO: Save the best model
joblib.dump(parameters, filename='hyper/best-hyperdrive.joblib')
# -

# Register the model
#model = best_run.register_model(model_name='best-hyperdrive', model_path='hyper/best-hyperdrive.joblib')
model = best_run.register_model(model_name='best-hyperdrive', model_path='.')

# ## Since the AutoML experiment was able to produce a better model, I will not deploy this one. This Exercise ends here.
#
# Note that this is not a very good model. The best $r^2$ reached was around 0.61, while the correlation between median house value and median income of the block is 0.69
#
capstoneProject/hyperparameter_tuning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Classify different data sets # ### Basic includes # + # Using pandas to load the csv file import pandas as pd import numpy as np import matplotlib.pyplot as plt from keras import models from keras import layers from keras import callbacks from keras.utils import to_categorical # reuters and fashin mnist data set from keras from keras.datasets import reuters from keras.datasets import fashion_mnist # needed to preprocess text from keras.preprocessing.text import Tokenizer # - # ### Classify the Fashion Mnist # # --- # + (fashion_train_data, fashion_train_labels), (fashion_test_data, fashion_test_labels) = fashion_mnist.load_data() print(fashion_train_labels) print(fashion_test_data.shape) test_index = 10 plt.title("Label: " + str(fashion_train_labels[test_index])) plt.imshow(fashion_train_data[test_index], cmap="gray") # - # #### TO DO: Preprocess the data # # 1. Normalize the input data set # 2. Perform one hot encoding # 3. Create a train, test, and validation set # + #Normalize train images train_images1 = fashion_train_data.reshape((60000,28*28)) train_images1 = train_images1.astype('float32') / 255 #One hot encoding for train labels train_labels1 = to_categorical(fashion_train_labels) #Normalize test images test_images1 = fashion_test_data.reshape((10000,28*28)) test_images1 = test_images1.astype('float32') / 255 #One hot encoding for test labels test_labels1 = to_categorical(fashion_test_labels) # - # #### TO DO: Define and train a network, then plot the accuracy of the training, validation, and testing # # 1. Use a validation set # 2. Propose and train a network # 3. Print the history of the training # 4. 
# Evaluate with a test set

# +
#Define model
# Fully-connected classifier for Fashion-MNIST: 784 -> 355 -> 240 -> 10,
# with dropout after every hidden layer to reduce overfitting.
num_classes_1 = 10

model = models.Sequential()
#First layer of 784
model.add(layers.Dense(784, activation='relu', input_shape=(28*28,)))
# Added dropout between the input and first hidden layer
model.add(layers.Dropout(0.5))
#First hidden layer
model.add(layers.Dense(355, activation='relu'))
model.add(layers.Dropout(0.3))
#Second hidden layer
model.add(layers.Dense(240, activation='relu'))
model.add(layers.Dropout(0.3))
#Output layer (one unit per class, softmax for probabilities)
model.add(layers.Dense(num_classes_1, activation='softmax'))
model.summary()

# Included the early stopping which monitors the validation loss
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=5)

model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# -

#Train model
# 20% of the training images are held out as the validation set.
history1 = model.fit(train_images1, train_labels1, batch_size=512, epochs=40, validation_split=0.2, callbacks=[early_stop], verbose=2)

# +
#Evaluate on the held-out test images; prints [test loss, test accuracy]
results_1 = model.evaluate(test_images1, test_labels1)
print(results_1)
# -

history_dict1 = history1.history
print(history_dict1.keys())

# +
# Get values from history
# NOTE: 'acc'/'val_acc' are the key names used by this (older) Keras version.
acc_1 = history_dict1['acc']
val_acc_1 = history_dict1['val_acc']
loss_1 = history_dict1['loss']
val_loss_1 = history_dict1['val_loss']
epochs = range(1, len(acc_1) + 1)

# +
#For plot
# Plot of the validation and training loss (left) and accuracy (right)
f, (ax1, ax2)=plt.subplots(1,2,figsize=(20,10))
# "bo" is for "blue dot"
ax1.plot(epochs, loss_1, 'bo', label='Training loss')
# b is for "solid blue line"
ax1.plot(epochs, val_loss_1, 'b', label='Validation loss')
ax1.set_title('Training and validation loss')
ax1.set_xlabel('Epochs')
ax1.set_ylabel('Loss')
ax1.legend()

ax2.plot(epochs, acc_1, 'bo', label='Training acc')
# b is for "solid blue line"
ax2.plot(epochs, val_acc_1, 'b', label='Validation acc')
ax2.set_title('Training and validation acc')
ax2.set_xlabel('Epochs')
ax2.set_ylabel('Acc')
ax2.legend()

plt.show()
# -

# ### Explanation Fashion Mnist
#
# For Fashion Mnist dataset I reshaped all
the dataset to (60000,28*28) and then divide it by 255 so the sataset remains in values between 0 and 1. After that, I created an architecture of the input layer with 784 neurons, a dropout of 0.5 to prevent overfitting ,2 hidden layers: one with 355 and the second with 240 both of them with a dropout value of 0.3. The output layer is the 10 classifications the dataset has. All layers, except the last one, have activation relu and the last one has softmax activation. The model is compiled with loss categorical_crossentropy, optimizer rmsprop and accuracy metric; it also has an early stop that monitors 'val_loss' and has a patience value of 5. It trains with 48000 images and validates with 12000 (validation split of 0.2) and test with 10000 images, and when it is trained it uses a batch size of 512 # ## Classifying newswires # # --- # # Build a network to classify Reuters newswires into 46 different mutually-exclusive topics. # ### Load and review the data # + (reuters_train_data, reuters_train_labels),(reuters_test_data, reuters_test_labels) = reuters.load_data(num_words=10000) print(reuters_train_data.shape) print(reuters_train_labels.shape) print(reuters_train_data[0]) print(reuters_train_labels[0]) print(set(reuters_train_labels)) # - # Load the word index to decode the train data. # + word_index = reuters.get_word_index() reverse_index = dict([(value+3, key) for (key, value) in word_index.items()]) reverse_index[0] = "<PAD>" reverse_index[1] = "<START>" reverse_index[2] = "<UNKNOWN>" # unknown reverse_index[3] = "<UNUSED>" decoded_review = ' '.join([reverse_index.get(i,'?') for i in reuters_train_data[0]]) print(decoded_review) # - # #### TO DO: Preprocess the data # # 1. Normalize the input data set # 2. Perform one hot encoding # 3. 
# Create a train, test, and validation set

# +
# One output class per topic (labels are 0..45, so max+1 = 46)
num_classes_model2 = max(reuters_train_labels)+1

# Binary bag-of-words vectorization over the 10000 most frequent words
tokenizer = Tokenizer(num_words=10000)
train_data_token = tokenizer.sequences_to_matrix(reuters_train_data, mode='binary')
test_data_token = tokenizer.sequences_to_matrix(reuters_test_data, mode='binary')

# One-hot encoding the output
one_hot_train_labels = to_categorical(reuters_train_labels,num_classes_model2)
one_hot_test_labels = to_categorical(reuters_test_labels,num_classes_model2)
# -

# #### TO DO: Define and train a network, then plot the accuracy of the training, validation, and testing
#
# 1. Use a validation set
# 2. Propose and train a network
# 3. Print the history of the training
# 4. Evaluate with a test set

# +
# Single hidden layer of 512 units; the extra 64-unit layers below were tried
# and discarded (they reduced accuracy — see the explanation at the end).
model1 = models.Sequential()
model1.add(layers.Dense(512, activation='relu', input_shape=(10000,)))
model1.add(layers.Dropout(0.5))
#model1.add(layers.Dense(64, activation='relu'))
#model1.add(layers.Dropout(0.5))
#model1.add(layers.Dense(64, activation='relu'))
#model1.add(layers.Dropout(0.5))
#model1.add(layers.Dense(64, activation='relu'))
#model1.add(layers.Dropout(0.5))
model1.add(layers.Dense(num_classes_model2, activation='softmax'))
model1.summary()

# included the early stopping which monitors the validation loss
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)

model1.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# -

# 20% of the training newswires are held out for validation
history = model1.fit(train_data_token, one_hot_train_labels, batch_size=128, epochs=5, validation_split=0.2, callbacks=[early_stop], verbose=2)

# +
# Evaluate on the test set; prints [test loss, test accuracy]
results = model1.evaluate(test_data_token, one_hot_test_labels)
print(results)
# -

history_dict = history.history
print(history_dict.keys())

# +
# NOTE: 'acc'/'val_acc' are the key names used by this (older) Keras version.
acc_2 = history_dict['acc']
val_acc_2 = history_dict['val_acc']
loss_2 = history_dict['loss']
val_loss_2 = history_dict['val_loss']
epochs = range(1, len(acc_2) + 1)

# +
#For plot
# Plot of the validation and training loss (left) and accuracy (right)
f, (ax1, ax2)=plt.subplots(1,2,figsize=(20,10))
# "bo" is for "blue dot"
ax1.plot(epochs, loss_2, 'bo', label='Training loss')
# b is for "solid blue line"
ax1.plot(epochs, val_loss_2, 'b', label='Validation loss')
ax1.set_title('Training and validation loss')
ax1.set_xlabel('Epochs')
ax1.set_ylabel('Loss')
ax1.legend()

ax2.plot(epochs, acc_2, 'bo', label='Training acc')
# b is for "solid blue line"
ax2.plot(epochs, val_acc_2, 'b', label='Validation acc')
ax2.set_title('Training and validation acc')
ax2.set_xlabel('Epochs')
ax2.set_ylabel('Acc')
ax2.legend()

plt.show()
# -

# ### Explanation Reuters
#
# For the Reuters dataset I selected the 10000 most used words, and applied the tokenizer.sequences_to_matrix method with mode "binary" to the train and test data, then performed one-hot encoding on the labels. After that, I created an architecture with an input layer of 512 neurons, a dropout of 0.5 to prevent overfitting, and an output layer for the 46 classes the dataset has. All layers, except the last one, have relu activation and the last one has softmax activation. The model is compiled with loss categorical_crossentropy, optimizer rmsprop and the accuracy metric; it also has an early stop that monitors 'val_loss' with a patience value of 2. It trains with 7185 newswires, validates with 1797 (validation split of 0.2) and tests with 2246. This dataset was tested with many layer configurations: two or more hidden layers with a number of neurons greater than the output layer decreased the accuracy, so that was discarded. After that I tried different optimizers such as adamax, SGD and adam but none of them improved the accuracy; rmsprop was the most stable and accurate in the results. I also changed from mode "binary" to "freq" in tokenizer.sequences_to_matrix, and removed the early stop with 100 epochs, in which I obtained an 81% accuracy but with an overfit in the model.
So I returned to "binary", add the early stop and reduce the epochs to 50 where I obtained between 79% and 80% of accuracy, but avoiding overfitting its possible by doing about 3 epochs. # ## Predicting Student Admissions # # --- # # Predict student admissions based on three pieces of data: # # - GRE Scores # - GPA Scores # - Class rank # ### Load and visualize the data student_data = pd.read_csv("data/student_data.csv") #print(student_data) # Plot of the GRE and the GPA from the data. X = np.array(student_data[["gre","gpa"]]) y = np.array(student_data["admit"]) admitted = X[np.argwhere(y==1)] rejected = X[np.argwhere(y==0)] plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'red', edgecolor = 'k',label='Rejected') plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'cyan', edgecolor = 'k', label='Admitted') plt.xlabel('Test (GRE)') plt.ylabel('Grades (GPA)') plt.legend() plt.show() # Plot of the data by class rank. # + f, plots = plt.subplots(2, 2, figsize=(20,10)) plots = [plot for sublist in plots for plot in sublist] for idx, plot in enumerate(plots): data_rank = student_data[student_data["rank"]==idx+1] plot.set_title("Rank " + str(idx+1)) X = np.array(data_rank[["gre","gpa"]]) y = np.array(data_rank["admit"]) admitted = X[np.argwhere(y==1)] rejected = X[np.argwhere(y==0)] plot.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'red', edgecolor = 'k') plot.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'cyan', edgecolor = 'k') plot.set_xlabel('Test (GRE)') plot.set_ylabel('Grades (GPA)') # - # #### TO DO: Preprocess the data # # 1. Normalize the input data set # 2. Perform one hot encoding # 3. 
Create a train, test, and validation set # + maxGRE = max(np.array(student_data["gre"])) maxGPA = max(np.array(student_data["gpa"])) num_class_3 = 2 student_data=student_data.fillna(0) preprocessed_data = pd.concat([student_data,pd.get_dummies(student_data["rank"], prefix='rank')],axis=1) preprocessed_data = preprocessed_data.drop("rank",axis=1) processed_data = preprocessed_data[:] processed_data["gre"] = processed_data["gre"] / maxGRE processed_data["gpa"] = processed_data["gpa"] / maxGPA sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False) train_data, test_data = processed_data.iloc[sample], processed_data.drop(sample) print("Number of training samples is", len(train_data)) print("Number of testing samples is", len(test_data)) features = train_data.drop('admit', axis=1) features_test = test_data.drop('admit', axis=1) targets = to_categorical(train_data['admit'],num_class_3) targets_test = to_categorical(test_data['admit'],num_class_3) print(features[:10]) print(targets[:10]) print(features.shape) print(features_test.shape) # - # #### TO DO: Define and train a network, then plot the accuracy of the training, validation, and testing # # 1. Use a validation set # 2. Propose and train a network # 3. Print the history of the training # 4. 
# Evaluate with a test set

# +
# Small classifier for admit/reject: 128 -> 64 -> 2, dropout after each
# hidden layer to fight overfitting on this small dataset.
model2 = models.Sequential()
model2.add(layers.Dense(128,activation='relu', input_shape=(features.shape[1],)))
model2.add(layers.Dropout(0.5))
model2.add(layers.Dense(64,activation='relu'))
model2.add(layers.Dropout(0.5))
model2.add(layers.Dense(num_class_3, activation='softmax'))
model2.summary()

# included the early stopping which monitors the validation loss
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=10)

model2.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# -

# 20% of the training rows are held out for validation
history = model2.fit(features, targets, batch_size=16, epochs=100, validation_split=0.2, callbacks=[early_stop], verbose=2)

# +
# Evaluate on the 10% held-out test split; prints [test loss, test accuracy]
results = model2.evaluate(features_test, targets_test)
print(results)
# -

history_dict = history.history
print(history_dict.keys())

# +
# NOTE: 'acc'/'val_acc' are the key names used by this (older) Keras version.
acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)

# +
#For plot
# Plot of the validation and training loss (left) and accuracy (right)
f, (ax1, ax2)=plt.subplots(1,2,figsize=(20,10))
# "bo" is for "blue dot"
ax1.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
ax1.plot(epochs, val_loss, 'b', label='Validation loss')
ax1.set_title('Training and validation loss')
ax1.set_xlabel('Epochs')
ax1.set_ylabel('Loss')
ax1.legend()

ax2.plot(epochs, acc, 'bo', label='Training acc')
# b is for "solid blue line"
ax2.plot(epochs, val_acc, 'b', label='Validation acc')
ax2.set_title('Training and validation acc')
ax2.set_xlabel('Epochs')
ax2.set_ylabel('Acc')
ax2.legend()

plt.show()
# -

# ### Explanation Student Admissions
#
# For this Student Admissions dataset, first I analyzed the data in order to know in which way I could normalize it. After that I did an encoding of the ranks of the students, then I normalized the GPA and GRE features by dividing each one by the highest value of its column, so all will be between 0 and 1.
Then I shuffled the data into a 90% train / 10% test split, separated the labels using the admit column, and applied one-hot encoding to them. After that, I created an architecture with an input layer of 128 neurons, a dropout of 0.5 to prevent overfitting, a hidden layer of 64 neurons (also followed by dropout), and an output layer for the 2 classes the dataset has. All layers, except the last one, have relu activation and the last one has softmax activation. The model is compiled with loss categorical_crossentropy, optimizer rmsprop and the accuracy metric; it also has an early stop that monitors 'val_loss' with a patience value of 10. This dataset was tested with many layer configurations: two or more hidden layers with a number of neurons greater than the output layer decreased the accuracy, so that was discarded. I also reduced the number of neurons, which gave better overall accuracy but still shows some overfitting. This could be improved with more data.
Keras_assignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Topic-model a corpus of job postings with LDA, then match a resume against
# its most probable topic to surface the skills the user shares with — and is
# missing from — that topic.

# +
from gensim.models import LsiModel
import pyLDAvis
import pyLDAvis.gensim
import warnings
import pickle
import gensim
from pprint import pprint
from gensim.corpora import Dictionary, MmCorpus
from gensim.models.ldamulticore import LdaMulticore
from gensim.models import LsiModel
from gensim.models.ldamulticore import LdaMulticore
from gensim.models.ldamodel import LdaModel
import itertools as it
import en_core_web_sm
import spacy

nlp = spacy.load('en')

import nltk
from nltk.corpus import stopwords
from nltk import RegexpTokenizer

# NOTE(review): this rebinds the imported `stopwords` *module* name to a plain
# list of English stopwords; `vectorize_input` below relies on the list form.
stopwords = stopwords.words('english')
# lemmatizer = nltk.WordNetLemmatizer()

# +
# # load the finished bag-of-words corpus from disk
# trigram_bow_corpus = MmCorpus('../data/models_data_lower/spacy_trigram_bow_corpus_all.mm') # No POS preprocessing

# # load the finished dictionary from disk
# trigram_dictionary = Dictionary.load('../data/models_data_lower/spacy_trigram_dict_all.dict') # No POS preprocessing

# load the finished bag-of-words corpus from disk
trigram_bow_corpus_POS = MmCorpus('../data/spacy_trigram_bow_corpus_all_POS.mm') # With POS preprocessing

# load the finished dictionary from disk
trigram_dictionary_POS = Dictionary.load('../data/spacy_trigram_dict_all_POS.dict') # With POS preprocessing
# -

# ## Create a new model

# +
# %%time
# Create the LDA model (warnings from gensim's optimizer are suppressed).
with warnings.catch_warnings():
    warnings.simplefilter('ignore')

    # workers => sets the parallelism, and should be
    # set to your number of physical cores minus one
    lda_alpha_auto = LdaModel(trigram_bow_corpus_POS,
                              id2word=trigram_dictionary_POS,
                              num_topics=25,
                              alpha='auto',
                              eta='auto')

lda_alpha_auto.save('../data/models_data_lower/spacy_lda_model_POS_alpha_eta_auto')
# -

# load the finished LDA model from disk
lda = LdaModel.load('../data/models_data_lower/spacy_lda_model_POS_alpha_eta_auto')

# Human-readable names for the 25 LDA topics. Keys are 1-based (topic index
# + 1); "(?)" marks topics whose interpretation is uncertain.
topic_names = {1: u'(?)Large Tech Corps (NVIDIA, Splunk, Twitch)',
               2: u'Technical Federal Contracting and Cybersecurity',
               3: u'Financial Risk and Cybersecurity',
               4: u'Web Development (More Frontend)',
               5: u'Social Media Marketing',
               6: u'Fintech, Accounting, and Investing Analysis/Data',
               7: u'(?)Students, Interns, CMS/Marketing, Benefits',
               8: u'Health Care (Data Systems)',
               9: u'Database Administrator',
               10: u'Marketing and Growth Strategy',
               11: u'Quality Assurance and Testing',
               12: u'Data Science',
               13: u'Big Data Engineering',
               14: u'Sales',
               15: u'(?)Large Tech Corps Chaff: Fiserv, Adove, SAP',
               16: u'Flight and Space (Hardware & Software)',
               17: u'Networks, Hardware, Linux',
               18: u'Supervisor, QA, and Process Improvement',
               19: u'Defense Contracting',
               20: u'Social Media Advertising Management',
               21: u'UX and Design',
               22: u'(?)Amazon Engineering/Computing/Robotics/AI',
               23: u'Mobile Developer',
               24: u'DevOps',
               25: u'Payments, Finance, and Blockchain'}

# +
## Visualize
# -

LDAvis_data_filepath = '../models/ldavis_prepared'

# load the pre-prepared pyLDAvis data from disk
with open(LDAvis_data_filepath, 'rb') as f:
    LDAvis_prepared = pickle.load(f)

pyLDAvis.display(LDAvis_prepared)

# +
# Original functions from processing step, in modeling.ipynb

def punct_space(token):
    """
    Helper function to eliminate tokens
    that are pure punctuation or whitespace.
    """
    return token.is_punct or token.is_space

def line_review(filename):
    """
    SRG: modified for a list.
    Generator function to read in reviews from the file
    and un-escape the original line breaks in the text.
    """
    for review in filename:
        yield review.replace('\\n', '\n')

def lemmatized_sentence_corpus(filename):
    """
    Generator function to use spaCy to parse reviews,
    lemmatize the text, and yield sentences.
    """
    for parsed_review in nlp.pipe(line_review(filename),
                                  batch_size=10000, n_threads=4):
        for sent in parsed_review.sents:
            yield u' '.join([token.lemma_ for token in sent
                             if not punct_space(token)])
# -

from gensim.models import Phrases
from gensim.models.word2vec import LineSentence

# Load up the bigram and trigram models we trained earlier
bigram_model = Phrases.load('../models/spacy_bigram_model_all_PARSED_POS')
trigram_model = Phrases.load('../models/spacy_trigram_model_all_PARSED_POS')

# +
trigram_dictionary = trigram_dictionary_POS

def vectorize_input(input_doc, bigram_model, trigram_model, trigram_dictionary):
    """
    (1) parse input doc with spaCy and apply text pre-processing steps,
    (2) apply the bigram and trigram phrase models,
    (3) create a bag-of-words representation,
    (4) create an LDA representation.

    Returns (trigram_review, document_lda): the stopword-filtered token list
    and the document's topic distribution under the global `lda` model.
    """
    # parse the review text with spaCy
    parsed_doc = nlp(input_doc)

    # lemmatize the text and remove punctuation and whitespace
    unigram_doc = [token.lemma_ for token in parsed_doc
                   if not punct_space(token)]

    # apply the first-order and secord-order phrase models
    bigram_doc = bigram_model[unigram_doc]
    trigram_doc = trigram_model[bigram_doc]

    # remove any remaining stopwords
    trigram_review = [term for term in trigram_doc
                      if not term in stopwords]

    # create a bag-of-words representation
    # NOTE(review): this uses `trigram_doc` (stopwords NOT removed), while
    # `trigram_review` (stopwords removed) is what gets returned — confirm
    # whether the bag-of-words was meant to use the filtered tokens.
    doc_bow = trigram_dictionary.doc2bow(trigram_doc)

    # create an LDA representation
    document_lda = lda[doc_bow]

    return trigram_review, document_lda

def lda_top_topics(document_lda, topic_names, min_topic_freq=0.05):
    '''
    Print a sorted list of the top topics for a given LDA representation,
    with the top 10 terms of each topic; topics below `min_topic_freq`
    are skipped.
    '''
    # sort with the most highly related topics first
    sorted_doc_lda = sorted(document_lda, key=lambda review_lda: -review_lda[1])

    for topic_number, freq in sorted_doc_lda:
        if freq < min_topic_freq:
            break

        # print the most highly related topic names and frequencies
        print('*'*56)
        # topic_names keys are 1-based, hence topic_number+1
        print('{:50} {:.3f}'.format(topic_names[topic_number+1], round(freq, 3)))
        print('*'*56)
        for term, term_freq in lda.show_topic(topic_number, topn=10):
            print(u'{:20} {:.3f}'.format(term, round(term_freq, 3)))
        print('\n\n')

def top_match_items(document_lda, topic_names, num_terms=100):
    '''
    Print the single most probable topic for a given LDA representation,
    followed by its `num_terms` highest-weight terms.
    '''
    # sort with the most highly related topics first
    sorted_doc_lda = sorted(document_lda, key=lambda review_lda: -review_lda[1])
    topic_number, freq = sorted_doc_lda[0][0], sorted_doc_lda[0][1]

    print('*'*56)
    print('{:50} {:.3f}'.format(topic_names[topic_number+1], round(freq, 3)))
    print('*'*56)
    for term, term_freq in lda.show_topic(topic_number, topn=num_terms):
        print(u'{:20} {:.3f}'.format(term, round(term_freq, 3)))

def top_match_list(document_lda, topic_names, num_terms=500):
    # Take the above results and just save to a list of the top
    # `num_terms` terms in the single most probable topic.
    sorted_doc_lda = sorted(document_lda, key=lambda review_lda: -review_lda[1])
    topic_number, freq = sorted_doc_lda[0][0], sorted_doc_lda[0][1]
    print('Highest probability topic:', topic_names[topic_number+1],'\t', round(freq, 3))
    top_topic_skills = []
    for term, term_freq in lda.show_topic(topic_number, topn=num_terms):
        top_topic_skills.append(term)
    return top_topic_skills

def common_skills(top_topic_skills, user_skills):
    # Topic terms the user's document also contains (order of topic list kept).
    return [item for item in top_topic_skills if item in user_skills]

def non_common_skills(top_topic_skills, user_skills):
    # Topic terms the user's document does NOT contain.
    return [item for item in top_topic_skills if item not in user_skills]
# -

with open('../data/sample_resume.txt', 'r') as infile:
    sample1 = infile.read()

with open('../data/sample_ds_resume2.txt', 'r') as infile:
    sample2 = infile.read()

def generate_common_skills(input_sample):
    """End-to-end report: vectorize a resume, find its best topic, and print
    the skills shared with / missing from that topic.

    NOTE(review): the headings say "Top 40" but the slices take [:100] —
    confirm which limit is intended.
    """
    user_skills, my_lda = vectorize_input(input_sample, bigram_model,
                                          trigram_model, trigram_dictionary)
    # top_items = top_match_items(my_lda, topic_names)
    # print(top_items)
    skills_list = top_match_list(my_lda, topic_names, num_terms=500)
    print("Top 40 skills user has in common with topic:")
    pprint(common_skills(skills_list, user_skills)[:100])
    print("\n\nTop 40 skills user DOES NOT have in common with topic:")
    pprint(non_common_skills(skills_list, user_skills)[:100])

user_skills, my_lda = vectorize_input(sample1, bigram_model, trigram_model, trigram_dictionary)
print(user_skills)

generate_common_skills(sample2)
sandbox/data_science/notebooks/tune_hyperparams.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# ## Example 2: Multi-Camera Scene

# +
import os
import json

import cv2
import sophus as sp
import matplotlib.pyplot as plt

import fairotag as frt

# Dataset layout and physical constants for this example.
DATA_DIR = "data/2_multi_camera_scene"
NUM_CAMERAS = 3
NUM_SAMPLES = 8       # snapshots captured per camera
NUM_MARKERS = 17      # ArUco markers on the calibration board
MARKER_LENGTH = 0.02625  # marker side length in meters
# -

# In this example we will calibrate relative poses of three cameras within the same setup, shown below.

img = cv2.imread(os.path.join(DATA_DIR, "camera_setup.jpg"))
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

# A `Scene` object can be configured by registering cameras and markers.
#
# Each object can belong to a frame in the scene, and objects within the same frame remain static with respect to the frame. "world" is the default, static frame provided by the `Scene` object.

# +
scene = frt.Scene()

# Add cameras (Set camera frame "Master" to be aligned to the "world" frame)
camera_names = ["Master", "Sub1", "Sub2"]
scene.add_camera(camera_names[0], frame="world", pose_in_frame=sp.SE3())
for camera_name in camera_names[1:]:
    scene.add_camera(camera_name, frame="world")

# Add markers: marker 0 anchors the "board" frame (identity pose); the rest
# are rigidly attached to the board with poses to be solved for.
scene.add_frame("board")
scene.add_marker(0, frame="board", length=MARKER_LENGTH, pose_in_frame=sp.SE3())
for m_id in range(1, NUM_MARKERS):
    scene.add_marker(m_id, frame="board", length=MARKER_LENGTH)
# -

# Assets in the scene at any moment can be queried as such:

# +
# Frames
frames = scene.get_frames()
frame_example = frames[0]
frame_example_info = scene.get_frame_info(frame_example)
print(f"Frames: {frames}")
print(f"Info of frame '{frame_example}':\n {frame_example_info}\n")

# Cameras
cameras = scene.get_cameras()
camera_example = cameras[0]
camera_example_info = scene.get_camera_info(camera_example)
print(f"Cameras: {cameras}")
print(f"Info of camera '{camera_example}':\n {camera_example_info}\n")

# Markers
markers = scene.get_markers()
marker_example = markers[0]
marker_example_info = scene.get_marker_info(marker_example)
print(f"Markers: {markers}")
print(f"Info of marker '{marker_example}':\n {marker_example_info}\n")
# -

# We will still need `CameraModule`s to perform marker detections for each cameras, so here we initialize a camera module for each camera we have in the scene, then use the modules to estimate marker poses in each snapshot.

# +
# Initialize camera modules
camera_modules = []
for _ in range(NUM_CAMERAS):
    c = frt.CameraModule()
    camera_modules.append(c)

# Assign intrinsics from file
# ("realsense_intrinsics.json" contains intrinsics obtained through
# pyrealsense; all three cameras share the same intrinsics here.)
with open("data/realsense_intrinsics.json", 'r') as f:
    intrinsics = json.load(f)
for c in camera_modules:
    c.set_intrinsics(frt.utils.dict2intrinsics(intrinsics))

# Register markers
for c in camera_modules:
    for m_id in range(NUM_MARKERS):
        c.register_marker_size(m_id, length=MARKER_LENGTH)

# Load snapshots & detect markers
# (The file "y_x.jpg" contains the image for snapshot number x captured from camera y)
for i in range(NUM_SAMPLES):
    detected_markers_map = {}
    for c, camera_name in zip(camera_modules, camera_names):
        imgfile = os.path.join(DATA_DIR, f"{camera_name}_{i}.jpg")
        img = cv2.imread(imgfile)
        detected_markers = c.detect_markers(img)
        detected_markers_map[camera_name] = detected_markers
    scene.add_snapshot(detected_markers = detected_markers_map)
# -

# The `Scene` objects can then directly use the marker detection outputs from the camera modules to calibrate the scene.

# +
# Calibrate scene: jointly solves camera and marker poses from all snapshots.
scene.calibrate_extrinsics()

# Query poses of assets in the scene
for camera_name in scene.get_cameras():
    print(f"Pose of camera '{camera_name}':\n {scene.get_camera_info(camera_name)['pose_in_frame']}\n")
# -

# Scene visualization:

scene.visualize()
perception/fairotag/tutorials/2_multi_camera_scene.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Train an SVM to predict whether a drug interacts with target protein
# P0C6X7, using JCHEM physico-chemical properties as features.

# +
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
# -

# Load the per-drug property table and inspect it.
dataset = pd.read_csv('drugs.csv')
print(dataset.info())
dataset.head()

# These columns must not contain NaN: impute missing values with the
# column mean. (np.nan is used instead of the np.NaN alias, which was
# removed in NumPy 2.0.)
nan_not_accepted = ['<JCHEM_AVERAGE_POLARIZABILITY>','<JCHEM_LOGP>','<JCHEM_PKA>','<JCHEM_PKA_STRONGEST_ACIDIC>','<JCHEM_PKA_STRONGEST_BASIC>','<JCHEM_REFRACTIVITY>']
for column in nan_not_accepted:
    mean = dataset[column].mean(skipna=True)
    dataset[column] = dataset[column].replace(np.nan, mean)

print(dataset.info())
dataset.head()

# Load the protein-to-drug interaction table and keep the two columns used.
dt = pd.read_csv('all.csv')
print(len(dt))
dt.head()

dt = dt[['UniProt ID', 'Drug IDs']]
print(len(dt))
dt.head()

# Drug IDs known to interact with target protein P0C6X7
# (stored as one "; "-separated string).
reaction = dt.loc[dt['UniProt ID'] == 'P0C6X7']
val = reaction['Drug IDs'].values[0]
drugovi = "".join(val).split("; ")

# Build the binary label vector: 1 iff the drug interacts with the target.
# Using a set (a) makes each membership test O(1) instead of scanning the
# whole list, and (b) guarantees exactly one label per drug — the original
# nested loop appended once per *match*, so a duplicated ID in `drugovi`
# would have misaligned y with the dataset rows.
interacting = set(drugovi)
y = [1 if m in interacting else 0 for m in dataset['<DATABASE_ID>'].values]

# Feature matrix: the 14 numeric property columns.
x = dataset.iloc[:, 2:16]
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0, test_size=0.35)

# Polynomial-kernel SVM; degree 14 was the notebook's chosen setting.
clf = svm.SVC(kernel='poly', degree=14)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)

# Evaluate: confusion matrix, F1 (sensitive to class imbalance), accuracy.
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(f1_score(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
project/Python codes/.ipynb_checkpoints/Maturski SVM-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # Install dependencies for this example # Note: This does not include itk-jupyter-widgets, itself import sys # !{sys.executable} -m pip install itk-io # + try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve import os import itk from itkwidgets import view # - # Download data file_name = '005_32months_T2_RegT1_Reg2Atlas_ManualBrainMask_Stripped.nrrd' if not os.path.exists(file_name): url = 'https://data.kitware.com/api/v1/file/564a5b078d777f7522dbfaa6/download' urlretrieve(url, file_name) image = itk.imread(file_name) view(image)
examples/3DImage.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #hide # !pip3 install voila fastai # !jupyter serverextension enable voila --sys-prefix from fastai.vision.widgets import * from fastai.vision.core import PILImage from fastai.learner import * from pathlib import Path import requests # + path = Path() url = 'https://github.com/SergiiSopin/ML-AI-Tests/raw/main/bear_model.pkl' r = requests.get(url, allow_redirects=True) open('model.pkl', 'wb').write(r.content) path.ls(file_exts='.pkl') import fastai.losses import fastai.layers fastai.layers.BaseLoss = fastai.losses.BaseLoss fastai.layers.CrossEntropyLossFlat = fastai.losses.CrossEntropyLossFlat fastai.layers.BCEWithLogitsLossFlat = fastai.losses.BCEWithLogitsLossFlat fastai.layers.BCELossFlat = fastai.losses.BCELossFlat fastai.layers.MSELossFlat = fastai.losses.MSELossFlat fastai.layers.L1LossFlat = fastai.losses.L1LossFlat fastai.layers.LabelSmoothingCrossEntropy = fastai.losses.LabelSmoothingCrossEntropy fastai.layers.LabelSmoothingCrossEntropyFlat = fastai.losses.LabelSmoothingCrossEntropyFlat learn_inf = load_learner(path/'model.pkl') # - btn_upload = widgets.FileUpload() btn_run = widgets.Button(description='Classify') # + out_pl = widgets.Output() lbl_pred = widgets.Label() def on_click_classify(change): img = PILImage.create(btn_upload.data[-1]) out_pl.clear_output() with out_pl: display(img.to_thumb(128,128)) pred,pred_idx,probs = learn_inf.predict(img) lbl_pred.value = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}' btn_run.on_click(on_click_classify) # - #hide_output VBox([widgets.Label('Select your bear!'), btn_upload, btn_run, out_pl, lbl_pred])
Bear recognizer/BearRecognizer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="KFfm5epZogwp"
# Setup

# + colab={"base_uri": "https://localhost:8080/"} id="_s7ac0iRCRA_" executionInfo={"status": "ok", "timestamp": 1651700282108, "user_tz": -120, "elapsed": 1678, "user": {"displayName": "<NAME>", "userId": "12795006612548471298"}} outputId="1ed7f0a2-3429-4a5b-9130-5553e00fde34"
# Code inspired by <NAME> <<EMAIL>>, License: BSD 3 clause

# Tune the inverse regularization strength C of an L2 (ridge-style)
# logistic regression by plotting validation MCC along the path.

# Python ≥3.7 is required
import sys
assert sys.version_info >= (3, 7)

# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "1.0"
print('The scikit-learn version is {}.'.format(sklearn.__version__))

from time import time
import numpy as np
from numpy import arange
import matplotlib.pyplot as plt
import pandas as pd
import gc

from sklearn import linear_model
from sklearn.svm import l1_min_c
from sklearn.preprocessing import label_binarize #one-vs-all scheme
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline

#access drive
from google.colab import drive
drive.mount('/content/drive')
# %cd /content/drive/MyDrive/GRAD-C24_Machine_Learning/MLProject_KenyaFinancial

x_path= "/content/drive/MyDrive/GRAD-C24_Machine_Learning/MLProject_KenyaFinancial/clean_data/XoheImp.csv"
y_path= "/content/drive/MyDrive/GRAD-C24_Machine_Learning/MLProject_KenyaFinancial/clean_data/Y.csv"

# + [markdown] id="xIS39sXtozdJ"
# Splitting data into test, training and validation set

# + id="au6fYuTGaJpO" executionInfo={"status": "ok", "timestamp": 1651700233745, "user_tz": -120, "elapsed": 86357, "user": {"displayName": "<NAME>", "userId": "12795006612548471298"}} colab={"base_uri": "https://localhost:8080/"} outputId="b04cdb14-c338-4107-98f4-cb26c6410fbf"
# Defines X_train, X_val, y_train, y_val used below.
# %run Splitting_data.ipynb

# + [markdown] id="4IsMvH0yo2s1"
# Creating a list of C values that will be used as tuning parameter

# + id="IUHfpG2AZSjx"
cs = arange(0.1, 0.9, 0.1)

# + colab={"base_uri": "https://localhost:8080/"} id="DPjbPtCeel0U" executionInfo={"status": "ok", "timestamp": 1651700416970, "user_tz": -120, "elapsed": 7, "user": {"displayName": "<NAME>", "userId": "12795006612548471298"}} outputId="999f503a-947f-4d5a-c6f0-71a4d3ac9b1d"
# Free memory before the training loop.
gc.collect()

# + [markdown] id="nUYxMgNjpDIt"
# Training models

# + id="Lf7h-QlNZMuP" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1651702649234, "user_tz": -120, "elapsed": 2230170, "user": {"displayName": "<NAME>", "userId": "12795006612548471298"}} outputId="c989ae15-824c-4627-fd84-18fb0fe3181b"
print("Computing regularization path ...")
start = time()

# NOTE: the step is named 'ridge' but is a LogisticRegression (its default
# L2 penalty is what makes it ridge-like). warm_start=True reuses the
# previous fit as the starting point for each new C.
pipe = Pipeline([('scaler', StandardScaler()),
                 ('ridge', linear_model.LogisticRegression(random_state=0, warm_start=True))])

# `coefs_` collects the validation-set Matthews correlation coefficient
# for each C (despite the name, these are scores, not coefficients).
coefs_ = []
for c in cs:
    pipe.named_steps['ridge'].set_params(C=c)
    model = pipe.fit(X_train, y_train)
    Y_pred = model.predict(X_val)
    coefs_.append(metrics.matthews_corrcoef(y_val, Y_pred))
print("This took %0.3fs" % (time() - start))

# + [markdown] id="32zzgE5wpRqu"
# Plotting and saving the graph

# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="o91VW3kZeJj2" executionInfo={"status": "ok", "timestamp": 1651703509867, "user_tz": -120, "elapsed": 1043, "user": {"displayName": "<NAME>", "userId": "12795006612548471298"}} outputId="63cf1839-89e6-49d4-87e5-44e1c6ef6453"
coefs_ = np.array(coefs_)
fig = plt.figure(figsize=(8,6))
plt.plot(cs, coefs_, marker="o")
ymin, ymax = plt.ylim()
plt.xlabel("C")
plt.ylabel("MCC")
plt.title("Logistic Regression Path")
plt.axis("tight")
plt.show()
fig.savefig('parameter_tuning_Ridge.png')

# + [markdown] id="2ZwPdCbGpUzO"
# Findings: The max MCC is achieved at a C of 0.1
code/Regularisation path plotting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Evaluate relighting-based EOT adversarial attacks against an indoor-scene
# classifier. Requires the project-local `attacks/` and `utils/` packages
# plus the indoor-scenes dataset and relighter checkpoint on disk.

# +
import io
import matplotlib
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
import PIL
import torch
import torchvision
import urllib
import sys
import warnings

from torchvision import transforms

# Make the project root importable from this notebook directory.
sys.path.append(os.path.join(os.path.dirname(os.path.realpath('__file__')), '..'))

from attacks import eot_attacks
from attacks import utils
from utils import labels_util
from utils import datasets

# Disable annoying UserWarning caused by using nn.Upsample
# in the relighting model.
warnings.filterwarnings("ignore", category=UserWarning)

# %load_ext autoreload
# %autoreload 2

# +
# Attack configuration and model loading.
config = {}
config['classif_model_name'] = 'resnet_indoor'
config['relight_model_name'] = 'multi_illumination_murmann'
config['relighter_eps'] = 1e-4

checkpoint_path = '../'
checkpoint_path += 'relighters/multi_illumination/checkpoints/relight/epoch_13.pth'

relight_model = utils.load_relighting_model(config['relight_model_name'],
                                            checkpoint_path)
classif_model = utils.load_classification_model(config['classif_model_name'])

idx_to_label = labels_util.load_idx_to_label('indoor_scenes')
label_to_idx = {label : idx for idx, label in idx_to_label.items()}
print(label_to_idx)
# -

config['target_label'] = label_to_idx['warehouse'] # label for 'warehouse'
config['debugging'] = False
config['num_iterations'] = 5
config['learning_rate'] = 0.05
config['attack_type'] = 'class_constrained_eot'
config['gamma'] = 1.3

# +
# Smoke test: iterate the dataset twice to confirm every sample loads.
dataset = datasets.IndoorScenesDataset('../data/indoor-scenes/Test.csv',
                                       '../data/indoor-scenes/')
for i in range(2):
    for idx in range(len(dataset)):
        img, gt_label = dataset[idx]
print('OK!')

# +
# Previous experiment (kept for reference, disabled): sweep over eps values,
# attack every test image with class-constrained EOT, track classifier
# accuracy and attack success rate, and plot both curves.
#
# correct = 0
# adversarial = 0
# result_has_nan = 0
# dataset = datasets.IndoorScenesDataset('../data/indoor-scenes/Test.csv',
#                                        '../data/indoor-scenes/')
# for eps in [0.01]:
#     config['eps'] = eps
#     correct = 0
#     adversarial = 0
#     result_has_nan = 0
#     save_to_file = True
#     count_classes = {}
#     model_accuracies = []
#     attack_succ_rates = []
#     for idx in range(len(dataset)):
#         img, gt_label = dataset[idx]
#         # Keep the statistics
#         if gt_label in count_classes:
#             count_classes[gt_label] += 1
#         else:
#             count_classes[gt_label] = 1
#         config['gt_label'] = gt_label
#         if gt_label == config['target_label']:
#             config['target_label'] = label_to_idx['kitchen']
#         result = eot_attacks.do_attack(relight_model, classif_model, img, config)
#         if result['orig_label'] == gt_label:
#             correct += 1
#             if result['adv_label'] == config['target_label']:
#                 adversarial += 1
#         # Randomly visualize 25% of the attacks.
#         if np.random.random() < 1. and result['adv_label'] is not None:
#             eps_str = 'eps_{}_'.format(config['eps'])
#             utils.visualize_attack(img, result, idx_to_label)
#         print('eps {}, Current index: {}, Correct: {}, Adversarial: {}'.format(
#             config['eps'], idx, correct, adversarial))
#         accuracy = correct / (idx + 1)
#         attack_succ_rate = adversarial / correct
#         if idx > 0 and \
#            np.abs(accuracy - model_accuracies[-1]) < 1e-4 and \
#            np.abs(attack_succ_rate - attack_succ_rates[-1]) < 1e-4:
#             converged = True
#         model_accuracies.append(accuracy)
#         attack_succ_rates.append(attack_succ_rate)
#     plt.plot(np.arange(len(model_accuracies)), model_accuracies, c='red')
#     plt.title('Classifier accuracy')
#     plt.xlabel('Dataset index')
#     plt.ylabel('Accuracy')
#     plt.close()
#     plt.plot(np.arange(len(attack_succ_rates)), attack_succ_rates, c='blue')
#     plt.title('Attack success rate')
#     plt.xlabel('Dataset index')
#     plt.ylabel('Success rate')
#     plt.close()

# +
# Active experiment: 'random_root' attack over the whole test set. When the
# ground truth already equals the target class, the target is temporarily
# switched to 'bedroom' and restored to 'warehouse' after each sample.
config['attack_type'] = 'random_root'
config['batch_size'] = 32
config['num_batches'] = 50
config['num_classes'] = 10
config['eps'] = 0.02

dataset = datasets.IndoorScenesDataset('../data/indoor-scenes/Test.csv',
                                       '../data/indoor-scenes/')
for idx in range(len(dataset)):
    img, gt_label = dataset[idx]
    config['gt_label'] = gt_label
    if gt_label == config['target_label']:
        config['target_label'] = label_to_idx['bedroom']
    result = eot_attacks.do_attack(relight_model, classif_model, img, config)
    utils.visualize_attack(img, result, idx_to_label)
    config['target_label'] = label_to_idx['warehouse']
experiments/indoor_scenes_eot_attack_eval.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# Test the ComBat batch-effect correction on a FastGenomics expression
# matrix: project cells to 2D with PCA before/after correction and compare
# the spread of per-batch centroids.

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import decomposition
import matplotlib.cm as cm
import combat as cb
from scipy.spatial.distance import pdist

# Fixed seed so the PCA scatter plots are reproducible run-to-run.
np.random.seed(314)
# -

def check_batch_distribution(X, batch_anno, axis, title=""):
    """Scatter-plot a 2-component PCA of X colored by batch, mark each
    batch centroid, and return the sum of pairwise distances between the
    batch centroids (smaller = batches overlap more).

    X          : samples x features matrix.
    batch_anno : per-sample batch labels, same length as X's rows.
    axis       : matplotlib Axes to draw into.
    """
    pca = decomposition.PCA(n_components=2)
    pca.fit(X)
    X_trans = pca.transform(X)
    all_batch_reps = []
    labels = set(batch_anno)
    # NOTE(review): `cm.spectral` is the pre-matplotlib-2.0 name (removed in
    # newer releases; 'Spectral'/'nipy_spectral' replaced it) — confirm the
    # pinned matplotlib version.
    colors = cm.spectral(np.linspace(0, 1, len(labels)))
    for val, col in zip(labels, colors):
        # Rows belonging to this batch.
        Z = X_trans[np.ix_((batch_anno==val))]
        rep = np.mean(Z, axis=0)
        all_batch_reps.append(rep)
        axis.scatter(Z[:, 0], Z[:, 1], label=val, marker='o', c=col, edgecolor='none')
        # Circle marks the batch centroid.
        axis.add_artist(plt.Circle(rep, 5, color=col))
    axis.set_title(title)
    axis.legend(numpoints=1)
    all_batch_reps = np.array(all_batch_reps)
    # Total pairwise distance between batch centroids.
    return np.sum(pdist(all_batch_reps))

# +
import enum
import combat as cb

class Columns(str, enum.Enum):
    """Column types in FastGenomics CSV files"""
    CELLS = 'cellId*Ganzzahl'     # cell identifier column
    GENES = 'entrezId*Ganzzahl'   # Entrez gene-id column
    EXPR = 'expressionValue*Zahl' # expression value column
    BATCH = '_generated_batch*Text'  # batch annotation column
    # BATCH = 'batch'

# genes_path = fg_io.get_input_path('genes_data_input')
genes_path = "../sample_data/data/dataset/expressions_entrez.tsv"
# cells_meta = fg_io.get_input_path('cells_meta_input')
cells_meta = "../sample_data/data/dataset/cells.tsv"

# combat requires full matrix input - unstack input file
# combat expects matrix of shape [genes x cells], so index
# columns accordingly
data = (
    pd.read_csv(genes_path, sep='\t')
    #.dropna()
    .set_index([Columns.GENES, Columns.CELLS])
    #.loc[:, Columns.EXPR]
    .unstack()
    .fillna(0)
)

pheno = pd.read_csv(cells_meta, sep="\t")

# data = pd.read_csv("../sample_data/data/dataset/bladder-expr.txt", sep="\t")
# pheno = pd.read_csv("../sample_data/data/dataset/bladder-pheno.txt", sep="\t")

# Run ComBat batch correction on the genes x cells matrix.
corr = cb.combat(data, pheno[Columns.BATCH])

# Compare batch separation before vs. after correction; cells are the
# samples, hence the transposes.
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,4))
total_batch_dist = check_batch_distribution(data.values.T, pheno[Columns.BATCH], ax1, "Before Batch Correction")
total_batch_dist_corr = check_batch_distribution(corr.values.T, pheno[Columns.BATCH], ax2, "After Batch Correction")

print("Batch center distance before correction:", total_batch_dist)
print("Batch center distance after correction:", total_batch_dist_corr)
print("Batch center ratio (should be > 1):", total_batch_dist/total_batch_dist_corr)

plt.show()
# -
hello_genomics/test combat_app.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #This is Epidemiological model for LSD # ## SEIR system of equations # <left> $\dot{S}=\alpha*N-(\beta*SI/N)+\theta*R-\psi*S$ <br> # $\dot{E}=(\beta*SI)/N -\gamma*E - \psi*E $<br> # $\dot{I}=(\gamma*E- I(\omega + \psi)$ <br> # $\dot{R}=(\omega*I \theta*R-\psi *R)$ <br> # # # + #import python modules import numpy as np import matplotlib.animation as animation import matplotlib.pylab as plt from scipy.integrate import odeint import math #to display images from IPython.display import Image #magic to show plots inline # %matplotlib inline # - def SEIR(state,t): # unpack the state vector S = state[0] E = state[1] I = state[2] R = state[3] # Initial values alpha = 0.04 gamma = 0.03 psi = 0.3 beta = 0.02 omega = 0.4 theta = 0.08 N=math.exp((alpha-psi)*1000) # Computing the number of populations #N=1000 # Compute the derivatives dS=alpha*N-((beta*S*E)/N)+theta*R-psi*S #print (dS) dE = (beta*S*E)/N-gamma*E-psi*E #print (dE) dI=gamma*E- I*(omega+psi) dR=omega*I-theta*R-psi*R # return the result of derivatives return [dS, dE, dI, dR] state0 = [100.0, 10.0, 10.0,10.0] t = np.arange(0.0, 200.0, 1) state = odeint(SEIR, state0, t) plt.plot(t,state) plt.xlabel('TIME (sec)') plt.ylabel('STATES') plt.title('SEIR Epidemiological Model') plt.legend(('$S$', '$E$','$I$','$R$'))
Epidemiological Model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Think Bayes: Chapter 5
#
# This notebook presents code and exercises from Think Bayes, second edition.
#
# Copyright 2016 <NAME>
#
# MIT License: https://opensource.org/licenses/MIT

# +
from __future__ import print_function, division

# %matplotlib inline

import warnings
warnings.filterwarnings('ignore')

import numpy as np

from thinkbayes2 import Pmf, Cdf, Suite, Beta
import thinkplot
# -

# ## Odds
#
# The following function converts from probabilities to odds.

def Odds(p):
    """Convert probability p (0 <= p < 1) to odds in favor."""
    return p / (1-p)

# And this function converts from odds to probabilities.

def Probability(o):
    """Convert odds in favor o to a probability."""
    return o / (o+1)

# If 20% of bettors think my horse will win, that corresponds to odds of 1:4, or 0.25.

p = 0.2
Odds(p)

# If the odds against my horse are 1:5, that corresponds to a probability of 1/6.

o = 1/5
Probability(o)

# We can use the odds form of Bayes's theorem to solve the cookie problem:

prior_odds = 1
likelihood_ratio = 0.75 / 0.5
post_odds = prior_odds * likelihood_ratio
post_odds

# And then we can compute the posterior probability, if desired.

post_prob = Probability(post_odds)
post_prob

# If we draw another cookie and it's chocolate, we can do another update:

likelihood_ratio = 0.25 / 0.5
post_odds *= likelihood_ratio
post_odds

# And convert back to probability.

post_prob = Probability(post_odds)
post_prob

# ## Oliver's blood
#
# The likelihood ratio is also useful for talking about the strength of evidence without getting bogged down talking about priors.
#
# As an example, we'll solve this problem from MacKay's {\it Information Theory, Inference, and Learning Algorithms}:
#
# > Two people have left traces of their own blood at the scene of a crime. A suspect, Oliver, is tested and found to have type 'O' blood. The blood groups of the two traces are found to be of type 'O' (a common type in the local population, having frequency 60%) and of type 'AB' (a rare type, with frequency 1%). Do these data [the traces found at the scene] give evidence in favor of the proposition that Oliver was one of the people [who left blood at the scene]?
#
# If Oliver is one of the people who left blood at the crime scene, then he
# accounts for the 'O' sample, so the probability of the data
# is just the probability that a random member of the population
# has type 'AB' blood, which is 1%.
#
# If Oliver did not leave blood at the scene, then we have two
# samples to account for. If we choose two random people from
# the population, what is the chance of finding one with type 'O'
# and one with type 'AB'? Well, there are two ways it might happen:
# the first person we choose might have type 'O' and the second
# 'AB', or the other way around. So the total probability is
# $2 (0.6) (0.01) = 1.2$%.
#
# So the likelihood ratio is:

# +
like1 = 0.01
like2 = 2 * 0.6 * 0.01
likelihood_ratio = like1 / like2
likelihood_ratio
# -

# Since the ratio is less than 1, it is evidence *against* the hypothesis that Oliver left blood at the scence.
#
# But it is weak evidence. For example, if the prior odds were 1 (that is, 50% probability), the posterior odds would be 0.83, which corresponds to a probability of:

post_odds = 1 * like1 / like2
Probability(post_odds)

# So this evidence doesn't "move the needle" very much.

# **Exercise:** Suppose other evidence had made you 90% confident of Oliver's guilt. How much would this exculpatory evidence change your beliefs? What if you initially thought there was only a 10% chance of his guilt?
#
# Notice that evidence with the same strength has a different effect on probability, depending on where you started.

# +
# Solution

post_odds = Odds(0.9) * like1 / like2
Probability(post_odds)

# +
# Solution

post_odds = Odds(0.1) * like1 / like2
Probability(post_odds)
# -
solutions/chap05soln.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=true editable=true from pymongo import MongoClient from tqdm import tqdm # + deletable=true editable=true client = MongoClient() db = client['rf_test'] col_entries = db['entries'] col_inst = db['inst'] col_inds = db['inds'] col_nouns = db['nouns'] # + [markdown] deletable=true editable=true # ## First look at entries # + [markdown] deletable=true editable=true # ### What are those? # + deletable=true editable=true def list_entries(): d = col_entries.distinct("type") return [x for x in d] # + [markdown] deletable=true editable=true # ### What do we have the most? # + deletable=true editable=true entries = Out[4] # + deletable=true editable=true def count_entry(): entry_count = [] for entry in entries: c = col_entries.find({"type":entry}).count() print("{}|{}".format(c,entry)) entry_count.append((c,entry)) return entry_count # + deletable=true editable=true entry_count = Out[13] # + deletable=true editable=true sorted(entry_count, key= lambda x: x[0])[::-1] # + [markdown] deletable=true editable=true # In these sources of data, usernames seem interesting. # + [markdown] deletable=true editable=true # ### Usernames # + [markdown] deletable=true editable=true # First, we want to look through what are the usernames we have in the database. To find all usernames, we can do: # + deletable=true editable=true def find_all_usernames(): return [x for x in col_entries.find({"type":"Username"})] # + [markdown] deletable=true editable=true # Since there are about 50k usernames, let's just look at a random one: # + deletable=true editable=true find_all_usernames()[30] # + [markdown] deletable=true editable=true # Note the 'id' field. 
Since this correspond to the author field in the instance, we can use this to count the contributions of the username. Therefore, for each username, we can count or list all of the instance that they are associated with. # + deletable=true editable=true def find_inst_by_username(userid): return [x for x in col_inst.find({"attributes.authors": userid})] # + deletable=true editable=true find_inst_by_username("LTRvjC") # + [markdown] deletable=true editable=true # We could also go through the data and see which username "have the most to say". # + deletable=true editable=true def username_ranking(): usernames_ranks = [] usernames = find_all_usernames()[:100] for u in tqdm(usernames): uid = u['id'] name = u['name'] count = col_inst.find({"attributes.authors":uid}).count() usernames_ranks.append((uid, name, count)) return sorted(usernames_ranks, key=lambda x: x[2]) # + [markdown] deletable=true editable=true # On my computer, this process would take around **5 hours** to go through the complete dataset and count up all the usernames, we got about 3 username per seconds. So I only did about the first 100 usernames # + deletable=true editable=true u_ranks = username_ranking() # - # For example, in this small subset, the user with the most instance connected to, is # + deletable=true editable=true u_ranks[-1] # + deletable=true editable=true def get_inst_for_user(uid): return [x for x in col_inst.find({"attributes.authors":uid})] # - # #### What does this user talks about? [x['attributes']['indicator'] for x in get_inst_for_user('KKYGPH')] from nltk.tag import pos_tag def find_nouns(sentence): words = sentence.split() tagged = pos_tag(words) return [w for w, t in tagged if t == 'NNP'] # Here, we will look at everything the user has said, and count all the proper nouns. 
def words_from_user(uid):
    """Count the proper nouns used across everything `uid` has written.

    Returns a dict mapping noun -> number of occurrences.
    """
    # BUG FIX: the original ignored `uid` and always queried the hard-coded
    # user 'KKYGPH'.
    insts = get_inst_for_user(uid)
    sentences = [x['fragment'] for x in insts]
    words = {}
    for sentence in sentences:
        for noun in find_nouns(sentence):
            words[noun] = words.get(noun, 0) + 1
    return words


sorted(((a, b) for a, b in words_from_user('KKYGPH').items()),
       key=lambda x: x[1])[::-1]

# Uhmm, apparently, this user talks about Dridex more than anything else.
# This still needs a lot of improvement. One big improvement this approach
# could use is to categorize the nouns; that would give us some context on
# what the user is talking about.

# Furthermore, we can compare users and group them by what they are talking
# about. This brings us back to our instance database. For example, let's
# look at our Dridex malware:


def people_and_instance(indicator):
    """Collect the authors of every instance matching `indicator`.

    Returns a dict mapping author entry id -> {"name": <author name>}.
    """
    matches = list(col_inst.find({"attributes.indicator": indicator}))
    authors = []
    for entry in matches:
        # Not every instance records its authors; fall back to None.
        authors.append(entry.get('attributes', {}).get('authors'))
    authors_info = {}
    for a in tqdm(authors):
        if a:
            en = col_entries.find_one({"id": a[0]})
            authors_info[en['id']] = {"name": en['name']}
    return authors_info


# BUG FIX: the original read `talks_of_dridex = Out[29]` / `len(Out[29])`,
# relying on the IPython output cache, which breaks when cells are re-run;
# call the function once and reuse the result instead.
talks_of_dridex = people_and_instance('Dridex')

len(talks_of_dridex)  # There are 468 entries about Dridex, sweet !!

# When we have a list of people who talk about a topic, we can iterate
# through the list of IDs, find their sentences and count the nouns in
# those. This way, we could see who is "really interested" in the topic.

# A different way of looking at usernames, especially Twitter usernames, is
# to look at the relationship aspect: take a Twitter handle and also look at
# who they follow. We suspect that a Twitter user follows subjects of
# importance to them, so this lets us find other accounts to put in our
# crawl list. Furthermore, we can look at a group of people and see if they
# all talk about the same topic.
rf/RF-Entries.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 函数定义 # # 本章我们将介绍编程语言中非常重要的一个概念——函数。这里的函数和我们之前在数学中所学习的函数,有一些相似点,也有些不同。 # # 相似点是,它们本质都是一段操作的复用。在数学里,假如我们有一个函数:$f(x)=3x+9$,当我们计算 $f(3)$ 或者 $f(9)$ 的时候,都是在进行 $3x+9$ 这个操作。在程序中呢,也是类似的,我们用几行代码定义了一个函数,每次调用这个函数就是重复那几段代码的操作。 # # 不同的是,在数学的函数中,我们只能定义 $f(x,y) = x^2 + xy + 3$ 这样的函数,却不能定义 $f(x,y) = x^2 + ny + m$ 这样的函数,我们不知道这里面的 $m$ 和 $n$ 是什么;我们定义的函数也不会影响到外部任何其他值或函数,假如我们在外部有一个值 $a = 3$,我们的函数 $f$ 是没有办法影响 $a$ 的值的。 # # 而在计算机科学中,我们的函数可以变得很强大,它可能在函数体中使用了定义在函数外部的值,或者悄悄改变了外部的值。 # # 这里只是关于函数在不同学科中简单异同点的介绍。本章,我们会讲解一些基本知识,比如如何定义函数,调用函数等等。在后续学习中,我们还会学到一些更高级的特性,希望你在学习过程中,能回头来思考一下,程序与数学中的函数的异同点。 # # 本章包含: # - 函数定义 # - 函数调用 # - 参数传递 # - 匿名函数 # - 高阶函数 # ## 函数定义 # # Python 中的函数,用关键字 `def` 来定义,它的语法大概长这个样子: # # ```python # def 函数名(参数列表): # 函数体 # ``` # # 这样看起来可能还是有点晦涩,让我们通过一个简单的例子来看看如何定义函数。 def max(a, b): if a > b: return a else: return b # 可以看到我们很简单的就定义了一个返回最大值的函数。注意代码中的 `(` `)` 与 `:`,它们都是半角符号。值得注意的是关键字 `return`,它用来设置这个函数的返回值,如果我们不写 `return` 的话,函数的返回值默认为 `None`。 # ## 函数调用 # # 我们其实已经在之前的课程中调用过函数了,就是我们在课程一开始使用的 `print`,我们现在来试试调用 `max`。 print(max(3, 10)) # 是不是还挺简单的?接下来我们来看一点有意思的东西。 # ## 参数传递 # # 下面是一段代码,先别着急看结果,猜测一下它会打印出什么。 # + a = 3 def foo(a): a = a + 1 return a foo(a) print(a) # - # 结果是 `3`,不知道你猜对了没?你可能也会误以为结果是 `4`,明明我们在 `foo` 这个函数里给 `a` 赋了一个新的值呀,为什么打印出来的 `a` 还是 `3` 呢? # # 其原因是,`a` 在传进函数后,其实是复制了一个新的 `a` 出来进行之后的操作,我们修改这个新的 `a`,并不会对函数外部的 `a` 造成影响。我们也可以简单的修改一下代码,达成另一种效果。 a = foo(a) print(a) # 我们可以看到这时候输出的结果就是 `4` 了。 # # 现在让我们换一个例子,还是先猜猜会打印出什么? # + l = [1, 2, 3, 4] def mutate(l): l[2] = 10 return mutate(l) print(l[2]) # - # 结果是 `10`!是不是和你想象的不一样?明明我们之前说参数传进函数之后,会复制出一个新的来进行操作呀? # # 这其实和我们传进去的东西有关。在 Python 中,有些对象,是不可修改的,而有些是可修改的,这是什么意思呢? # # 像是 `Number` 和 `String`,它们都属于不可变的数据类型,每当它们被传进一个函数时,都是生成了一个它的复制品传进去。 # # 而像 `List`,它属于可变数据类型,当我们向函数传递一个列表时,所传递的是其本身,当我们在函数内部对其进行修改后,外部的它,也会受到影响! 
# # 还记得我们一开始说的这句话吗? # > 我们的函数可以变得很强大,它可能在函数体中使用了定义在函数外部的值,或者悄悄改变了外部的值。 # # 函数 `mutate` 就是一个例子,在写这样的函数时,一定要注意对外部值的修改,小心造成不可思议的事情... # 现在,让我们结合之前学过的知识,试着计算一个列表中数字的和。 # + def sum(items): res = 0 for item in items: res += item return res print(sum(l)) # - # 这里 `res += item` 是 `res = res + item` 的简写,同理,我们还有 `-=` 等等... # ## 匿名函数 # # 接下来我们来介绍一下匿名函数,顾名思义,匿名函数,就是没有名字的函数。从语义上来讲,它其实和普通函数没有区别,只是不需要显式地定义函数名。 # # 我们来随意定义一个匿名函数试试: # + square = lambda x: x * x print(square(2)) print(square(9)) # - # 在上面的代码中,我们定义了一个匿名函数,它取一个参数,并计算它的平方。 # # 比较诡异的是,我们把它赋给了一个变量 `square`,这其实并不是匿名表达式的常用用法,只是为了方便介绍,我们这样做而已。接下来让我们看看匿名函数真正的强大之处。 # ## 高阶函数 # # 我们先来实现两个函数,第一个将列表里的每个数都乘二,第二个将列表里的每个数都加上3。 # + def times2(nums): for item in items: item * 2 return items def plus3(nums): for item in items: item + 3 return items # - # 有没有感觉它们实在是太像了!除了 `* 2` 和 `+ 3` 不一样,其他地方完全没区别嘛! # # 这时,我们可以使用一种神奇的语法,高阶函数!什么是高阶函数呢,就是那些取函数作为参数,或者以函数作为返回值的函数。 # # 可能你还没有完全理解这句话,让我们改写一下之前的 `times2` 与 `plus3`,看看到底高阶函数是什么东西。 # + def map(items, f): for item in items: f(item) return items def times2_(nums): map(nums, lambda x: x * 2) def plus3_(nums): map(nums, lambda x: x + 3) # - # 我们先定义了一个函数 `map`,它取一个列表与一个函数,并且对列表中的每个元素都应用它。之后,我们定义了全新版本的 `times2` 与 `plus3`。 # # 在这里,`map` 就是一个高阶函数,看看 `times2_` 与 `plus3_` 的定义,有没有感觉很方便?
m5-101/content/python-functions-prev/python-functions-prev.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # DataImage
# The `DataImage` widget is similar to the `Image` widget of the base
# ipywidgets package, but uses a raw RGBA pixel array as its source.

import numpy as np
from ipydatawidgets import DataImage

# Fixed seed so the random image below is reproducible.
np.random.seed(0)

# First, set up some random rgba pixel data and display it as an image:

noise = (255 * np.random.rand(200, 200, 4)).astype('uint8')
DataImage(data=noise)

# Next, set up a color gradient with corners black, red, green, and yellow,
# then display it:

SIDELEN = 300
ramp = np.linspace(0, 255, SIDELEN, dtype='uint8')
# red varies along columns, green along rows:
red, green = np.meshgrid(ramp, ramp)
# blue stays zero; alpha is fully opaque:
blue = np.zeros((SIDELEN, SIDELEN), dtype='uint8')
alpha = np.full((SIDELEN, SIDELEN), 255, dtype='uint8')
rgba = np.dstack([red, green, blue, alpha])
DataImage(data=rgba)
examples/DataImage.ipynb
# --- # jupyter: # jupytext: # formats: ipynb,py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] # ## Descriptive statistics # # Acknowledging that variables and models are uncertain assumes that we directly or indirectly can describe them through probability distributions. # However for most applications the distribution is a messy entity that on its own is hard to interpret directly. # So instead, we use statistical metrics designed to summarize distribution and to get an intuitive understanding of its statistical properties. # # In addition, for each statistical property, there almost always exists an empirical counterpart that works as a best estimate of said statistical property in the scenarios where only data is available. # This is important, as [Monte Carlo integration](../main_usage/monte_carlo_integration.ipynb) isn't possible without the empirical metrics used to describe the results. # # This section takes a look at some popular statistical metrics and compares them to their empirical counterparts. # %% [markdown] # ### Expected value # # Take for example the most common metric, the expected value function [chaospy.E()](../../api/chaospy.E.rst). # This operator works on any distribution: # %% import chaospy uniform = chaospy.Uniform(0, 4) chaospy.E(uniform) # %% [markdown] # Its empirical counterpart is the mean function: $\bar X=\tfrac 1N \sum X_i$. 
# This function is available as `numpy.mean` and can be used on samples
# generated from said distribution:

# %%
# BUG FIX: `numpy` was used below but never imported in this notebook.
import numpy

samples = uniform.sample(10**7)
numpy.mean(samples)

# %% [markdown]
# The operator can also be used on any polynomial, but would then require
# the distribution of interest as a second argument:

# %%
q0 = chaospy.variable()
chaospy.E(q0**3 - 1, uniform)

# %% [markdown]
# In the multivariate case, the distribution and the polynomials need to
# coincide positionally.
# E.g.

# %%
q0, q1, q2 = chaospy.variable(3)
joint3 = chaospy.J(chaospy.Normal(0, 1), chaospy.Uniform(0, 2), chaospy.Normal(2, 2))
chaospy.E([q0, q1*q2], joint3)

# %% [markdown]
# Here `q0`, `q1` and `q2` correspond to `chaospy.Normal(0, 1)`,
# `chaospy.Uniform(0, 2)` and `chaospy.Normal(2, 2)` respectively.
# It is the variable name position and distribution length that matters
# here, not the shape of what is being taken the expected value of.
#
# Note also that the model approximations created by e.g.
# [chaospy.fit_regression()](../../api/chaospy.fit_regression.rst) and
# [chaospy.fit_quadrature()](../../api/chaospy.fit_quadrature.rst) also are
# valid polynomials.

# %% [markdown]
# ### Higher order moments
#
# In addition to the expected value there are also higher order statistics
# that work in the same way.
# They are with their numpy and scipy empirical counterparts: # # Name | `chaospy` | `numpy` or `scipy` # --- | --- | --- # Variance | [chaospy.Var()](../../api/chaospy.Var.rst) | `numpy.var` # Standard deviation| [chaospy.Std()](../../api/chaospy.Std.rst) | `numpy.std` # Covariance | [chaospy.Cov()](../../api/chaospy.Cov.rst) | `numpy.cov` # Correlation | [chaospy.Corr()](../../api/chaospy.Corr.rst) | `numpy.corrcoef` # Skewness | [chaospy.Skew()](../../api/chaospy.Skew.rst) | `scipy.stats.skew` # Kurtosis | [chaospy.Kurt()](../../api/chaospy.Kurt.rst) | `scipy.stats.kurtosis` # # For example (Pearson's) correlation: # %% chaospy.Corr([q0, q0*q2], joint3) # %% [markdown] # ### Conditional mean # # The conditional expected value [chaospy.E_cond()](../../api/chaospy.E_cond.rst) is similar to the more conventional [chaospy.E()](../../api/chaospy.E.rst), but differs in that it supports partial conditioning. # In other words it is possible to "freeze" some of the variables and only evaluate the others. # For example: # %% chaospy.E_cond([q0, q1*q2], q0, joint3) # %% chaospy.E_cond([q0, q1*q2], q1, joint3) # %% chaospy.E_cond([q0, q1*q2], [q1, q2], joint3) # %% [markdown] # ### Sensitivity analysis # # Variance-based sensitivity analysis (often referred to as the Sobol method or Sobol indices) is a form of global sensitivity analysis. Working within a probabilistic framework, it decomposes the variance of the output of the model or system into fractions which can be attributed to inputs or sets of inputs. Read more in for example [Wikipedia](https://en.wikipedia.org/wiki/Variance-based_sensitivity_analysis). # # In `chaospy`, the three functions are available: # # Name | `chaospy` function # --- | --- # 1. order main | [chaospy.Sens_m](../../api/chaospy.Sens_m.rst) # 2. 
order main | [chaospy.Sens_m2](../../api/chaospy.Sens_m.rst) # total order | [chaospy.Sens_m2](../../api/chaospy.Sens_m.rst) # # For example: # %% chaospy.Sens_m(6*q0+3*q1+q2, joint3) # %% chaospy.Sens_m2(q0*q1+q1*q2, joint3) # %% chaospy.Sens_t(6*q0+3*q1+q2, joint3) # %% [markdown] # There are no direct empirical counterparts to these functions, but it is possible to create schemes using for example [Saltelli's method](https://www.sciencedirect.com/science/article/abs/pii/S0010465502002801). # %% [markdown] # ### Percentile # # Calculating a closed form percentile of a multivariate polynomial is not feasible. # As such, `chaospy` does not calculate it. # However, as a matter of convenience, a simple function wrapper [chaospy.Perc()](../../api/chaospy.Perc.rst) that calculate said values using Monte Carlo integration is provided. # For example: # %% chaospy.Perc([q0, q1*q2], [25, 50, 75], joint3, sample=1000, seed=1234) # %% [markdown] # Note that the accuracy of this method is dependent on the number of samples. # %% [markdown] # ### Quantity of interest # # If you want to interpret the model approximation as a distribution for further second order analysis, this is possible through the [chaospy.QoI_Dist](../../api/chaospy.QoI_Dist.rst). # This is a thin wrapper function that generates samples and pass them to the kernel density estimation class [chaospy.GaussianKDE()](../../api/chaospy.GaussianKDE.rst). # It works as follows: # %% new_dist = chaospy.QoI_Dist(q0*q1+q2, joint3) new_dist.sample(6, seed=1234).round(6)
docs/user_guide/fundamentals/descriptive_statistics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1><b>The Sparks Foundation</b></h1> # # <h1><b>Data science and Business Analytics Internship</b></h1> # # <h1><b>Task1 : Prediction Using Supervised ML</b></h1> # <p>Predict the percentage of an student based on the no. of study hours</p> # <ul> # <li>In this task I'll present how the Python Scikit-Learn library for machine learning can be used to implement regression functions.</li> # <li>I will predict the percentage of marks that a student is expected to score based upon the number of hours they studied. This is a simple linear regression task as it involves just two variables.</li> # </ul> # # # <h2><b>By: <NAME></b></h2> # # <h3>Import essential libraries</h3> # <ul><li>Pandas to manage the dataframes and enables us to read various datasets into a data frame.</li> # <li>Numpy to save the Hours and Score values in different arrays.</li> # <li> Scikit to split the data into two sets :- 1). Training set, 2). Test set.</li> # <li>Matplotlib to represent the train model in pictorial form. # </li></ul> import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline from sklearn.model_selection import train_test_split # Read the data from the given CSV file, and assign it to variable "df" df = pd.read_csv('http://bit.ly/w-data') # show the first 5 rows using dataframe.head() method print("The first 5 rows of the dataframe") df.head(5) # <h3>Data Types</h3> # <p>The main types stored in Pandas dataframes are <b>object</b>, <b>float</b>, <b>int</b>, <b>bool</b> and <b>datetime64</b>. # </p> # df.dtypes # <h3>Describe</h3> # It gives a statistical summary of each column, such as count, column mean value, column standard deviation, etc. 
We use the describe method: # # df.describe() # <h3>shape</h3> # <p>To get the information about how many rows and columns are there in database we use the shape method.</p> # <p>It return tuple as output where first value represents number of records(rows) and second value represents number of fields(columns).</p> df.shape # <h2><b>Data Visualization</b></h2> # <h3>Correlation</h3> # Correlation is a measure of the extent of interdependence between variables. # we can calculate the correlation between variables of type "int64" or "float64" using the method "corr": df.corr() # Assign both of the fields(Hours, Scores) and store it to two different variable named x and y respectively. x=df[['Hours']] y=df[['Scores']] # Plot the data into graph # Giving appropriate lables for axises and title for graph plt.scatter(x,y) plt.title("No. of Hours Vs Scores") plt.xlabel("No. of Hours Studied by student") plt.ylabel("Score of the student") plt.show() # The graph above shows that the variables x and y have a HIGH POSITIVE LINEAR CORRELATION.(high positive beacause all points are nearby) # <h2><b>Training and Testing</b></h2> X = df.iloc[:, :-1].values Y = df.iloc[:, 1].values x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.20, random_state=0) print("number of test samples :", x_test.shape[0]) print("number of training samples:",x_train.shape[0]) # The test_size parameter sets the proportion of data that is split into the testing set. In the above, the testing set is set to 20% of the total dataset. 
# <p>If one does not mention the random_state in the code, then whenever the person executes your code a new random value is generated and the train and test datasets would have different values each time.</p> # # <p>However, if the person use a particular value for random_state(random_state = 1 or any other value) everytime the result will be same,i.e, same values in train and test datasets.</p> # <h3><b>Training the Model</b></h3> from sklearn.linear_model import LinearRegression lm = LinearRegression() lm.fit(x_train, y_train) # <b>Plotting the regression line</b> # # + line = lm.coef_ * X + lm.intercept_ # Plotting for the test data plt.scatter(X, Y) plt.plot(X, line); plt.show() # - # When evaluating our model, not only do we want to visualize the results, but we also want a quantitative measure to determine how accurate the model is. # <p>To determine the accuracy of a model we use score() method of R squared measure</p> # # <p>R squared, also known as the coefficient of determination, is a measure to indicate how close the data is to the fitted regression line. # </p> # acc=lm.score(x_test,y_test) print("The accuracy of the Linear Regression Model created above is: ",acc) # <h3><b>Prediction of Output</b></h3> Yhat=lm.predict(x_test) print('The output of predicted value is: ',Yhat) #Comparing Actual score vs Predicted score df1= pd.DataFrame({'ACTUAL SCORE':y_test,'PREDICTED SCORE':Yhat}) df1 # Testing with your own data hours = np.array([9.25]) own_pred = lm.predict(hours.reshape(-1,1)) print("No of Hours = {}".format(hours)) print("Predicted Score = {}".format(own_pred[0])) # <h3><b>Evaluation of Model</b></h3> from sklearn import metrics print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, Yhat)) from sklearn.metrics import r2_score print(' R-Squared :\t',metrics.r2_score(y_test, Yhat)) from sklearn.metrics import mean_squared_error print(' Mean Squared Error :\t',mean_squared_error(y_test, Yhat))
Task-1 TSF.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Object Detection Demo # Welcome to the object detection inference walkthrough! This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image. Make sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md) before you start. # # Imports # + import numpy as np import os import six.moves.urllib as urllib import sys import tarfile import tensorflow as tf import zipfile from collections import defaultdict from io import StringIO from matplotlib import pyplot as plt from PIL import Image if tf.__version__ < '1.4.0': raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!') # - # ## Env setup # + # This is needed to display the images. # %matplotlib inline # This is needed since the notebook is stored in the object_detection folder. sys.path.append("..") # - # ## Object detection imports # Here are the imports from the object detection module. # + from utils import label_map_util from utils import visualization_utils as vis_util # - # # Model preparation # ## Variables # # Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file. # # By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies. # + # What model to download. 
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17' MODEL_FILE = MODEL_NAME + '.tar.gz' DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/' # Path to frozen detection graph. This is the actual model that is used for the object detection. PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb' # List of the strings that is used to add correct label for each box. PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt') NUM_CLASSES = 90 # - # ## Download Model opener = urllib.request.URLopener() opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE) tar_file = tarfile.open(MODEL_FILE) for file in tar_file.getmembers(): file_name = os.path.basename(file.name) if 'frozen_inference_graph.pb' in file_name: tar_file.extract(file, os.getcwd()) # ## Load a (frozen) Tensorflow model into memory. detection_graph = tf.Graph() with detection_graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='') # ## Loading label map # Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. 
Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine label_map = label_map_util.load_labelmap(PATH_TO_LABELS) categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) # ## Helper code def load_image_into_numpy_array(image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) # # Detection # + # For the sake of simplicity we will use only 2 images: # image1.jpg # image2.jpg # If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS. PATH_TO_TEST_IMAGES_DIR = 'test_images' TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ] # Size, in inches, of the output images. IMAGE_SIZE = (12, 8) # - with detection_graph.as_default(): with tf.Session(graph=detection_graph) as sess: # Definite input and output Tensors for detection_graph image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') # Each box represents a part of the image where a particular object was detected. detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') # Each score represent how level of confidence for each of the objects. # Score is shown on the result image, together with the class label. detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') detection_classes = detection_graph.get_tensor_by_name('detection_classes:0') num_detections = detection_graph.get_tensor_by_name('num_detections:0') for image_path in TEST_IMAGE_PATHS: image = Image.open(image_path) # the array based representation of the image will be used later in order to prepare the # result image with boxes and labels on it. 
image_np = load_image_into_numpy_array(image) # Expand dimensions since the model expects images to have shape: [1, None, None, 3] image_np_expanded = np.expand_dims(image_np, axis=0) # Actual detection. (boxes, scores, classes, num) = sess.run( [detection_boxes, detection_scores, detection_classes, num_detections], feed_dict={image_tensor: image_np_expanded}) # Visualization of the results of a detection. vis_util.visualize_boxes_and_labels_on_image_array( image_np, np.squeeze(boxes), np.squeeze(classes).astype(np.int32), np.squeeze(scores), category_index, use_normalized_coordinates=True, line_thickness=8) plt.figure(figsize=IMAGE_SIZE) plt.imshow(image_np)
reaserch/object_detection/object_detection_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # For identifying networks of users via retweets # + import pandas as pd import networkx as nx #Plotting # %matplotlib inline from matplotlib import pyplot as plt import seaborn as sns sns.set(style="whitegrid") # %cd twitterproject # inject config value (on command line would've been --config=data-analysis) import sys # args = ['--config', 'data-analysis'] args = ['--config', 'laptop-mining'] old_sys_argv = sys.argv sys.argv = [old_sys_argv[0]] + args import environment from TwitterDatabase.Repositories import DataRepositories as DR from TwitterDatabase.DatabaseAccessObjects import DataConnections as DC from TwitterDatabase.Models.WordORM import Word from TwitterDatabase.Models.TweetORM import Users as User from TwitterDatabase.Models.TweetORM import Tweet from DataAnalysis.SearchTools.WordMaps import get_adjacent_word_counts, get_adjacent_words, get_user_ids_for_word # - retweetIdQuery = """ SELECT u.userID AS toId, r.referring AS fromId, u.screen_name, r.tid FROM twitter_miner_laptop.users u INNER JOIN (SELECT t.tweetID as tid, t.in_reply_to_screen_name AS sn, t.userID AS referring FROM twitter_miner_laptop.tweets t WHERE t.in_reply_to_screen_name IS NOT NULL) AS r WHERE u.screen_name = r.sn """ dao = DC.MySqlConnection(environment.CREDENTIAL_FILE) data = pd.read_sql_query(retweetIdQuery, dao.engine) #, index_col='tweetID') print("Loaded %s record" % len(data)) nodes = data.drop(['screen_name', 'tid'], axis=1) nodes[-5:] G = nx.DiGraph() G.add_edges_from([(a.toId, a.fromId) for a in nodes.itertuples() if a.toId is not a.fromId]) G.size() # + deletable=false editable=false GRAPHS_FOLDER = "%s/temp_output/graphs" % environment.LOG_FOLDER_PATH filepath = "%s/user-retweet.gexf" % GRAPHS_FOLDER nx.write_gexf(G, filepath) # - dc = 
nx.in_degree_centrality(G) degreeCentrality = [{'id': k, 'degree_centrality' : dc[k] } for k in dc.keys()] degreeCentrality = pd.DataFrame(degreeCentrality) len(dc.keys()) dc = pd.DataFrame([{'id': k, 'degree_centrality' : dc[k] } for k in dc.keys()]) dc[:5] dc[49113] r[:2] dc['49113'] # This produces a graph G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc for n1, n2, degree in edges: G.add_edges_from([(n1, n2) for i in range(0, degree)]) G.size() nx.Graph() # + # Retweet and quote tweets # -
DataAnalysis/Notebooks/.ipynb_checkpoints/User networks-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="RmeMvIoxySCo" # # BERT Hugging Face/ transformers implementation for Sentiment Analysis # # This notebook trains a sentiment analysis model to classify movie reviews as *positive* or *negative*, based on the text of the review. # # + [markdown] id="pB11yjRzy-7q" # ## Setup # # ### Training using Colab GPU # # Google Colab offers free GPUs and TPUs! Since we'll be training a large neural network it's best to take advantage of this (in this case we'll attach a GPU), otherwise training will take a very long time. # # A GPU can be added by going to the menu and selecting: # # Edit 🡒 Notebook Settings 🡒 Hardware accelerator 🡒 (GPU) # + colab={"base_uri": "https://localhost:8080/"} id="TLfhcvheySnu" outputId="8560f15c-f6c0-4018-fee6-c093bb0fa6fc" pip install transformers # + id="A4K1GFX0iwXm" import transformers import torch import torch.nn as nn from tqdm import tqdm import pandas as pd from sklearn import model_selection, metrics import numpy as np # + id="gmzRL1aQa6Vj" colab={"base_uri": "https://localhost:8080/"} outputId="3670731c-9ac4-44e3-ef71-c30a1fcef047" # from google.colab import drive # drive.mount('/content/gdrive') # + [markdown] id="LkccYjMJseut" # ##BERT Tokenizer # # To feed our text to BERT, it must be split into tokens, and then these tokens must be mapped to their index in the tokenizer vocabulary. 
# + id="xEc8IJrmSc-y" """BERT Configuration""" BERT_PATH = '/content/gdrive/MyDrive/bert_base_uncased' TOKENIZER = transformers.BertTokenizer.from_pretrained(pretrained_model_name_or_path = BERT_PATH, do_lower_case = True) MAX_LENGTH = 64 TRAIN_FILE = '/content/dataset.csv' TRAIN_BATCH_SIZE = 8 TRAIN_N_WORKERS = 4 VALIDATION_BATCH_SIZE = 4 VALIDATION_N_WORKERS = 1 DEVICE = 'cuda' EPOCHS = 10 MODEL_PATH = 'model.bin' # + [markdown] id="K7pXA73Wz0C9" # ## Loading and Preprocessing the input data # # We'll need to transform our data into a format BERT understands. Text inputs need to be transformed to numeric token ids and arranged in several Tensors before being input to BERT. # + id="XVFm4X1OSnaf" """data_loader""" class BERTDataset: def __init__(self, review, target): self.review = review self.target = target self.tokenizer = TOKENIZER self.max_length = MAX_LENGTH def __len__(self): return len(self.review) def __getitem__(self, item_index): # sanity check review = str(self.review[item_index]) review = ' '.join(review.split()) # encoding inputs = self.tokenizer.encode_plus( review, None, add_special_tokens = True, max_length = self.max_length, pad_to_max_length = True ) input_ids = inputs['input_ids'] attention_mask = inputs['attention_mask'] token_type_ids = inputs['token_type_ids'] return { 'input_ids': torch.tensor(input_ids, dtype = torch.long), 'attention_mask': torch.tensor(attention_mask, dtype = torch.long), 'token_type_ids': torch.tensor(token_type_ids, dtype = torch.long), 'targets': torch.tensor(self.target[item_index], dtype = torch.float) } # + [markdown] id="_T51p6IQz6SR" # ## Defining the model # # BERT(BASE): 12 layers of encoder stack with 12 bidirectional self-attention heads and 768 hidden units. Since sentiment analysis is a binary classification problem, we set the linear transformation *in_features* and *out_features* to 768 and 1 respectively. # # *last_hidden_state* is a sequence of hidden states for all tokens for all batches. 
# # *pooled_output* represents each input sequence as a whole. You can think of this as an embedding for the entire movie review. For the fine-tuning we are going to use the pooled_output array. # + id="zU6leTfoSqjd" """BERT Model""" class BERTBaseUncased(nn.Module): def __init__(self): super(BERTBaseUncased, self).__init__() self.bert = transformers.BertModel.from_pretrained(pretrained_model_name_or_path = BERT_PATH) self.bert_dropout = nn.Dropout(0.3) self.output_layer = nn.Linear(in_features = 768, out_features = 1) def forward(self, input_ids, attention_mask, token_type_ids): last_hidden_state, pooled_output = self.bert(input_ids, attention_mask = attention_mask, token_type_ids = token_type_ids, return_dict = False) bert_output = self.bert_dropout(pooled_output) output = self.output_layer(bert_output) return output # + [markdown] id="6L31higp5XKb" # ## Training the model # # ### Loss function # # Since this is a binary classification problem and the model outputs a probability (a single-unit layer), we'll use BinaryCrossentropy loss function. 
# + id="4mkgGacHSndv" """Loss function""" def loss_fn(outputs, targets): return nn.BCEWithLogitsLoss()(outputs, targets.view(-1, 1)) # + id="Et5iYY9gSvAx" """Train function""" def train_fn(data_loader, model, optimizer, device, scheduler): TOTAL_N_BATCHES = len(data_loader) model.train() for batch_index, dataset in tqdm(enumerate(data_loader), total = TOTAL_N_BATCHES): input_ids = dataset['input_ids'] attention_mask = dataset['attention_mask'] token_type_ids = dataset['token_type_ids'] targets = dataset['targets'] # send to device input_ids = input_ids.to(device, dtype = torch.long) attention_mask = attention_mask.to(device, dtype = torch.long) token_type_ids = token_type_ids.to(device, dtype = torch.long) targets = targets.to(device, dtype = torch.float) optimizer.zero_grad() outputs = model(input_ids = input_ids, attention_mask = attention_mask, token_type_ids = token_type_ids) loss = loss_fn(outputs, targets) loss.backward() optimizer.step() scheduler.step() # + id="9JGSb_PCSxSR" """Evaluation function""" def evaluate_fn(data_loader, model, device): final_outputs, final_targets = [], [] TOTAL_N_BATCHES = len(data_loader) model.eval() with torch.no_grad(): for batch_index, dataset in tqdm(enumerate(data_loader), total = TOTAL_N_BATCHES): input_ids = dataset['input_ids'] attention_mask = dataset['attention_mask'] token_type_ids = dataset['token_type_ids'] targets = dataset['targets'] # send to device input_ids = input_ids.to(device, dtype = torch.long) attention_mask = attention_mask.to(device, dtype = torch.long) token_type_ids = token_type_ids.to(device, dtype = torch.long) targets = targets.to(device, dtype = torch.float) outputs = model(input_ids = input_ids, attention_mask = attention_mask, token_type_ids = token_type_ids) final_outputs.extend(torch.sigmoid(outputs).cpu().detach().numpy().tolist()) final_targets.extend(targets.cpu().detach().numpy().tolist()) return final_outputs, final_targets # + id="DeOG5997SzyL" def main(): LEARNING_RATE = 3e-5 dataframe 
= pd.read_csv(TRAIN_FILE).fillna('none') dataframe.sentiment = dataframe.sentiment.apply(lambda x: 1 if x == 'positive' else 0) # split into training and validation datasets dataframe_train, dataframe_validation = model_selection.train_test_split( dataframe, test_size = 0.1, random_state = 42, stratify = dataframe.sentiment.values ) dataframe_train = dataframe_train.reset_index(drop = True) dataframe_validation = dataframe_validation.reset_index(drop = True) # create training dataset train_dataset = BERTDataset( review = dataframe_train.review.values, target = dataframe_train.sentiment.values ) # create training data_loader train_data_loader = torch.utils.data.DataLoader( dataset = train_dataset, batch_size = TRAIN_BATCH_SIZE, num_workers = TRAIN_N_WORKERS ) # create validation dataset validation_dataset = BERTDataset( review = dataframe_validation.review.values, target = dataframe_validation.sentiment.values ) # create validation data_loader validation_data_loader = torch.utils.data.DataLoader( dataset = validation_dataset, batch_size = VALIDATION_BATCH_SIZE, num_workers = VALIDATION_N_WORKERS ) device = torch.device(DEVICE) model = BERTBaseUncased() model.to(device) """ Optimizer: For fine-tuning, let's use the same optimizer that BERT was originally trained with: the "Adaptive Moments" (Adam). 
This optimizer minimizes the prediction loss and does regularization by weight decay (not using moments), which is also known as AdamW.""" parameter_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_parameters = [ { 'params': [parameter for n, parameter in parameter_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.001 }, { 'params': [parameter for n, parameter in parameter_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0 } ] n_train_steps = int(EPOCHS * len(dataframe_train) / TRAIN_BATCH_SIZE) optimizer = transformers.AdamW(optimizer_parameters, lr = LEARNING_RATE) scheduler = transformers.get_linear_schedule_with_warmup( optimizer, num_warmup_steps = 0, num_training_steps = n_train_steps ) best_accuracy = 0 for _ in range(EPOCHS): train_fn(train_data_loader, model, optimizer, device, scheduler) outputs, targets = evaluate_fn(validation_data_loader, model, device) outputs = np.array(outputs) >= 0.5 accuracy = metrics.accuracy_score(targets, outputs) print('Accuracy Score = {}'.format(accuracy)) if accuracy > best_accuracy: # export the inference torch.save(model.state_dict(), MODEL_PATH) best_accuracy = accuracy # + colab={"base_uri": "https://localhost:8080/"} id="GuV89_TBcfMi" outputId="5b91c7f2-170f-4693-f8b1-7124a8f1ea0d" main() # + [markdown] id="oP--uXO92bRW" # ## References: # # Tensorflow Hub authors # # <NAME>
bert.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Imports and Paths

import urllib3
http = urllib3.PoolManager()

# + init_cell=true
from urllib import request
from bs4 import BeautifulSoup, Comment
import pandas as pd
from datetime import datetime
# from shutil import copyfile
# import time
import json

# + [markdown] heading_collapsed=true
# # Load in previous list of games

# + hidden=true
# Previously scraped snapshot of the top-2000 rankings.
df_gms_lst = pd.read_csv('../data/bgg_top2000_2018-10-06.csv')

# + hidden=true
df_gms_lst.columns

# + hidden=true
# Kaggle dataset metadata skeleton for publishing the scraped CSVs.
metadata_dict = {"title": "BGG Top 2000",
                 "subtitle": "Board Game Geek top 2000 games rankings",
                 "description": "Board Game Geek top 2000 games rankings and other info",
                 "id": "mseinstein/bgg_top2000",
                 "licenses": [{"name": "CC-BY-SA-4.0"}],
                 "resources":[
                     {"path": "bgg_top2000_2018-10-06.csv",
                      "description": "Board Game Geek top 2000 games on 2018-10-06"
                     }
                 ]
                }

# + hidden=true
with open('../data/kaggle/dataset-metadata.json', 'w') as fp:
    json.dump(metadata_dict, fp)
# -

# # Get the id's of the top 2000 board games

# + init_cell=true
# Paginated BGG browse URL; append a page number to fetch that rankings page.
pg_gm_rnks = 'https://boardgamegeek.com/browse/boardgame/page/'


# + init_cell=true
def extract_gm_id(soup):
    # Pull the numeric game id from each row of a BGG rankings page.
    # The id is the third path segment of the second <a> href in the row.
    rows = soup.find('div', {'id': 'collection'}).find_all('tr')[1:]
    id_list = []
    for row in rows:
        id_list.append(int(row.find_all('a')[1]['href'].split('/')[2]))
    return id_list


# + init_cell=true
def top_2k_gms(pg_gm_rnks):
    # Scrape rankings pages 1-20 (100 games per page -> 2000 ids).
    gm_ids = []
    for pg_num in range(1,21):
        pg = request.urlopen(f'{pg_gm_rnks}{str(pg_num)}')
        soup = BeautifulSoup(pg, 'html.parser')
        gm_ids += extract_gm_id(soup)
    return gm_ids
# -

gm_ids = top_2k_gms(pg_gm_rnks)

len(gm_ids)

# # Extract the info for each game in the top 2k using the extracted game id's

# XML API v2 endpoint; a comma-separated id list is appended to bs_pg_gm.
bs_pg = 'https://www.boardgamegeek.com/xmlapi2/'
bs_pg_gm = f'{bs_pg}thing?type=boardgame&stats=1&ratingcomments=1&page=1&pagesize=10&id='


def extract_game_item(item):
    # Flatten one <item> element of the XML API response into a dict:
    # integer fields, list-valued category fields, per-ranktype ranks,
    # and float rating statistics. Missing fields become -1.
    gm_dict = {}
    field_int = ['yearpublished', 'minplayers', 'maxplayers', 'playingtime',
                 'minplaytime', 'maxplaytime', 'minage']
    field_categ = ['boardgamecategory', 'boardgamemechanic', 'boardgamefamily','boardgamedesigner',
                   'boardgameartist', 'boardgamepublisher']
    field_rank = [x['friendlyname'] for x in item.find_all('rank')]
    field_stats = ['usersrated', 'average', 'bayesaverage', 'stddev', 'median', 'owned', 'trading',
                   'wanting', 'wishing', 'numcomments', 'numweights', 'averageweight']
    gm_dict['name'] = item.find('name')['value']
    gm_dict['id'] = item['id']
    gm_dict['num_of_rankings'] = int(item.find('comments')['totalitems'])
    for i in field_int:
        field_val = item.find(i)
        if field_val is None:
            gm_dict[i] = -1
        else:
            gm_dict[i] = int(field_val['value'])
    for i in field_categ:
        # Multi-valued link elements (categories, mechanics, designers, ...).
        gm_dict[i] = [x['value'] for x in item.find_all('link',{'type':i})]
    for i in field_rank:
        field_val = item.find('rank',{'friendlyname':i})
        # Rank column names have spaces stripped (e.g. "Board Game Rank" -> "BoardGameRank").
        if field_val is None or field_val['value'] == 'Not Ranked':
            gm_dict[i.replace(' ','')] = -1
        else:
            gm_dict[i.replace(' ','')] = int(field_val['value'])
    for i in field_stats:
        field_val = item.find(i)
        if field_val is None:
            gm_dict[i] = -1
        else:
            gm_dict[i] = float(field_val['value'])
    return gm_dict


len(gm_ids)

# Fetch the 2000 games in idx_split batched API calls.
# NOTE(review): int(len(gm_ids)/idx_split) truncates, so when len(gm_ids) is
# not divisible by idx_split the trailing remainder of ids is never fetched —
# verify the split is exact or use a ceiling division.
gm_list = []
idx_split = 4
idx_size = int(len(gm_ids)/idx_split)
for i in range(idx_split):
    # Turn the id slice into a comma-separated string: "[1,2,3]" -> "1,2,3".
    idx = str(gm_ids[i*idx_size:(i+1)*idx_size]).replace(' ','')[1:-1]
    pg = request.urlopen(f'{bs_pg_gm}{str(idx)}')
    item_ct = 0
    xsoup = BeautifulSoup(pg, 'xml')
#     while item_ct < 500:
#         xsoup = BeautifulSoup(pg, 'xml')
#         item_ct = len(xsoup.find_all('item'))
    gm_list += [extract_game_item(x) for x in xsoup.find_all('item')]
#     break
df2 = pd.DataFrame(gm_list)
df2.shape

df2.head()

df2.loc[df2["Children'sGameRank"].notnull(),:].head().T

df2.isnull().sum()

# Scratch cell: only builds the first id batch (break on the first iteration);
# the actual fetching code below is commented out.
gm_list = []
idx_split = 200
idx_size = int(len(gm_ids)/idx_split)
for i in range(idx_split):
    idx = str(gm_ids[i*idx_size:(i+1)*idx_size]).replace(' ','')[1:-1]
    break
#     pg = request.urlopen(f'{bs_pg_gm}{str(idx)}')
#     item_ct = 0
#     xsoup = BeautifulSoup(pg, 'xml')
# #     while item_ct < 500:
# #         xsoup = BeautifulSoup(pg, 'xml')
# #         item_ct = len(xsoup.find_all('item'))
#     gm_list += [extract_game_item(x) for x in xsoup.find_all('item')]
# #     break
# df2 = pd.DataFrame(gm_list)
# df2.shape

idx


def create_df_gm_ranks(gm_ids, bs_pg_gm):
    # Fetch all games in idx_split batches and return them as one DataFrame.
    # NOTE(review): same truncating-division remainder issue as the loop above.
    gm_list = []
    idx_split = 4
    idx_size = int(len(gm_ids)/idx_split)
    for i in range(idx_split):
        idx = str(gm_ids[i*idx_size:(i+1)*idx_size]).replace(' ','')[1:-1]
        pg = request.urlopen(f'{bs_pg_gm}{str(idx)}')
        xsoup = BeautifulSoup(pg, 'xml')
        gm_list += [extract_game_item(x) for x in xsoup.find_all('item')]
    df = pd.DataFrame(gm_list)
    return df


df = create_df_gm_ranks(gm_ids, bs_pg_gm)

# Export this week's snapshot, named by today's date.
df2.to_csv(f'../data/kaggle/{str(datetime.now().date())}_bgg_top{len(gm_ids)}.csv', index=False)

with open('../data/kaggle/dataset-metadata.json', 'rb') as f:
    meta_dict = json.load(f)

# Register the new CSV as an additional resource in the Kaggle metadata.
meta_dict['resources'].append({
    'path': f'{str(datetime.now().date())}_bgg_top{len(gm_ids)}.csv',
    'description': f'Board Game Geek top 2000 games on {str(datetime.now().date())}'
})

meta_dict

meta_dict['title'] = 'Board Game Geek (BGG) Top 2000'

meta_dict['resources'][-1]['path'] = '2018-12-15_bgg_top2000.csv'

meta_dict['resources'][-1]['description']= 'Board Game Geek top 2000 games on 2018-12-15'

with open('../data/kaggle/dataset-metadata.json', 'w') as fp:
    json.dump(meta_dict, fp)

# Code for kaggle
#
# kaggle datasets version -m "week of 2018-10-20" -p .\ -d

meta_dict

gm_list = []
idx_split = 4
idx_size = int(len(gm_ids)/idx_split)
for i in range(idx_split):
    idx = str(gm_ids[i*idx_size:(i+1)*idx_size]).replace(' ','')[1:-1]
    break

idx2 = '174430,161936,182028,167791,12333,187645,169786,220308,120677,193738,84876,173346,180263,115746,3076,102794,205637'

pg = request.urlopen(f'{bs_pg_gm}{str(idx)}')
xsoup = BeautifulSoup(pg, 'xml')
aa = xsoup.find_all('item')
len(aa)

# NOTE(review): urllib3.PoolManager has no urlopen() signature like this —
# this bare call looks like a scratch line; confirm it is dead code.
http.urlopen()

# Same request via urllib3 for comparison.
r = http.request('GET', f'{bs_pg_gm}{str(idx)}')
xsoup2 = BeautifulSoup(r.data, 'xml')
# NOTE(review): `bb` searches xsoup (the urllib result), not xsoup2 — likely a
# typo if the intent was to compare the two fetch methods.
bb = xsoup.find_all('item')
len(bb)

# + [markdown] heading_collapsed=true
# # XML2 API

# + [markdown] hidden=true
# Base URI: /xmlapi2/thing?parameters
# - id=NNN
#     - Specifies the id of the thing(s) to retrieve. To request multiple things with a single query, NNN can specify a comma-delimited list of ids.
# - type=THINGTYPE
#     - Specifies that, regardless of the type of thing asked for by id, the results are filtered by the THINGTYPE(s) specified. Multiple THINGTYPEs can be specified in a comma-delimited list.
# - versions=1
#     - Returns version info for the item.
# - videos = 1
#     - Returns videos for the item.
# - stats=1
#     - Returns ranking and rating stats for the item.
# - historical=1
#     - Returns historical data over time. See page parameter.
# - marketplace=1
#     - Returns marketplace data.
# - comments=1
#     - Returns all comments about the item. Also includes ratings when commented. See page parameter.
# - ratingcomments=1
#     - Returns all ratings for the item. Also includes comments when rated. See page parameter. The ratingcomments and comments parameters cannot be used together, as the output always appears in the \<comments\> node of the XML; comments parameter takes precedence if both are specified. Ratings are sorted in descending rating value, based on the highest rating they have assigned to that item (each item in the collection can have a different rating).
# - page=NNN
#     - Defaults to 1, controls the page of data to see for historical info, comments, and ratings data.
# - pagesize=NNN
#     - Set the number of records to return in paging. Minimum is 10, maximum is 100.
# - from=YYYY-MM-DD
#     - Not currently supported.
# - to=YYYY-MM-DD
#     - Not currently supported.
notebooks/bgg_weekly_crawler.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import sys

# Load the raw test set, parsing the timestamp column into datetimes.
test = pd.read_csv(
    '../data/raw/test.csv',
    parse_dates = ['timestamp'],
)

# Baseline: dtypes and memory footprint (MB) before any dtype optimization.
print(test.dtypes)
print(sys.getsizeof(test) / 1024 / 1024)
print(test.memory_usage() / 1024 / 1024)

test

# Shrink the frame: low-cardinality string columns become categoricals and the
# two wide integer columns are downcast to the smallest integer dtype that fits.
df = test.copy()
categorical_columns = (
    'world', 'type', 'title', 'event_code',
    'installation_id', 'game_session', 'event_id',
)
df = df.astype({column: 'category' for column in categorical_columns})
for column in ('game_time', 'event_count'):
    df.loc[:, column] = pd.to_numeric(df[column], downcast = 'integer')

# Footprint after optimization, for comparison with the baseline above.
print(sys.getsizeof(df) / 1024 / 1024)
print(df.memory_usage() / 1024 / 1024)

df
notebooks/tlh_1.01_clean_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Intelligent Systems Assignment 1
#
# ## Masterball solver
#
# **Names and IDs:**
#
#
# ### 1. Create a class to model the Masterball problem
# A Masterball must be represented as an array of arrays with integer values representing the color of the tile in each position:
#
# A solved masterball must look like this:
#
# ```python
# [ [0, 1, 2, 3, 4, 5, 6, 7],
#   [0, 1, 2, 3, 4, 5, 6, 7],
#   [0, 1, 2, 3, 4, 5, 6, 7],
#   [0, 1, 2, 3, 4, 5, 6, 7]
# ]
# ```

# #### Variables modeling the actions

# +
'''
This variables MUST not be changed.
They represent the movements of the masterball.
'''
# Action labels: R_i rotates row i right; V_i is a clockwise vertical move
# starting at column i. They are plain strings used as action identifiers.
R_0 = "Right 0"
R_1 = "Right 1"
R_2 = "Right 2"
R_3 = "Right 3"

V_0 = "Vertical 0"
V_1 = "Vertical 1"
V_2 = "Vertical 2"
V_3 = "Vertical 3"
V_4 = "Vertical 4"
V_5 = "Vertical 5"
V_6 = "Vertical 6"
V_7 = "Vertical 7"
# -

# `R_i` moves the `i`th row to the right. For instance, `R_2` applied to the solved state will produce:
#
# ```python
# [ [0, 1, 2, 3, 4, 5, 6, 7],
#   [0, 1, 2, 3, 4, 5, 6, 7],
#   [7, 0, 1, 2, 3, 4, 5, 6],
#   [0, 1, 2, 3, 4, 5, 6, 7]
# ]
# ```
#
# `V_i` performs a clockwise vertical move starting with the `i`th column
#
# `V_1` applied to the above state will produce:
#
# ```python
# [ [0, 4, 3, 2, 1, 5, 6, 7],
#   [0, 3, 2, 1, 0, 5, 6, 7],
#   [7, 4, 3, 2, 1, 4, 5, 6],
#   [0, 4, 3, 2, 1, 5, 6, 7]
# ]
# ```

# #### The Masterball problem class

import search


class MasterballProblem(search.SearchProblem):
    # Assignment template: the method bodies below are intentionally left as
    # `pass` placeholders for students to complete.

    def __init__(self, startState):
        '''
        Store the initial state in the problem representation and any useful
        data.
        Here are some examples of initial states:

        [[0, 1, 4, 5, 6, 2, 3, 7], [0, 1, 3, 4, 5, 6, 3, 7], [1, 2, 4, 5, 6, 2, 7, 0], [0, 1, 4, 5, 6, 2, 3, 7]]
        [[0, 7, 4, 5, 1, 6, 2, 3], [0, 7, 4, 5, 0, 5, 2, 3], [7, 6, 3, 4, 1, 6, 1, 2], [0, 7, 4, 5, 1, 6, 2, 3]]
        [[0, 1, 6, 4, 5, 2, 3, 7], [0, 2, 6, 5, 1, 3, 4, 7], [0, 2, 6, 5, 1, 3, 4, 7], [0, 5, 6, 4, 1, 2, 3, 7]]
        '''
        # Counter for expanded nodes (incremented by getSuccessors).
        self.expanded = 0
        ### your code here ###
        pass

    def isGoalState(self, state):
        '''
        Define when a given state is a goal state (A correctly colored
        masterball)
        '''
        ### your code here ###
        pass

    def getStartState(self):
        '''
        Implement a method that returns the start state according to the
        SearchProblem contract.
        '''
        ### your code here ###
        pass

    def getSuccessors(self, state):
        '''
        Implement a successor function: Given a state from the masterball
        return a list of the successors and their corresponding actions.

        This method *must* return a list where each element is a tuple of
        three elements with the state of the masterball in the first
        position, the action (according to the definition above) in the
        second position, and the cost of the action in the last position.

        Note that you should not modify the state.
        '''
        self.expanded += 1
        ### your code here ###
        pass


# ### 2. Implement iterative deepening search
#
# Follow the example code provided in class and implement iterative deepening search (IDS).

# +
def iterativeDeepeningSearch(problem):
    # Template stub: should return a list of actions solving `problem`.
    ### your code here ###
    return []


def aStarSearch(problem, heuristic):
    # Template stub: should return a list of actions solving `problem`
    # using A* guided by `heuristic`.
    ### your code here ###
    return []
# -

# Evaluate it to see what is the maximum depth that it could explore in a reasonable time. Report the results.

# ### 3. Implement different heuristics for the problem
# Implement at least two admissible and consistent heuristics. Compare A* using the heuristics against IDS calculating the number of expanded nodes and the effective branching factor, in the same way as it is done in figure 3.29 of [Russell10].


def myHeuristic(state):
    # Template stub: a constant-zero heuristic (equivalent to uniform cost).
    ### your code here ###
    return 0


# +
def solveMasterBall(problem, search_function):
    '''
    This function receives a Masterball problem instance and a
    search_function (IDS or A*S) and must return a list of actions that
    solve the problem.
    '''
    ### your code here ###
    return []


problem = MasterballProblem([ [0, 4, 3, 2, 1, 5, 6, 7],
                              [0, 3, 2, 1, 0, 5, 6, 7],
                              [7, 4, 3, 2, 1, 4, 5, 6],
                              [0, 4, 3, 2, 1, 5, 6, 7]])

# NOTE(review): solveMasterBall is documented to take a search *function*,
# but these calls pass the *result* of already invoking the search —
# confirm whether the intent is solveMasterBall(problem, iterativeDeepeningSearch).
# (Python 2 print statements, matching the notebook's Python 2 kernel.)
print solveMasterBall(problem, iterativeDeepeningSearch(problem))
print solveMasterBall(problem, aStarSearch(problem, myHeuristic))
masterball.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
### PLOTLY - TERNARY PHASE DIAGRAM

# pandas reads the data table; plotly express draws the ternary scatter.
import pandas as pd
import os
root_dir = os.getcwd()
import plotly.express as px

# Changing Directory back to the folder 'Dataset_Final' (root directory)
os.chdir(root_dir)

# Load the binodal curve and tie-line points.
binodal = pd.read_csv('Ternary_Phase_Diagram.csv')

# Collect the styling options in one dict so they are easy to scan:
# marker size comes from the 'Size' column (tie-line points are larger),
# colors distinguish the binodal from the two tie lines, and the 'Shape'
# column selects the marker symbol (circles, squares, diamonds).
plot_options = dict(
    a="Ethanol",
    b="Water",
    c="Toluene",
    size='Size',
    size_max=15,
    color='Color',
    color_discrete_map={
        "Binodal": "black",
        "Tie Line 1": "red",
        "Tie Line 2": "blue",
    },
    symbol="Shape",
)
fig = px.scatter_ternary(binodal, **plot_options)

# Show ternary plot
fig.show()

# Export as SVG via the Kaleido engine.
fig.to_image(format="svg", engine="kaleido")
fig.write_image('Binodal_plotly.svg')

# Export as PNG via the Kaleido engine.
fig.to_image(format="png", engine="kaleido")
fig.write_image('Binodal_plotly.png')
# -
.ipynb_checkpoints/PlottingTernaryPhaseDiagram_unmodifiedversion-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Demo 3: Simple Image reconstruction # + active="" # This demo will show you a simple image reconstruction can be performed, by using OS_SART and FDK. # NOTE: if you havent already downloaded the tigre_demo_file and navigated to the correct directory, do so before continuing with this demo. # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # This file is part of the TIGRE Toolbox # # Copyright (c) 2015, University of Bath and # CERN-European Organization for Nuclear Research # All rights reserved. # # License: Open Source under BSD. # See the full license at # https://github.com/CERN/TIGRE/license.txt # # Contact: <EMAIL> # Codes: https://github.com/CERN/TIGRE/ # -------------------------------------------------------------------------- # Coded by: MATLAB (original code): <NAME> # PYTHON : <NAME>,<NAME> # - # ## Define geometry from tigre.geometry import TIGREParameters geo=TIGREParameters(high_quality=False) # ## Load data and generate projections import numpy as np from _Ax import Ax from Test_data import data_loader # define angles angles=np.linspace(0,2*np.pi,dtype=np.float32) # load head phantom data head=data_loader.load_head_phantom(number_of_voxels=geo.nVoxel) # generate projections projections=Ax(head,geo,angles,'interpolated') # ## Reconstruct image using OS-SART and FDK # + from tigre.Algorithms.FDK import FDK from tigre.Algorithms.OS_SART import OS_SART from tigre.Utilities.plotImg import plotImg # OS_SART niter=50 imgOSSART=OS_SART(projections,geo,angles,niter) # FDK imgfdk=FDK(projections,geo,angles) # Show the results plotImg(np.hstack((imgOSSART,imgfdk)),slice=32,dim='x') # -
Python/tigre_demo_file/d03_simple_image_reconstruction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Now You Code 1: Address # # Write a Python program to input elements of your postal address and then output them as if they were an address label. The program should use a dictionary to store the address and complete two function defintions one for inputting the address and one for printing the address. # # **NOTE:** While you most certainly can write this program without using dictionaries or functions, the point of the exercise is to get used to using them!!! # # Sample Run: # # ``` # Enter Street: 314 Hinds Hall # Enter City: Syracuse # Enter State: NY # Enter Postal Zip Code: 13244 # Mailing Address: # 314 Hinds Hall # Syracuse , NY 13244 # # ``` # ## Step 1: Problem Analysis `input_address` function # # This function should get input from the user at run time and return the input address. 
# # Inputs: None (gets input from user)
#
# # Outputs: a Python dictionary of address info (street, city, state, postal_code)
#
# # Algorithm (Steps in Program):
#

# +
## Step 2: Write input_address_ function
#input: None (inputs from console)
#output: dictionary of the address
def input_address():
    """Prompt the user for each part of a postal address.

    Returns a dictionary with the keys 'street', 'city', 'state' and
    'zipcode'. The dictionary is created locally, so the function no longer
    depends on a global `address` variable existing before it is called
    (the original raised NameError when called on its own).
    """
    address = {}
    address['street'] = input("Enter your street and house number: ")
    address['city'] = input("Enter your city : ")
    address['state'] = input("Enter your state: ")
    address['zipcode'] = input("Enter your zip code: ")
    return address
# -

# ## Step 3: Problem Analysis `print_address` function
#
# This function should display a mailing address using the dictionary variable
#
# Inputs: dictionary variable of address information (street, city, state, postal_code)
#
# Outputs: None (prints to screen)
#
# Algorithm (Steps in Program):
#

# ## Step 4: write code
# input: address dictionary
# output: none (outputs to console)
def print_address(address):
    """Print the address dictionary formatted as a mailing label."""
    print("MAILING ADDRESS:")
    print(address['street'])
    # print() inserts a space around the "," separator, giving "City , ST zip".
    print(address['city'], ",", address['state'], address['zipcode'])
    return


# ## Step 5: Problem Analysis main program
#
# Should be trivial at this point.
#
# Inputs:
#
# Outputs:
#
# Algorithm (Steps in Program):
#

# +
## Step 6: write main program, use other 2 functions you made to solve this problem.
# main program
# Capture the dictionary returned by input_address() instead of relying on a
# shared global; the __main__ guard keeps the prompts from firing if this
# file is imported as a module (running the notebook still executes it).
if __name__ == "__main__":
    address = input_address()
    print_address(address)
# -

# ## Step 7: Questions
#
# 1. Explain a strategy for a situation when an expected dictionary key, like 'state' for example does not exist?
#
# Answer: you can do (address.get('state','Key does not exist')).
#
#
# 2. The program as it is written is not very useful. How can we make it more useful?
# #
# Answer: You can have it also ask for a return address
#
#
# ## Step 8: Reflection
#
# Reflect upon your experience completing this assignment. This should be a personal narrative, in your own voice, and cite specifics relevant to the activity as to help the grader understand how you arrived at the code you submitted. Things to consider touching upon: Elaborate on the process itself. Did your original problem analysis work as designed? How many iterations did you go through before you arrived at the solution? Where did you struggle along the way and how did you overcome it? What did you learn from completing the assignment? What do you need to work on to get better? What was most valuable and least valuable about this exercise? Do you have any suggestions for improvements?
#
# To make a good reflection, you should journal your thoughts, questions and comments while you complete the exercise.
#
# Keep your response to between 100 and 250 words.
#
# `--== Write Your Reflection Below Here ==--`
#
# I was able to do this practice with no help, but it did take some time because there was an error where it returned my list as {}. It took some time to figure out why that happened and fix the problem. I'd say this took me about 1-1.5 hours to finish, just because I was getting a little bit stuck at first. Now that I have the correct code, I understand how and why it works, and it was actually pretty simple. I just overthought a few things.
content/lessons/09/Now-You-Code/NYC1-Address.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # DCGAN on the MNIST Dataset

import tempfile
import os

import numpy as np
import tensorflow as tf
from tensorflow.python.keras import layers
from matplotlib import pyplot as plt

import fastestimator as fe
from fastestimator.backend import binary_crossentropy, feed_forward
from fastestimator.dataset.data import mnist
from fastestimator.op.numpyop import LambdaOp
from fastestimator.op.numpyop.univariate import ExpandDims, Normalize
from fastestimator.op.tensorop import TensorOp
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace.io import ModelSaver

# + tags=["parameters"]
# Experiment parameters (the "parameters" cell tag allows papermill overrides).
batch_size = 256
epochs = 50
train_steps_per_epoch = None
save_dir = tempfile.mkdtemp()       # temp dir for periodic generator checkpoints
model_name = 'model_epoch_50.h5'    # checkpoint loaded later for inference
# -

# <h2>Building components</h2>

# ### Step 1: Prepare training and define a `Pipeline`

# We are loading data from tf.keras.datasets.mnist and defining a series of operations to perform on the data before the training:

train_data, _ = mnist.load_data()
pipeline = fe.Pipeline(
    train_data=train_data,
    batch_size=batch_size,
    ops=[
        ExpandDims(inputs="x", outputs="x"),
        # Maps pixel values from [0, 255] into [-1, 1], matching the
        # generator's tanh output range.
        Normalize(inputs="x", outputs="x", mean=1.0, std=1.0, max_pixel_value=127.5),
        # Fresh 100-dim Gaussian noise vector "z" per sample for the generator.
        LambdaOp(fn=lambda: np.random.normal(size=[100]).astype('float32'), outputs="z")
    ])


# ### Step 2: Create a `model` and FastEstimator `Network`

# First, we have to define the network architecture for both our <b>Generator</b> and <b>Discriminator</b>. After defining the architecture, users are expected to feed the architecture definition, along with associated model names and optimizers, to fe.build.

def generator():
    # Maps a 100-dim noise vector to a 28x28x1 image in [-1, 1] (tanh output)
    # by upsampling 7x7 -> 14x14 -> 28x28 with transposed convolutions.
    model = tf.keras.Sequential()
    model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100, )))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((7, 7, 256)))
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    return model


def discriminator():
    # Binary real/fake classifier over 28x28x1 images; outputs a single
    # unbounded logit (losses below use from_logits=True).
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Flatten())
    model.add(layers.Dense(1))
    return model


gen_model = fe.build(model_fn=generator, optimizer_fn=lambda: tf.optimizers.Adam(1e-4))
disc_model = fe.build(model_fn=discriminator, optimizer_fn=lambda: tf.optimizers.Adam(1e-4))


# We define the generator and discriminator losses. These can have multiple inputs and outputs.

class GLoss(TensorOp):
    """Compute generator loss."""
    def forward(self, data, state):
        # Generator wants the discriminator to score fakes as real (label 1).
        return binary_crossentropy(y_pred=data, y_true=tf.ones_like(data), from_logits=True)


class DLoss(TensorOp):
    """Compute discriminator loss."""
    def forward(self, data, state):
        # Discriminator wants real images scored 1 and generated images 0.
        true_score, fake_score = data
        real_loss = binary_crossentropy(y_pred=true_score, y_true=tf.ones_like(true_score), from_logits=True)
        fake_loss = binary_crossentropy(y_pred=fake_score, y_true=tf.zeros_like(fake_score), from_logits=True)
        total_loss = real_loss + fake_loss
        return total_loss


# `fe.Network` takes series of operators. Here we pass our models wrapped into `ModelOps` along with our loss functions and some update rules:

network = fe.Network(ops=[
    ModelOp(model=gen_model, inputs="z", outputs="x_fake"),
    ModelOp(model=disc_model, inputs="x_fake", outputs="fake_score"),
    GLoss(inputs="fake_score", outputs="gloss"),
    UpdateOp(model=gen_model, loss_name="gloss"),
    ModelOp(inputs="x", model=disc_model, outputs="true_score"),
    DLoss(inputs=("true_score", "fake_score"), outputs="dloss"),
    UpdateOp(model=disc_model, loss_name="dloss")
])

# ### Step 3: Prepare `Estimator` and configure the training loop

# We will define an `Estimator` that has four notable arguments: network, pipeline, epochs and traces. Our `Network` and `Pipeline` objects are passed here as an argument along with the number of epochs and a `Trace`, in this case one designed to save our model every 5 epochs.

traces=ModelSaver(model=gen_model, save_dir=save_dir, frequency=5)
estimator = fe.Estimator(pipeline=pipeline,
                         network=network,
                         epochs=epochs,
                         traces=traces,
                         train_steps_per_epoch=train_steps_per_epoch)

# <h2>Training</h2>

estimator.fit()

# <h2>Inferencing</h2>

# For inferencing, first we have to load the trained model weights. We will load the trained generator weights using <i>fe.build</i>

model_path = os.path.join(save_dir, model_name)
trained_model = fe.build(model_fn=generator, weights_path=model_path, optimizer_fn=lambda: tf.optimizers.Adam(1e-4))

# We will the generate some images from random noise:

images = feed_forward(trained_model, np.random.normal(size=(16, 100)), training=False)

# Undo the [-1, 1] normalization (x * 127.5 + 127.5) and plot a 4x4 grid.
fig, axes = plt.subplots(4, 4)
axes = np.ravel(axes)
for i in range(images.shape[0]):
    axes[i].axis('off')
    axes[i].imshow(np.squeeze(images[i, ...] * 127.5 + 127.5), cmap='gray')
apphub/image_generation/dcgan/dcgan.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tensor
#     language: python
#     name: tensor
# ---

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -

# Show every column when displaying wide frames.
pd.set_option("display.max_columns",None)

# ## Lifecycle
#
# * Data Analytics
# * Feature Engineering
# * Feature Selection
# * Model Building
# * Model Deployment
#
# Dataset:- https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data

df = pd.read_csv("./datasets/house-prices-advanced-regression-techniques/train.csv")

df.head(8)

df.shape

# ## In Data Analysis we will analyze to find out the below stuff
#
# * Missing Values
# * All The Numerical Variables
# * Distribution of the Numerical Variables (if it is skewed then we have to do transformation)
# * Categorical Variables
# * Cardinality of Categorical Variables
# * Outliers
# * Relationship between independent and dependent feature (SalePrice)

# ## Missing value

# +
# Columns with more than one missing value, and their null percentage.
features_with_na = [feature for feature in df.columns if df[feature].isnull().sum() >1]

for feature in features_with_na:
    print(feature," : ", df[feature].isnull().mean() *100)

print("\nTotal Features with null values: ",len(features_with_na))
# -

# Dropping columns having high % of null values
df1 = df.drop(columns = ["Alley","PoolQC","Fence","MiscFeature"])

# Remaining null-bearing columns after the drop.
# NOTE(review): the `list1 =` chained assignment creates a stray alias that is
# never used again — looks like a leftover.
features_with_na2 = list1 = [ele for ele in features_with_na if ele not in ["Alley","PoolQC","Fence","MiscFeature"]]
features_with_na2

df1[features_with_na2].info()

# Can observe mean and median are almost same, and max value are too high
df1[features_with_na2].describe()

df1["MasVnrArea"].value_counts()

# For normal distribution replacing with mean values
# NOTE(review): MasVnrArea is filled with 0, not the mean, despite the comment
# above — confirm this is intentional (0 is its most frequent value per the
# value_counts cell).
df1["LotFrontage"] = df1["LotFrontage"].fillna(df1["LotFrontage"].mean())
df1["GarageYrBlt"] = df1["GarageYrBlt"].fillna(df1["GarageYrBlt"].mean())
df1["MasVnrArea"] = df1["MasVnrArea"].fillna(0)

df1[features_with_na2].describe()

# +
# Categorical columns that still contain nulls after the numeric fills.
features_with_na3 = [ele for ele in features_with_na2 if ele not in ["LotFrontage","GarageYrBlt","MasVnrArea"]]

for col in features_with_na3:
    print(col," :\n")
    print(df[col].value_counts())
    print("\n\n")
# -

df1[features_with_na3].mode()

# +
# Fill null values of categorical column with most probable value
features_with_na4 = ["MasVnrType","FireplaceQu"]
df2 = df1.copy()
for col in features_with_na3:
    if col not in features_with_na4:
        # mode() returns a Series; [0] picks the most frequent value.
        df2[col] = df1[col].fillna(df1[col].mode()[0])

df2[features_with_na3].info()
# -

df2[features_with_na3].mode()

# Since it has 2 many null values
df3 = df2.drop(columns = features_with_na4)

df3.isnull().sum().sum()

df3.info()

# Last remaining null: fill Electrical with its mode, in place.
df3["Electrical"].fillna(df3["Electrical"].mode()[0], inplace=True)

df3.isnull().sum().sum()

# Object-dtype (categorical) columns for the next analysis stage.
cat_col = df3.select_dtypes(include="O")

len(cat_col.columns)
Kaggle_Solutions/House-Prices-Advanced-Regression-Techniques.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a>

# **Solution for Hadamard Operator**
#
# _prepared by <NAME>_

# ### Task 1
#
# A single qubit starts in state |0>.  Flip it to |1> with an x-gate, then:
#
# * Experiment 1: apply one Hadamard gate, measure, and execute 10000 shots.
# * Experiment 2: apply two Hadamard gates, measure, and execute 10000 shots.
#
# Compare the results.  The two circuit diagrams are shown in
# ../photon/images/photon8.jpg and ../photon/images/photon9.jpg.

# ### Solution
#
# #### Experiment 1: x-gate, h-gate, and measurement

# +
# circuit/register classes plus the local simulator backend
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

# one qubit, and one classical bit to record its measurement outcome
q = QuantumRegister(1, "qreg")
c = ClassicalRegister(1, "creg")

qc = QuantumCircuit(q, c)

# NOT gate: |0> becomes |1>
qc.x(q[0])

# a single Hadamard (quantum coin flip) on the qubit
qc.h(q[0])

# read the qubit out into the classical bit
qc.measure(q, c)

# draw with matplotlib; re-run the cell if the figure is not displayed
qc.draw(output='mpl')
# -

# +
# simulate the circuit and collect the outcome statistics
job = execute(qc, Aer.get_backend('qasm_simulator'), shots=10000)
counts = job.result().get_counts(qc)
print(counts)
# -

# #### Experiment 2: x-gate, h-gate, h-gate, and measurement

# +
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

# fresh registers and circuit for the second experiment
q2 = QuantumRegister(1, "qreg2")
c2 = ClassicalRegister(1, "creg2")

qc2 = QuantumCircuit(q2, c2)

# NOT gate: |0> becomes |1>
qc2.x(q2[0])

# two Hadamards back to back
qc2.h(q2[0])
qc2.h(q2[0])

qc2.measure(q2, c2)

# draw with matplotlib; re-run the cell if the figure is not displayed
qc2.draw(output='mpl')
# -

# +
# simulate and compare the outcome counts with experiment 1
job = execute(qc2, Aer.get_backend('qasm_simulator'), shots=10000)
counts2 = job.result().get_counts(qc2)
print(counts2)
Bronze/quantum-with-qiskit/Q20_Hadamard_Solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:notebook] *
#     language: python
#     name: conda-env-notebook-py
# ---

# # Test scipy.signal.correlate on some atl06 data from foundation ice stream

# +
import numpy as np
import scipy, sys, os, pyproj, glob, re, h5py
import matplotlib.pyplot as plt
from scipy.signal import correlate
from astropy.time import Time

# %matplotlib widget
# %load_ext autoreload
# %autoreload 2
# -

# # Test scipy.signal.correlate
#
# Generate some test data: a 15-sample boxcar starting at index 30.

# +
dx = 0.1
x = np.arange(0, 10, dx)
y = np.zeros(np.shape(x))

ix0 = 30
ix1 = 30 + 15
y[ix0:ix1] = 1

fig, axs = plt.subplots(1, 2)
axs[0].plot(x, y, 'k')
axs[0].set_xlabel('distance (m)')
axs[0].set_ylabel('value')
axs[1].plot(np.arange(len(x)), y, 'k')
axs[1].set_xlabel('index')
# -

# Make a signal to correlate with: the same boxcar shifted by a known offset.

# +
imposed_offset = int(14/dx)  # 14 meters, in units of samples

x_noise = np.arange(0, 50, dx)  # make the vector we're comparing with much longer
y_noise = np.zeros(np.shape(x_noise))
y_noise[ix0 + imposed_offset : ix1 + imposed_offset] = 1
# uncomment the line below to add noise
# y_noise = y_noise * np.random.random(np.shape(y_noise))

fig, axs = plt.subplots(1, 2)
axs[0].plot(x, y, 'k')
axs[0].set_xlabel('distance (m)')
axs[0].set_ylabel('value')
axs[1].plot(np.arange(len(x)), y, 'k')
axs[1].set_xlabel('index')
axs[0].plot(x_noise, y_noise, 'b')
axs[0].set_xlabel('distance (m)')
axs[0].set_ylabel('value')
axs[1].plot(np.arange(len(x_noise)), y_noise, 'b')
axs[1].set_xlabel('index')
fig.suptitle('black = original, blue = shifted')
# -

# Try scipy.signal.correlate:
#
# mode='full' returns the entire cross correlation; could be 'valid' to
# return only non- zero-padded part
#
# method = direct (not fft)

corr = correlate(y_noise, y, mode='full', method='direct')

# normalize so a perfect overlap of the two signals gives 1
norm_val = np.sqrt(np.sum(y_noise**2)*np.sum(y**2))
corr = corr / norm_val

# What are the dimensions of corr?
print('corr: ', np.shape(corr))
print('x: ', np.shape(x))
print('x: ', np.shape(x_noise))

# +
# lagvec = np.arange(0,len(x_noise) - len(x) + 1)
# lag axis for mode='full': from -(len(y)-1) through len(y_noise)-1
lagvec = np.arange(-(len(x) - 1), len(x_noise), 1)
shift_vec = lagvec * dx  # lags (samples) converted to meters

# index of the correlation peak -> best lag / best shift
ix_peak = np.arange(len(corr))[corr == np.nanmax(corr)][0]
best_lag = lagvec[ix_peak]
best_shift = shift_vec[ix_peak]

fig, axs = plt.subplots(3, 1)
axs[0].plot(lagvec, corr)
axs[0].plot(lagvec[ix_peak], corr[ix_peak], 'r*')
axs[0].set_xlabel('lag (samples)')
axs[0].set_ylabel('correlation coefficient')
axs[1].plot(shift_vec, corr)
axs[1].plot(shift_vec[ix_peak], corr[ix_peak], 'r*')
axs[1].set_xlabel('shift (m)')
axs[1].set_ylabel('correlation coefficient')
axs[2].plot(x + best_shift, y, 'k')
axs[2].plot(x_noise, y_noise, 'b--')
axs[2].set_xlabel('shift (m)')
fig.suptitle(' '.join(['Shift ', str(best_lag), ' samples, or ', str(best_shift), ' m to line up signals']))
# -

# # Let's try with some ATL06 data
#
# Load some repeat data; import readers, etc.

# +
# # ! cd ..; [ -d pointCollection ] || git clone https://www.github.com/smithB/pointCollection.git
# sys.path.append(os.path.join(os.getcwd(), '..'))
# # !python3 -m pip install --user git+https://github.com/tsutterley/pointCollection.git@pip

import pointCollection as pc
# -

moa_datapath = '/srv/tutorial-data/land_ice_applications/'
datapath = '/home/jovyan/shared/surface_velocity/FIS_ATL06/'

# +
# example hf5 file, if you need to look at the fields
# datapath='/home/jovyan/shared/surface_velocity/FIS_ATL06_small/processed_ATL06_20191129105346_09700511_003_01.h5'
# # !h5ls -r /home/jovyan/shared/surface_velocity/FIS_ATL06_small/processed_ATL06_20191129105346_09700511_003_01.h5
# -

# # Geographic setting : Foundation Ice Stream

print(pc.__file__)

# +
# something wrong with pointCollection
spatial_extent = np.array([-102, -76, -98, -74.5])
lat = spatial_extent[[1, 3, 3, 1, 1]]
lon = spatial_extent[[2, 2, 0, 0, 2]]
print(lat)
print(lon)
# project the coordinates to Antarctic polar stereographic
xy = np.array(pyproj.Proj(3031)(lon, lat))
# get the bounds of the projected coordinates
XR = [np.nanmin(xy[0, :]), np.nanmax(xy[0, :])]
YR = [np.nanmin(xy[1, :]), np.nanmax(xy[1, :])]

MOA = pc.grid.data().from_geotif(os.path.join(moa_datapath, 'MOA', 'moa_2009_1km.tif'), bounds=[XR, YR])

# show the mosaic:
plt.figure()
MOA.show(cmap='gray', clim=[14000, 17000])
plt.plot(xy[0, :], xy[1, :])
plt.title('Mosaic of Antarctica for Pine Island Glacier')
# -

# # Load repeat track data
#
# ATL06 reader

def atl06_to_dict(filename, beam, field_dict=None, index=None, epsg=None):
    """
    Read selected datasets from an ATL06 file

    Input arguments:
        filename: ATl06 file to read
        beam: a string specifying which beam is to be read (ex: gt1l, gt1r, gt2l, etc)
        field_dict: A dictinary describing the fields to be read
                keys give the group names to be read,
                entries are lists of datasets within the groups
        index: which entries in each field to read
        epsg: an EPSG code specifying a projection (see www.epsg.org).  Good choices are:
            for Greenland, 3413 (polar stereographic projection, with Greenland along the Y axis)
            for Antarctica, 3031 (polar stereographic projection, centered on the Pouth Pole)
    Output argument:
        D6: dictionary containing ATL06 data.  Each dataset in
            dataset_dict has its own entry in D6.  Each dataset
            in D6 contains a numpy array containing the data
    """
    if field_dict is None:
        field_dict = {None: ['latitude', 'longitude', 'h_li', 'atl06_quality_summary'],
                      'ground_track': ['x_atc', 'y_atc'],
                      'fit_statistics': ['dh_fit_dx', 'dh_fit_dy']}
    D = {}
    # below: file_re = regular expression, it will pull apart the regular
    # expression to get the information from the filename
    file_re = re.compile('ATL06_(?P<date>\d+)_(?P<rgt>\d\d\d\d)(?P<cycle>\d\d)(?P<region>\d\d)_(?P<release>\d\d\d)_(?P<version>\d\d).h5')
    with h5py.File(filename, 'r') as h5f:
        for key in field_dict:
            for ds in field_dict[key]:
                # group-level datasets live under land_ice_segments/<group>/
                if key is not None:
                    ds_name = beam+'/land_ice_segments/'+key+'/'+ds
                else:
                    ds_name = beam+'/land_ice_segments/'+ds
                if index is not None:
                    D[ds] = np.array(h5f[ds_name][index])
                else:
                    D[ds] = np.array(h5f[ds_name])
                # replace fill values with NaN (cast to float first)
                if '_FillValue' in h5f[ds_name].attrs:
                    bad_vals = D[ds] == h5f[ds_name].attrs['_FillValue']
                    D[ds] = D[ds].astype(float)
                    D[ds][bad_vals] = np.NaN
        D['data_start_utc'] = h5f['/ancillary_data/data_start_utc'][:]
        D['delta_time'] = h5f['/' + beam + '/land_ice_segments/delta_time'][:]
        D['segment_id'] = h5f['/' + beam + '/land_ice_segments/segment_id'][:]
    # optionally project lon/lat into x/y with the requested EPSG code
    if epsg is not None:
        xy = np.array(pyproj.proj.Proj(epsg)(D['longitude'], D['latitude']))
        D['x'] = xy[0, :].reshape(D['latitude'].shape)
        D['y'] = xy[1, :].reshape(D['latitude'].shape)
    # parse rgt/cycle out of the filename
    temp = file_re.search(filename)
    D['rgt'] = int(temp['rgt'])
    D['cycle'] = int(temp['cycle'])
    D['beam'] = beam
    return D

# Read in files; this next cell took ~1 minute early in the morning

# +
# find all the files in the directory:
# ATL06_files=glob.glob(os.path.join(datapath, 'PIG_ATL06', '*.h5'))
ATL06_files = glob.glob(os.path.join(datapath, '*.h5'))

D_dict = {}
error_count = 0
for file in ATL06_files[:10]:
    try:
        D_dict[file] = atl06_to_dict(file, '/gt2l', index=slice(0, -1, 25), epsg=3031)
    except KeyError as e:
        print(f'file {file} encountered error {e}')
        error_count += 1
print(f"read {len(D_dict)} data files of which {error_count} gave errors")
# -

# Plot ground tracks

plt.figure(figsize=[8, 8])
hax0 = plt.gcf().add_subplot(211, aspect='equal')
MOA.show(ax=hax0, cmap='gray', clim=[14000, 17000]);
hax1 = plt.gcf().add_subplot(212, aspect='equal', sharex=hax0, sharey=hax0)
MOA.show(ax=hax1, cmap='gray', clim=[14000, 17000]);
for fname, Di in D_dict.items():
    cycle = Di['cycle']
    # cycles 1-2 go on the top axes, later cycles on the bottom
    if cycle <= 2:
        ax = hax0
    else:
        ax = hax1
    # print(fname)
    # print(f'\t{rgt}, {cycle}, {region}')
    ax.plot(Di['x'], Di['y'])
    if True:
        try:
            if cycle < 3:
                ax.text(Di['x'][0], Di['y'][0], f"rgt={Di['rgt']}, cyc={cycle}", clip_on=True)
            elif cycle == 3:
                ax.text(Di['x'][0], Di['y'][0], f"rgt={Di['rgt']}, cyc={cycle}+", clip_on=True)
        except IndexError:
            # empty tracks have no first point to label
            pass
hax0.set_title('cycles 1 and 2');
hax1.set_title('cycle 3+');

# Map view elevations

# +
map_fig = plt.figure()
map_ax = map_fig.add_subplot(111)
# MOA.show(ax=map_ax, cmap='gray', clim=[14000, 17000])
for fname, Di in D_dict.items():
    # select elevations with good quality_summary
    good = Di['atl06_quality_summary'] == 0
    ms = map_ax.scatter(Di['x'][good], Di['y'][good], 2, c=Di['h_li'][good],
                        vmin=0, vmax=1000, label=fname)
map_ax._aspect = 'equal'
plt.colorbar(ms, label='elevation');
# -

# Repeat track elevation profile
#
# <NAME>'s code to plot the individual segments:

def plot_segs(D6, ind=None, **kwargs):
    """
    Plot a sloping line for each ATL06 segment
    """
    if ind is None:
        ind = np.ones_like(D6['h_li'], dtype=bool)
    # define the heights of the segment endpoints.  Leave a row of NaNs so
    # that the endpoints don't get joined
    h_ep = np.zeros([3, D6['h_li'][ind].size])+np.NaN
    h_ep[0, :] = D6['h_li'][ind]-D6['dh_fit_dx'][ind]*20
    h_ep[1, :] = D6['h_li'][ind]+D6['dh_fit_dx'][ind]*20
    # define the x coordinates of the segment endpoints
    x_ep = np.zeros([3, D6['h_li'][ind].size])+np.NaN
    x_ep[0, :] = D6['x_atc'][ind]-20
    x_ep[1, :] = D6['x_atc'][ind]+20
    plt.plot(x_ep.T.ravel(), h_ep.T.ravel(), **kwargs)

# A revised code to plot the elevations of segment midpoints (h_li):

def plot_elevation(D6, ind=None, **kwargs):
    """
    Plot midpoint elevation for each ATL06 segment
    """
    if ind is None:
        ind = np.ones_like(D6['h_li'], dtype=bool)
    # pull out heights of segment midpoints
    h_li = D6['h_li'][ind]
    # pull out along track x coordinates of segment midpoints
    x_atc = D6['x_atc'][ind]
    plt.plot(x_atc, h_li, **kwargs)

# +
D_2l = {}
D_2r = {}

# specify the rgt here:
rgt = "0027"
rgt = "0848"  # Ben's suggestion (overrides the line above)

# iterate over the repeat cycles
for cycle in ['03', '04', '05', '06', '07']:
    for filename in glob.glob(os.path.join(datapath, f'*ATL06_*_{rgt}{cycle}*_003*.h5')):
        try:
            # read the left-beam data
            D_2l[filename] = atl06_to_dict(filename, '/gt2l', index=None, epsg=3031)
            # read the right-beam data
            D_2r[filename] = atl06_to_dict(filename, '/gt2r', index=None, epsg=3031)
            # plot the locations in the previous plot
            map_ax.plot(D_2r[filename]['x'], D_2r[filename]['y'], 'k');
            map_ax.plot(D_2l[filename]['x'], D_2l[filename]['y'], 'k');
        except Exception as e:
            print(f'filename=(unknown), exception={e}')

plt.figure();
for filename, Di in D_2l.items():
    # Plot only points that have ATL06_quality_summary==0 (good points)
    hl = plot_elevation(Di, ind=Di['atl06_quality_summary'] == 0, label=f"cycle={Di['cycle']}")
    # hl=plt.plot(Di['x_atc'][Di['atl06_quality_summary']==0], Di['h_li'][Di['atl06_quality_summary']==0], '.', label=f"cycle={Di['cycle']}")
plt.legend()
plt.xlabel('x_atc')
plt.ylabel('elevation');
# -

# Pull out a segment and cross correlate:
#
# Let's try x_atc = 2.935e7 thru 2.93e7 (just from looking through data)

# +
cycles = []  # names of cycles with data
for filename, Di in D_2l.items():
    cycles += [str(Di['cycle']).zfill(2)]
cycles.sort()

# x1 = 2.93e7
# x2 = 2.935e7
beams = ['gt1l', 'gt1r', 'gt2l', 'gt2r', 'gt3l', 'gt3r']

### extract and plot data from all available cycles
fig, axs = plt.subplots(4, 1)
x_atc = {}
h_li = {}
h_li_diff = {}
times = {}
for cycle in cycles:
    # find Di that matches cycle:
    Di = {}
    x_atc[cycle] = {}
    h_li[cycle] = {}
    h_li_diff[cycle] = {}
    times[cycle] = {}
    filenames = glob.glob(os.path.join(datapath, f'*ATL06_*_{rgt}{cycle}*_003*.h5'))
    for filename in filenames:
        try:
            for beam in beams:
                Di[filename] = atl06_to_dict(filename, '/'+ beam, index=None, epsg=3031)
                times[cycle][beam] = Di[filename]['data_start_utc']
                # extract h_li and x_atc for that section
                x_atc_tmp = Di[filename]['x_atc']
                h_li_tmp = Di[filename]['h_li']#[ixs]
                # segment ids:
                seg_ids = Di[filename]['segment_id']
                # print(len(seg_ids), len(x_atc_tmp))
                # make a monotonically increasing x vector
                # assumes dx = 20 exactly, so be carefull referencing back
                # indices starting at zero, using the segment_id field, so any
                # skipped segment will be kept in correct location
                ind = seg_ids - np.nanmin(seg_ids)
                x_full = np.arange(np.max(ind)+1) * 20 + x_atc_tmp[0]
                h_full = np.zeros(np.max(ind)+1) + np.NaN
                h_full[ind] = h_li_tmp
                x_atc[cycle][beam] = x_full
                h_li[cycle][beam] = h_full
                # ### here is where you would put a filter
                # # you would want to de-mean and detrend that section first:
                # h = h_full
                # x = x_full
                # h = h - np.nanmean(h) # de-mean
                # h = scipy.signal.detrend(h, type = 'linear') # de-trend; need to deal with nans first
                # # use scipy.signal.filter to filter
                #
                # differentiate that section of data
                h_diff = (h_full[1:] - h_full[0:-1]) / (x_full[1:] - x_full[0:-1])
                h_li_diff[cycle][beam] = h_diff
                # plot
                axs[0].plot(x_full, h_full)
                axs[1].plot(x_full[1:], h_diff)
                # axs[2].plot(x_atc_tmp[1:] - x_atc_tmp[:-1])
                axs[2].plot(np.isnan(h_full))
                axs[3].plot(seg_ids[1:]- seg_ids[:-1])
        except:
            # NOTE(review): bare except referencing `e`, which is never bound
            # in this clause -- if this branch runs it will raise NameError.
            print(f'filename=(unknown), exception={e}')
# -

# +
n_veloc = len(cycles) - 1
segment_length = 5000  # m
x1 = 2.925e7#x_atc[cycles[0]][beams[0]][1000] <-- the very first x value in a file; doesn't work, I think b/c nans # 2.93e7
search_width = 1000  # m
dx = 20  # meters between x_atc points

for veloc_number in range(n_veloc):
    # pair of consecutive cycles to difference
    cycle1 = cycles[veloc_number]
    cycle2 = cycles[veloc_number+1]
    t1_string = times[cycle1]['gt1l'][0].astype(str)  # figure out later if just picking hte first one it ok
    t1 = Time(t1_string)
    t2_string = times[cycle2]['gt1l'][0].astype(str)  # figure out later if just picking hte first one it ok
    t2 = Time(t2_string)
    dt = (t2 - t1).jd  # difference in julian days

    velocities = {}
    for beam in beams:
        fig1, axs = plt.subplots(4, 1)

        # cut out small chunk of data at time t1 (first cycle)
        x_full_t1 = x_atc[cycle1][beam]
        ix_x1 = np.arange(len(x_full_t1))[x_full_t1 >= x1][0]
        ix_x2 = ix_x1 + int(np.round(segment_length/dx))
        x_t1 = x_full_t1[ix_x1:ix_x2]
        h_li1 = h_li_diff[cycle1][beam][ix_x1-1:ix_x2-1]  # start 1 index earlier because the data are differentiated

        # cut out a wider chunk of data at time t2 (second cycle)
        x_full_t2 = x_atc[cycle2][beam]
        ix_x3 = ix_x1 - int(np.round(search_width/dx))  # offset on earlier end by # indices in search_width
        ix_x4 = ix_x2 + int(np.round(search_width/dx))  # offset on later end by # indices in search_width
        x_t2 = x_full_t2[ix_x3:ix_x4]
        h_li2 = h_li_diff[cycle2][beam][ix_x3:ix_x4]

        # plot data
        axs[0].plot(x_t2, h_li2, 'r')
        axs[0].plot(x_t1, h_li1, 'k')
        axs[0].set_xlabel('x_atc (m)')

        # correlate old with newer data
        corr = correlate(h_li1, h_li2, mode='valid', method='direct')
        norm_val = np.sqrt(np.sum(h_li1**2)*np.sum(h_li2**2))  # normalize so values range between 0 and 1
        corr = corr / norm_val
        # lagvec = np.arange( -(len(h_li1) - 1), len(h_li2), 1)# for mode = 'full'
        # lagvec = np.arange( -int(search_width/dx) - 1, int(search_width/dx) +1, 1) # for mode = 'valid'
        lagvec = np.arange(- int(np.round(search_width/dx)), int(search_width/dx) +1, 1)  # for mode = 'valid'
        shift_vec = lagvec * dx

        ix_peak = np.arange(len(corr))[corr == np.nanmax(corr)][0]
        best_lag = lagvec[ix_peak]
        best_shift = shift_vec[ix_peak]
        velocities[beam] = best_shift/(dt/365)

        axs[1].plot(lagvec, corr)
        axs[1].plot(lagvec[ix_peak], corr[ix_peak], 'r*')
        axs[1].set_xlabel('lag (samples)')
        axs[2].plot(shift_vec, corr)
        axs[2].plot(shift_vec[ix_peak], corr[ix_peak], 'r*')
        axs[2].set_xlabel('shift (m)')

        # plot shifted data
        axs[3].plot(x_t2, h_li2, 'r')
        axs[3].plot(x_t1 - best_shift, h_li1, 'k')
        axs[3].set_xlabel('x_atc (m)')

        axs[0].text(x_t2[100], 0.6*np.nanmax(h_li2), beam)
        axs[1].text(lagvec[5], 0.6*np.nanmax(corr), 'best lag: ' + str(best_lag) + '; corr val: ' + str(np.round(corr[ix_peak], 3)))
        axs[2].text(shift_vec[5], 0.6*np.nanmax(corr), 'best shift: ' + str(best_shift) + ' m'+ '; corr val: ' + str(np.round(corr[ix_peak], 3)))
        axs[2].text(shift_vec[5], 0.3*np.nanmax(corr), 'veloc of ' + str(np.round(best_shift/(dt/365), 1)) + ' m/yr')
        fig1.suptitle('black = older cycle data, red = newer cycle data to search across')
# -

# +
n_veloc = len(cycles) - 1
segment_length = 1000  # m
search_width = 500  # m
dx = 20  # meters between x_atc points
x1 = 2.915e7#x_atc[cycles[0]][beams[0]][1000] <-- the very first x value in a file; doesn't work, I think b/c nans # 2.93e7

# NOTE(review): `veloc_number` here is the leftover value from the loop in
# the previous cell -- this cell only runs after that one.
x1s = x_atc[cycles[veloc_number]][beams[0]][search_width:-segment_length-2*search_width:10]
velocities = {}
for beam in beams:
    velocities[beam] = np.empty_like(x1s)

for xi, x1 in enumerate(x1s):
    for veloc_number in range(n_veloc):
        cycle1 = cycles[veloc_number]
        cycle2 = cycles[veloc_number+1]
        t1_string = times[cycle1]['gt1l'][0].astype(str)  # figure out later if just picking hte first one it ok
        t1 = Time(t1_string)
        t2_string = times[cycle2]['gt1l'][0].astype(str)  # figure out later if just picking hte first one it ok
        t2 = Time(t2_string)
        dt = (t2 - t1).jd  # difference in julian days

        for beam in beams:
            # cut out small chunk of data at time t1 (first cycle)
            x_full_t1 = x_atc[cycle1][beam]
            ix_x1 = np.arange(len(x_full_t1))[x_full_t1 >= x1][0]
            ix_x2 = ix_x1 + int(np.round(segment_length/dx))
            x_t1 = x_full_t1[ix_x1:ix_x2]
            h_li1 = h_li_diff[cycle1][beam][ix_x1-1:ix_x2-1]  # start 1 index earlier because the data are differentiated

            # cut out a wider chunk of data at time t2 (second cycle)
            x_full_t2 = x_atc[cycle2][beam]
            ix_x3 = ix_x1 - int(np.round(search_width/dx))  # offset on earlier end by # indices in search_width
            ix_x4 = ix_x2 + int(np.round(search_width/dx))  # offset on later end by # indices in search_width
            x_t2 = x_full_t2[ix_x3:ix_x4]
            h_li2 = h_li_diff[cycle2][beam][ix_x3:ix_x4]

            # correlate old with newer data
            corr = correlate(h_li1, h_li2, mode='valid', method='direct')
            norm_val = np.sqrt(np.sum(h_li1**2)*np.sum(h_li2**2))  # normalize so values range between 0 and 1
            corr = corr / norm_val
            # lagvec = np.arange( -(len(h_li1) - 1), len(h_li2), 1)# for mode = 'full'
            # lagvec = np.arange( -int(search_width/dx) - 1, int(search_width/dx) +1, 1) # for mode = 'valid'
            lagvec = np.arange(- int(np.round(search_width/dx)), int(search_width/dx) +1, 1)  # for mode = 'valid'
            shift_vec = lagvec * dx

            # an all-NaN correlation means no usable overlap at this x1
            if all(np.isnan(corr)):
                velocities[beam][xi] = np.nan
            else:
                ix_peak = np.arange(len(corr))[corr == np.nanmax(corr)][0]
                best_lag = lagvec[ix_peak]
                best_shift = shift_vec[ix_peak]
                velocities[beam][xi] = best_shift/(dt/365)
# -

# +
plt.figure()
ax1 = plt.subplot(211)
for filename, Di in D_2l.items():
    # Plot only points that have ATL06_quality_summary==0 (good points)
    hl = plot_elevation(Di, ind=Di['atl06_quality_summary'] == 0, label=f"cycle={Di['cycle']}")
    # hl=plt.plot(Di['x_atc'][Di['atl06_quality_summary']==0], Di['h_li'][Di['atl06_quality_summary']==0], '.', label=f"cycle={Di['cycle']}")
plt.legend()
plt.ylabel('elevation');

ax2 = plt.subplot(212, sharex=ax1)
for beam in beams:
    plt.plot(x1s+dx*(segment_length/2), velocities[beam], '.', alpha=0.2, ms=3, label=beam)
plt.ylabel('velocity (m/yr)')
plt.xlabel('x_atc')
plt.ylim(0, 1500)
plt.legend()
# -
contributors/ben_hills/SHARE_2_test_xcorr_FIS.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # AI-LAB LESSON 1: Uninformed Search Strategies
#
# Environment: **SmallMaze** (<img src="images/maze.png" width="300">).
# The agent starts in cell (0, 2) and has to reach the treasure in (4, 3).
#
# Every search function returns a tuple (path, time_cost, space_cost):
#
# * **path** - tuple of state identifiers forming a path from the start state
#   to the goal state. None if no solution is found.
# * **time_cost** - the number of nodes checked during the exploration.
# * **space_cost** - the maximum number of nodes in memory at the same time.
#
# `build_path(node)` (from the lab utils) returns a tuple of states from the
# root node (excluded) to another node by following parent links.

# +
import os, sys, time, math

# make ../tools (the AI-lab helper package) importable
module_path = os.path.abspath(os.path.join('../tools'))
if module_path not in sys.path:
    sys.path.append(module_path)

from utils.ai_lab_functions import *
import gym, envs
# -

# ### Assignment 1: Breadth-First Search (BFS)
#
# Implement both the tree_search and graph_search versions of BFS
# (pseudo-code: images/tree-graph-search.png and images/bfs.png, from
# *Artificial Intelligence: A Modern Approach*).

def BFS_TreeSearch(problem):
    """
    Tree Search BFS

    Args:
        problem: OpenAI Gym environment

    Returns:
        (path, time_cost, space_cost): solution as a path and stats.
        Implicitly returns None when the frontier empties without a
        solution (cannot happen on SmallMaze, which always has one).
    """
    node = Node(problem.startstate, None)
    time_cost = 0
    space_cost = 1
    frontier = NodeQueue()
    frontier.add(node)
    # NOTE(review): the goal test happens at child-generation time, so a
    # problem whose start state is already the goal is not detected here.
    while not frontier.is_empty():
        node = frontier.remove()
        space_cost += 1
        for action in range(problem.action_space.n):
            # tree search: children are generated without duplicate checks
            child = Node(problem.sample(node.state, action), node)
            if child.state == problem.goalstate:
                return build_path(child), time_cost, space_cost
            time_cost += 1
            frontier.add(child)

def BFS_GraphSearch(problem):
    """
    Graph Search BFS

    Args:
        problem: OpenAI Gym environment

    Returns:
        (path, time_cost, space_cost): solution as a path and stats.
        Implicitly returns None when the frontier empties without a
        solution.
    """
    node = Node(problem.startstate, None)
    time_cost = 0
    space_cost = 1
    frontier = NodeQueue()
    explored = NodeQueue()
    frontier.add(node)
    while not frontier.is_empty():
        node = frontier.remove()
        explored.add(node)
        for action in range(problem.action_space.n):
            child = Node(problem.sample(node.state, action), node)
            # graph search: skip states already explored or waiting in the
            # frontier (NodeQueue membership is by state)
            if child.state not in explored and child.state not in frontier:
                if child.state == problem.goalstate:
                    return build_path(child), time_cost, space_cost
                time_cost += 1
                frontier.add(child)

# The following code calls your tree search and graph search version of BFS
# and prints the results

# +
envname = "SmallMaze-v0"
environment = gym.make(envname)

solution_ts, time_ts, memory_ts = BFS_TreeSearch(environment)
solution_gs, time_gs, memory_gs = BFS_GraphSearch(environment)

print("\n----------------------------------------------------------------")
print("\tBFS TREE SEARCH PROBLEM: ")
print("----------------------------------------------------------------")
print("Solution: {}".format(solution_2_string(solution_ts, environment)))
print("N° of nodes explored: {}".format(time_ts))
print("Max n° of nodes in memory: {}".format(memory_ts))

print("\n----------------------------------------------------------------")
print("\tBFS GRAPH SEARCH PROBLEM: ")
print("----------------------------------------------------------------")
print("Solution: {}".format(solution_2_string(solution_gs, environment)))
print("N° of nodes explored: {}".format(time_gs))
print("Max n° of nodes in memory: {}".format(memory_gs))
# -

# Correct results can be found [here](lesson_1_results.txt).

# ### Assignment 2: Depth-Limited Search (DLS) and Iterative Deepening depth-first Search (IDS)
#
# Implement DLS in both graph-search and tree-search versions, plus the
# iterative-deepening driver (pseudo-code: images/dls.png and images/ids.png).
#
# `Node` takes an optional third constructor argument, `pathcost`, used here
# to track the depth of a node (the root has pathcost 0, its children 1, ...).

start = environment.startstate
root = Node(start)  # parent = None and pathcost = 0 as default
child = Node(environment.sample(start, 0), root, root.pathcost + 1)  # pathcost is the third argument
print("Root pathcost: {}\tChild pathcost: {}".format(root.pathcost, child.pathcost))

def DLS(problem, limit, RDLS_Function):
    """
    DLS: run one depth-limited search from the problem's start state.

    Args:
        problem: OpenAI Gym environment
        limit: depth limit for the exploration, negative number means 'no limit'
        RDLS_Function: recursive DLS implementation to delegate to
            (Recursive_DLS_TreeSearch or Recursive_DLS_GraphSearch)

    Returns:
        (path, time_cost, space_cost): solution as a path and stats; the
        path slot is "cut_off" or "failure" when no solution is found.
    """
    node = Node(problem.startstate, None)
    return RDLS_Function(node, problem, limit, set())

def Recursive_DLS_GraphSearch(node, problem, limit, explored):
    """
    Recursive DLS (Graph Search Version)

    Args:
        node: node to explore
        problem: OpenAI Gym environment
        limit: depth limit for the exploration, negative number means 'no limit'
        explored: set of completely explored states (shared across the recursion)

    Returns:
        (path, time_cost, space_cost): solution as a path and stats; the
        path slot is "cut_off" when the limit was hit and "failure" when
        the subtree holds no solution.
    """
    cutoff_occurred = False
    if problem.goalstate == node.state:
        return build_path(node), 1, len(explored)
    if limit == 0:
        # depth budget exhausted before reaching the goal
        return "cut_off", 1, len(explored)

    explored.add(node.state)
    space_cost = node.pathcost
    time_cost = 1
    for action in range(problem.action_space.n):
        child = Node(problem.sample(node.state, action), node, node.pathcost+1)
        if child.state not in explored:
            result, recur_time, recur_space = Recursive_DLS_GraphSearch(child, problem, limit - 1, explored)
            time_cost += recur_time
            space_cost = max(space_cost, recur_space)
            if result == "cut_off":
                cutoff_occurred = True
            else:
                if result != "failure":
                    return result, time_cost, space_cost
    # distinguish "deeper limit might help" from "no solution at any depth"
    if cutoff_occurred:
        return "cut_off", time_cost, space_cost
    return "failure", time_cost, space_cost

def Recursive_DLS_TreeSearch(node, problem, limit, explored=None):
    """
    Recursive DLS (Tree Search Version)

    Args:
        node: node to explore
        problem: OpenAI Gym environment
        limit: depth limit for the exploration, negative number means 'no limit'
        explored: unused; kept so the signature matches the graph-search version

    Returns:
        (path, time_cost, space_cost): solution as a path and stats; the
        path slot is "cut_off" when the limit was hit and "failure" when
        the subtree holds no solution.
    """
    space_cost = node.pathcost
    time_cost = 1
    cutoff_occurred = False
    if problem.goalstate == node.state:
        return build_path(node), time_cost, node.pathcost
    if limit == 0:
        return "cut_off", time_cost, node.pathcost

    for action in range(problem.action_space.n):
        # tree search: no duplicate detection, children always expanded
        child = Node(problem.sample(node.state, action), node, node.pathcost+1)
        result, recur_time, recur_space = Recursive_DLS_TreeSearch(child, problem, limit - 1, None)
        time_cost += recur_time
        space_cost = max(space_cost, recur_space)
        if result == "cut_off":
            cutoff_occurred = True
        else:
            if result != "failure":
                return result, time_cost, space_cost
    if cutoff_occurred:
        return "cut_off", time_cost, space_cost
    return "failure", time_cost, space_cost

def IDS(problem, DLS_Function):
    """
    Iterative Deepening depth-first Search (IDS).

    Repeatedly runs DLS with increasing depth limits until the search is
    no longer cut off by the limit.  (The original notebook left a
    placeholder `return 0, 0, 0, 0` here, which made the function always
    return zeros; this is the implemented version.)

    Args:
        problem: OpenAI Gym environment
        DLS_Function: recursive DLS implementation to use
            (Recursive_DLS_TreeSearch or Recursive_DLS_GraphSearch)

    Returns:
        (path, total_time_cost, total_space_cost, iterations): solution as
        a path, stats accumulated over all depth limits tried, and the
        final depth limit.
    """
    total_time_cost = 0
    total_space_cost = 1
    for i in zero_to_infinity():
        # one depth-limited search with the current limit
        solution_dls, time_cost_dls, space_cost_dls = DLS(problem, i, DLS_Function)
        # time accumulates across iterations; space is the max ever held
        total_time_cost += time_cost_dls
        total_space_cost = max(total_space_cost, space_cost_dls)
        # stop as soon as the limit is no longer the reason we failed:
        # solution_dls is then either a real path or "failure"
        if solution_dls != "cut_off":
            return solution_dls, total_time_cost, total_space_cost, i

# The following code calls your version of IDS and prints the results:

# +
envname = "SmallMaze-v0"
environment = gym.make(envname)

solution_ts, time_ts, memory_ts, iterations_ts = IDS(environment, Recursive_DLS_TreeSearch)
solution_gs, time_gs, memory_gs, iterations_gs = IDS(environment, Recursive_DLS_GraphSearch)

print("\n----------------------------------------------------------------")
print("\tIDS TREE SEARCH PROBLEM: ")
print("----------------------------------------------------------------")
print("Necessary Iterations: {}".format(iterations_ts))
print("Solution: {}".format(solution_2_string(solution_ts, environment)))
print("N° of nodes explored: {}".format(time_ts))
print("Max n° of nodes in memory: {}".format(memory_ts))

print("\n----------------------------------------------------------------")
print("\tIDS GRAPH SEARCH PROBLEM: ")
print("----------------------------------------------------------------")
print("Necessary Iterations: {}".format(iterations_gs))
print("Solution: {}".format(solution_2_string(solution_gs, environment)))
print("N° of nodes explored: {}".format(time_gs))
print("Max n° of nodes in memory: {}".format(memory_gs))
# -

# Correct results can be found [here](lesson_1_results.txt).
#
# ### Discussion
#
# Now that you have correctly implemented both BFS and IDS what can you say
# about the solutions they compute? Are there significant differences in
# the stats?
lesson_1/lesson_1_problem.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Welcome to the Introduction to Python Tutorials # # ### Contents # # - [Introduction](notebooks/index.ipynb) # - Data Types: [Variables, Strings & Numbers](notebooks/var_string_num.ipynb) # - [Lists and Tuples](notebooks/lists_tuples.ipynb) # - Optional: [More Lists and Tuples](notebooks/more_lists_tuples.ipynb) # - Selection & Iteration: # - [If statements](notebooks/if_statements.ipynb) # - [While and Input](notebooks/while_input.ipynb) # - Introduction to PyGame # - Programming with Functions # - [Introducing Functions](notebooks/introducing_functions.ipynb) # - [More Functions](notebooks/more_functions.ipynb) # - Creating a Sidescrolling Game with PyGame
START HERE.ipynb