code
stringlengths
2.5k
150k
kind
stringclasses
1 value
``` ###################### Query 1 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q1")\ .getOrCreate() df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") df.createOrReplaceTempView("lineitem") query = "select \ l_returnflag, \ l_linestatus, \ sum(l_quantity) as sum_qty, \ sum(l_extendedprice) as sum_base_price, \ sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, \ sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, \ avg(l_quantity) as avg_qty, \ avg(l_extendedprice) as avg_price, \ avg(l_discount) as avg_disc, \ count(*) as count_order \ from \ lineitem \ where \ l_shipdate <= date '1998-09-02' \ group by \ l_returnflag, l_linestatus \ order by \ l_returnflag, l_linestatus" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 2 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q2")\ .getOrCreate() p_df = spark.read.format("parquet").load("/orin_tpchnp_100/part") p_df.createOrReplaceTempView("part") s_df = spark.read.format("parquet").load("/orin_tpchnp_100/supplier") s_df.createOrReplaceTempView("supplier") ps_df = spark.read.format("parquet").load("/orin_tpchnp_100/partsupp") ps_df.createOrReplaceTempView("partsupp") n_df = spark.read.format("parquet").load("/orin_tpchnp_100/nation") n_df.createOrReplaceTempView("nation") r_df = spark.read.format("parquet").load("/orin_tpchnp_100/region") r_df.createOrReplaceTempView("region") query = "select \ s_acctbal, \ s_name, \ n_name, \ p_partkey, \ ps_supplycost, \ p_mfgr, \ s_address, \ s_phone, \ s_comment \ from \ part, \ supplier, \ partsupp, \ nation, \ region \ where \ p_partkey = ps_partkey \ and s_suppkey = ps_suppkey \ and p_size = 15 \ and p_type like '%BRASS' \ and s_nationkey = n_nationkey \ and n_regionkey = r_regionkey \ and 
r_name = 'EUROPE' \ and ps_supplycost = ( \ select \ min(ps_supplycost) \ from \ partsupp, \ supplier, \ nation, \ region \ where \ p_partkey = ps_partkey \ and s_suppkey = ps_suppkey \ and s_nationkey = n_nationkey \ and n_regionkey = r_regionkey \ and r_name = 'EUROPE') \ order by \ s_acctbal desc, n_name, s_name, p_partkey" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show(100) #%time sqlDF.show(100) spark.stop() ###################### Query 3 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q3")\ .getOrCreate() c_df = spark.read.format("parquet").load("/orin_tpchnp_100/customer") c_df.createOrReplaceTempView("customer") o_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") o_df.createOrReplaceTempView("orders") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") query = "select \ l_orderkey, \ sum(l_extendedprice * (1 - l_discount)) as revenue, \ o_orderdate, \ o_shippriority \ from \ customer, orders, lineitem \ where \ c_mktsegment = 'BUILDING' \ and c_custkey = o_custkey \ and l_orderkey = o_orderkey \ and o_orderdate < date '1995-03-15' \ and l_shipdate > date '1995-03-15' \ group by \ l_orderkey, o_orderdate, o_shippriority \ order by \ revenue desc, o_orderdate" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show(10) %time sqlDF.show(10) spark.stop() ###################### Query 4 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q4")\ .getOrCreate() orders_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") orders_df.createOrReplaceTempView("orders") df.createOrReplaceTempView("lineitem") query = "select \ o_orderpriority, \ count(*) as order_count \ from \ orders \ where \ o_orderdate >= date '1993-07-01' \ 
and o_orderdate < date '1993-10-01' \ and exists \ (select * \ from \ lineitem \ where \ l_orderkey = o_orderkey \ and l_commitdate < l_receiptdate) \ group by \ o_orderpriority \ order by \ o_orderpriority" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 5 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q5")\ .getOrCreate() c_df = spark.read.format("parquet").load("/orin_tpchnp_100/customer") c_df.createOrReplaceTempView("customer") o_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") o_df.createOrReplaceTempView("orders") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") s_df = spark.read.format("parquet").load("/orin_tpchnp_100/supplier") s_df.createOrReplaceTempView("supplier") n_df = spark.read.format("parquet").load("/orin_tpchnp_100/nation") n_df.createOrReplaceTempView("nation") r_df = spark.read.format("parquet").load("/orin_tpchnp_100/region") r_df.createOrReplaceTempView("region") query = "select \ n_name, \ sum(l_extendedprice * (1 - l_discount)) as revenue \ from \ customer, \ orders, \ lineitem, \ supplier, \ nation, \ region \ where \ c_custkey = o_custkey \ and l_orderkey = o_orderkey \ and l_suppkey = s_suppkey \ and c_nationkey = s_nationkey \ and s_nationkey = n_nationkey \ and n_regionkey = r_regionkey \ and r_name = 'ASIA' \ and o_orderdate >= date '1994-01-01' \ and o_orderdate < date '1995-01-01' \ group by \ n_name \ order by \ revenue desc " sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 6 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q6")\ .getOrCreate() df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") 
df.createOrReplaceTempView("lineitem") query = "select \ sum(l_extendedprice * l_discount) as revenue \ from \ lineitem \ where \ l_shipdate >= date '1994-01-01' \ and l_shipdate < date '1995-01-01' \ and l_discount between 0.05 and 0.07 \ and l_quantity < 24" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 7 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q7")\ .getOrCreate() s_df = spark.read.format("parquet").load("/orin_tpchnp_100/supplier") s_df.createOrReplaceTempView("supplier") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") o_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") o_df.createOrReplaceTempView("orders") c_df = spark.read.format("parquet").load("/orin_tpchnp_100/customer") c_df.createOrReplaceTempView("customer") n_df = spark.read.format("parquet").load("/orin_tpchnp_100/nation") n_df.createOrReplaceTempView("nation") query = "select \ supp_nation, \ cust_nation, \ l_year, \ sum(volume) as revenue \ from \ (select \ n1.n_name as supp_nation, \ n2.n_name as cust_nation, \ year(l_shipdate) as l_year, \ l_extendedprice * (1 - l_discount) as volume \ from \ supplier, \ lineitem, \ orders, \ customer, \ nation n1, \ nation n2 \ where \ s_suppkey = l_suppkey \ and o_orderkey = l_orderkey \ and c_custkey = o_custkey \ and s_nationkey = n1.n_nationkey \ and c_nationkey = n2.n_nationkey \ and (\ (n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') \ or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE') \ ) \ and l_shipdate between date '1995-01-01' and date '1996-12-31' \ ) as shipping \ group by \ supp_nation, \ cust_nation, \ l_year \ order by \ supp_nation, \ cust_nation, \ l_year" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 8 
##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q8")\ .getOrCreate() p_df = spark.read.format("parquet").load("/orin_tpchnp_100/part") p_df.createOrReplaceTempView("part") s_df = spark.read.format("parquet").load("/orin_tpchnp_100/supplier") s_df.createOrReplaceTempView("supplier") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") o_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") o_df.createOrReplaceTempView("orders") c_df = spark.read.format("parquet").load("/orin_tpchnp_100/customer") c_df.createOrReplaceTempView("customer") n_df = spark.read.format("parquet").load("/orin_tpchnp_100/nation") n_df.createOrReplaceTempView("nation") r_df = spark.read.format("parquet").load("/orin_tpchnp_100/region") r_df.createOrReplaceTempView("region") query = "select \ o_year, \ sum(case when nation = ':1' then volume else 0 end) / sum(volume) as mkt_share \ from \ (select \ extract(year from o_orderdate) as o_year, \ l_extendedprice * (1 - l_discount) as volume, \ n2.n_name as nation \ from \ part, \ supplier, \ lineitem, \ orders, \ customer, \ nation n1, \ nation n2, \ region \ where \ p_partkey = l_partkey \ and s_suppkey = l_suppkey \ and l_orderkey = o_orderkey \ and o_custkey = c_custkey \ and c_nationkey = n1.n_nationkey \ and n1.n_regionkey = r_regionkey \ and r_name = ':2' \ and s_nationkey = n2.n_nationkey \ and o_orderdate between date '1995-01-01' and date '1996-12-31' \ and p_type = ':3' \ ) as all_nations \ group by \ o_year \ order by \ o_year" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 9 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q9")\ .getOrCreate() p_df = spark.read.format("parquet").load("/orin_tpchnp_100/part") 
p_df.createOrReplaceTempView("part") s_df = spark.read.format("parquet").load("/orin_tpchnp_100/supplier") s_df.createOrReplaceTempView("supplier") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") ps_df = spark.read.format("parquet").load("/orin_tpchnp_100/partsupp") ps_df.createOrReplaceTempView("partsupp") o_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") o_df.createOrReplaceTempView("orders") n_df = spark.read.format("parquet").load("/orin_tpchnp_100/nation") n_df.createOrReplaceTempView("nation") query = "select \ nation, \ o_year, \ sum(amount) as sum_profit \ from \ (select \ n_name as nation, \ year(o_orderdate) as o_year, \ l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount \ from \ part, \ supplier, \ lineitem, \ partsupp, \ orders, \ nation \ where \ s_suppkey = l_suppkey \ and ps_suppkey = l_suppkey \ and ps_partkey = l_partkey \ and p_partkey = l_partkey \ and o_orderkey = l_orderkey \ and s_nationkey = n_nationkey \ and p_name like '%green%'\ ) as profit \ group by \ nation, \ o_year \ order by \ nation, o_year desc" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 10 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q10")\ .getOrCreate() c_df = spark.read.format("parquet").load("/orin_tpchnp_100/customer") c_df.createOrReplaceTempView("customer") o_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") o_df.createOrReplaceTempView("orders") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") n_df = spark.read.format("parquet").load("/orin_tpchnp_100/nation") n_df.createOrReplaceTempView("nation") query = "select \ c_custkey, \ c_name, \ sum(l_extendedprice * (1 - l_discount)) as revenue, \ c_acctbal, \ n_name, \ c_address, 
\ c_phone, \ c_comment \ from \ customer, \ orders, \ lineitem, \ nation \ where \ c_custkey = o_custkey \ and l_orderkey = o_orderkey \ and o_orderdate >= date '1993-10-01' \ and o_orderdate < date '1994-01-01' \ and l_returnflag = 'R' \ and c_nationkey = n_nationkey \ group by \ c_custkey, \ c_name, \ c_acctbal, \ c_phone, \ n_name, \ c_address, \ c_comment \ order by \ revenue desc" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show(20) %time sqlDF.show(20) spark.stop() ###################### Query 11 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q11")\ .getOrCreate() p_df = spark.read.format("parquet").load("/orin_tpchnp_100/partsupp") p_df.createOrReplaceTempView("partsupp") s_df = spark.read.format("parquet").load("/orin_tpchnp_100/supplier") s_df.createOrReplaceTempView("supplier") n_df = spark.read.format("parquet").load("/orin_tpchnp_100/nation") n_df.createOrReplaceTempView("nation") query = "select \ ps_partkey, \ sum(ps_supplycost * ps_availqty) as value \ from \ partsupp, \ supplier, \ nation \ where \ ps_suppkey = s_suppkey \ and s_nationkey = n_nationkey \ and n_name = 'GERMANY' \ group by \ ps_partkey \ having \ sum(ps_supplycost * ps_availqty) > \ (select \ sum(ps_supplycost * ps_availqty) * 0.0001000000 \ from \ partsupp, \ supplier, \ nation \ where \ ps_suppkey = s_suppkey \ and s_nationkey = n_nationkey \ and n_name = 'GERMANY') \ order by \ value desc" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 12 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q12")\ .getOrCreate() o_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") o_df.createOrReplaceTempView("orders") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") 
l_df.createOrReplaceTempView("lineitem") query = "select \ l_shipmode, \ sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, \ sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count \ from \ orders, \ lineitem \ where \ o_orderkey = l_orderkey \ and l_shipmode in ('MAIL', 'SHIP') \ and l_commitdate < l_receiptdate \ and l_shipdate < l_commitdate \ and l_receiptdate >= date '1994-01-01' \ and l_receiptdate < date '1995-01-01' \ group by \ l_shipmode \ order by \ l_shipmode" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 13 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q13")\ .getOrCreate() c_df = spark.read.format("parquet").load("/orin_tpchnp_100/customer") c_df.createOrReplaceTempView("customer") o_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") o_df.createOrReplaceTempView("orders") query = "select \ c_count, \ count(*) as custdist \ from \ (select \ c_custkey, \ count(o_orderkey) as c_count \ from \ customer \ left outer join \ orders \ on \ c_custkey = o_custkey \ and o_comment not like '%special%requests%' \ group by \ c_custkey) as c_orders \ group by \ c_count \ order by \ custdist desc, c_count desc" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 14 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q14")\ .getOrCreate() l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") p_df = spark.read.format("parquet").load("/orin_tpchnp_100/part") p_df.createOrReplaceTempView("part") query = "select \ 100.00 * sum( \ case when p_type like 'PROMO%' then 
l_extendedprice * (1 - l_discount) else 0 end \ ) / sum(l_extendedprice * (1 - l_discount)) \ as promo_revenue \ from \ lineitem, \ part \ where \ l_partkey = p_partkey \ and l_shipdate >= date '1995-09-01' \ and l_shipdate < date '1995-10-01'" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 15 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q15")\ .getOrCreate() s_df = spark.read.format("parquet").load("/orin_tpchnp_100/supplier") s_df.createOrReplaceTempView("supplier") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") query = "with revenue0 as \ (select \ l_suppkey as supplier_no, \ sum(l_extendedprice * (1 - l_discount)) as total_revenue \ from \ lineitem \ where \ l_shipdate >= date '1996-01-01' \ and l_shipdate < date '1996-04-01' \ group by \ l_suppkey) \ select \ s_suppkey, \ s_name, \ s_address, \ s_phone, \ total_revenue \ from \ supplier, \ revenue0 \ where \ s_suppkey = supplier_no \ and total_revenue = \ (select \ max(total_revenue) \ from \ revenue0) \ order by \ s_suppkey" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 16 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q16")\ .getOrCreate() c_df = spark.read.format("parquet").load("/orin_tpchnp_100/partsupp") c_df.createOrReplaceTempView("partsupp") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/part") l_df.createOrReplaceTempView("part") o_df = spark.read.format("parquet").load("/orin_tpchnp_100/supplier") o_df.createOrReplaceTempView("supplier") query = "select \ p_brand, \ p_type, \ p_size, \ count(distinct ps_suppkey) as supplier_cnt \ from \ partsupp, \ part \ where \ p_partkey = ps_partkey \ and 
p_brand <> 'Brand#45' \ and p_type not like 'MEDIUM POLISHED%' \ and p_size in (49, 14, 23, 45, 19, 3, 36, 9) \ and ps_suppkey not in \ (select \ s_suppkey \ from \ supplier \ where \ s_comment like '%Customer%Complaints%') \ group by \ p_brand, \ p_type, \ p_size \ order by \ supplier_cnt desc, p_brand, p_type, p_size" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 17 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q17")\ .getOrCreate() l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") p_df = spark.read.format("parquet").load("/orin_tpchnp_100/part") p_df.createOrReplaceTempView("part") query = "select \ sum(l_extendedprice) / 7.0 as avg_yearly \ from \ lineitem, \ part \ where \ p_partkey = l_partkey \ and p_brand = 'Brand#23' \ and p_container = 'MED BOX' \ and l_quantity < ( \ select \ 0.2 * avg(l_quantity) \ from \ lineitem \ where \ l_partkey = p_partkey)" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 18 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q18")\ .getOrCreate() c_df = spark.read.format("parquet").load("/orin_tpchnp_100/customer") c_df.createOrReplaceTempView("customer") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") o_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") o_df.createOrReplaceTempView("orders") query = "select \ c_name, \ c_custkey, \ o_orderkey, \ o_orderdate, \ o_totalprice, \ sum(l_quantity) \ from \ customer, \ orders, \ lineitem \ where \ o_orderkey in ( \ select \ l_orderkey \ from \ lineitem \ group by \ l_orderkey \ having \ sum(l_quantity) > 300 )\ and c_custkey 
= o_custkey \ and o_orderkey = l_orderkey \ group by \ c_name, \ c_custkey, \ o_orderkey, \ o_orderdate, \ o_totalprice \ order by \ o_totalprice desc, o_orderdate" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show(100) %time sqlDF.show(100) spark.stop() ###################### Query 19 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q19")\ .getOrCreate() l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") p_df = spark.read.format("parquet").load("/orin_tpchnp_100/part") p_df.createOrReplaceTempView("part") query = "select \ sum(l_extendedprice* (1 - l_discount)) as revenue \ from \ lineitem, \ part where ( \ p_partkey = l_partkey \ and p_brand = 'Brand#12' \ and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') \ and l_quantity >= 1 \ and l_quantity <= 1 + 10 \ and p_size between 1 and 5 \ and l_shipmode in ('AIR', 'AIR REG') \ and l_shipinstruct = 'DELIVER IN PERSON' \ ) or ( \ p_partkey = l_partkey \ and p_brand = 'Brand#23' \ and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') \ and l_quantity >= 10 \ and l_quantity <= 10 + 10 \ and p_size between 1 and 10 \ and l_shipmode in ('AIR', 'AIR REG') \ and l_shipinstruct = 'DELIVER IN PERSON' \ ) or ( \ p_partkey = l_partkey \ and p_brand = 'Brand#34' \ and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') \ and l_quantity >= 20 \ and l_quantity <= 20 + 10 \ and p_size between 1 and 15 \ and l_shipmode in ('AIR', 'AIR REG') \ and l_shipinstruct = 'DELIVER IN PERSON' \ )" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show(20) %time sqlDF.show(20) spark.stop() ###################### Query 20 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q20")\ .getOrCreate() s_df = spark.read.format("parquet").load("/orin_tpchnp_100/supplier") 
s_df.createOrReplaceTempView("supplier") n_df = spark.read.format("parquet").load("/orin_tpchnp_100/nation") n_df.createOrReplaceTempView("nation") ps_df = spark.read.format("parquet").load("/orin_tpchnp_100/partsupp") ps_df.createOrReplaceTempView("partsupp") p_df = spark.read.format("parquet").load("/orin_tpchnp_100/part") p_df.createOrReplaceTempView("part") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") query = "select \ s_name, \ s_address \ from \ supplier, \ nation \ where \ s_suppkey in \ (select \ ps_suppkey \ from \ partsupp \ where \ ps_partkey in \ (select \ p_partkey \ from \ part \ where \ p_name like 'forest%') \ and ps_availqty > \ (select \ 0.5 * sum(l_quantity) \ from \ lineitem \ where \ l_partkey = ps_partkey \ and l_suppkey = ps_suppkey \ and l_shipdate >= date '1994-01-01' \ and l_shipdate < date '1995-01-01')\ ) \ and s_nationkey = n_nationkey \ and n_name = 'CANADA' \ order by \ s_name" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 21 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q21")\ .getOrCreate() s_df = spark.read.format("parquet").load("/orin_tpchnp_100/supplier") s_df.createOrReplaceTempView("supplier") l_df = spark.read.format("parquet").load("/orin_tpchnp_100/lineitem") l_df.createOrReplaceTempView("lineitem") o_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") o_df.createOrReplaceTempView("orders") n_df = spark.read.format("parquet").load("/orin_tpchnp_100/nation") n_df.createOrReplaceTempView("nation") p_df = spark.read.format("parquet").load("/orin_tpchnp_100/part") p_df.createOrReplaceTempView("part") query = "select \ s_name, \ count(*) as numwait \ from \ supplier, \ lineitem l1, \ orders, \ nation \ where \ s_suppkey = l1.l_suppkey \ and o_orderkey = l1.l_orderkey \ and 
o_orderstatus = 'F' \ and l1.l_receiptdate > l1.l_commitdate \ and exists ( \ select \ * \ from \ lineitem l2 \ where \ l2.l_orderkey = l1.l_orderkey \ and l2.l_suppkey <> l1.l_suppkey \ ) and not exists ( \ select \ * \ from \ lineitem l3 \ where \ l3.l_orderkey = l1.l_orderkey \ and l3.l_suppkey <> l1.l_suppkey \ and l3.l_receiptdate > l3.l_commitdate\ ) and s_nationkey = n_nationkey \ and n_name = 'SAUDI ARABIA' \ group by \ s_name \ order by \ numwait desc, s_name" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ###################### Query 22 ##################### from pyspark.sql import SparkSession spark = SparkSession\ .builder\ .master('yarn-client')\ .appName("TPCH_Q22")\ .getOrCreate() c_df = spark.read.format("parquet").load("/orin_tpchnp_100/customer") c_df.createOrReplaceTempView("customer") o_df = spark.read.format("parquet").load("/orin_tpchnp_100/orders") o_df.createOrReplaceTempView("orders") query = "select \ cntrycode, \ count(*) as numcust, \ sum(c_acctbal) as totacctbal \ from \ ( \ select \ substring(c_phone, 1, 2) as cntrycode, \ c_acctbal \ from \ customer \ where \ substring(c_phone, 1, 2) in \ ('13', '31', '23', '29', '30', '18', '17') \ and c_acctbal > ( \ select \ avg(c_acctbal) \ from \ customer \ where \ c_acctbal > 0.00 \ and substring(c_phone, 1, 2) in \ ('13', '31', '23', '29', '30', '18', '17') \ ) \ and not exists ( \ select \ * \ from \ orders \ where \ o_custkey = c_custkey \ ) \ ) as custsale \ group by \ cntrycode \ order by \ cntrycode" sqlDF = spark.sql(query) print("RowBased Process") %time sqlDF.show() %time sqlDF.show() spark.stop() ```
github_jupyter
``` %run ../Python_files/util.py ##### read in raw data import openpyxl data_folder = '/home/jzh/Dropbox/Research/\ Data-driven_estimation_inverse_optimization/INRIX/Raw_data/' # load filtered INRIX attribute table raw data wb_INRIX = openpyxl.load_workbook(data_folder + 'filtered_INRIX_attribute_table.xlsx') # load filtered capacity attribute table raw data wb_capac = openpyxl.load_workbook(data_folder + 'filtered_capacity_attribute_table.xlsx') # load lookup table raw data wb_lookup = openpyxl.load_workbook(data_folder + 'roadinv_id_to_tmc_lookup.xlsx') # get sheet name from workbook sheet_INRIX_name = wb_INRIX.sheetnames[0].encode('utf-8') sheet_capac_name = wb_capac.sheetnames[0].encode('utf-8') sheet_lookup_name = wb_lookup.sheetnames[0].encode('utf-8') # get sheet of filtered INRIX attribute table raw data sheet_INRIX = wb_INRIX.get_sheet_by_name(sheet_INRIX_name) # get sheet of filtered capacity attribute table raw data sheet_capac = wb_capac.get_sheet_by_name(sheet_capac_name) # get sheet of lookup table raw data sheet_lookup = wb_lookup.get_sheet_by_name(sheet_lookup_name) ##### extract attributes of interest from INRIX sheet tmc_list = [] road_num_list = [] shape_length_list = [] for i in xrange(2, 1 + sheet_INRIX.max_row): tmc_list.append(sheet_INRIX.cell(row=i, column=2).value.encode('utf-8')) road_num_list.append(sheet_INRIX.cell(row=i, column=3).value.encode('utf-8')) shape_length_list.append(sheet_INRIX.cell(row=i, column=13).value) assert(len(tmc_list) == len(road_num_list) and \ len(road_num_list) == len(shape_length_list)) ##### extract attributes of interest from capacity sheet road_invent_list = [] length_list = [] route_num_list = [] AB_AM_capac_list = [] AB_MD_capac_list = [] AB_PM_capac_list = [] AB_NT_capac_list = [] for i in xrange(2, 1 + sheet_capac.max_row): road_invent_list.append(sheet_capac.cell(row=i, column=26).value) length_list.append(sheet_capac.cell(row=i, column=2).value) route_num_list.append(sheet_capac.cell(row=i, 
column=8).value) # take the period capacity factor into consideration AB_AM_capac_list.append((1.0/2.5)*sheet_capac.cell(row=i, column=18).value) AB_MD_capac_list.append((1.0/4.75)*sheet_capac.cell(row=i, column=20).value) AB_PM_capac_list.append((1.0/2.5)*sheet_capac.cell(row=i, column=22).value) AB_NT_capac_list.append((1.0/7.0)*sheet_capac.cell(row=i, column=24).value) assert(len(road_invent_list) == len(length_list) and \ len(length_list) == len(route_num_list) and \ len(route_num_list) == len(AB_AM_capac_list) and \ len(AB_AM_capac_list) == len(AB_MD_capac_list) and \ len(AB_MD_capac_list) == len(AB_PM_capac_list) and \ len(AB_PM_capac_list) == len(AB_NT_capac_list)) ##### extract attributes of interest from lookup sheet road_inv_ID_lookup_list = [] tmc_lookup_list = [] for i in xrange(2, 1 + sheet_lookup.max_row): road_inv_ID_lookup_list.append(sheet_lookup.cell(row=i, column=1).value) tmc_lookup_list.append(str(sheet_lookup.cell(row=i, column=4).value)) assert(len(road_inv_ID_lookup_list) == len(tmc_lookup_list)) # instantiation of RoadSegInr class road_seg_inr = RoadSegInr(tmc_list, road_num_list, shape_length_list) # instantiation of RoadSegCapac class road_seg_capac = RoadSegCapac(road_invent_list, length_list, route_num_list, \ AB_AM_capac_list, AB_MD_capac_list, \ AB_PM_capac_list, AB_NT_capac_list) # instantiation of LookUp class look_up = LookUp(road_inv_ID_lookup_list, tmc_lookup_list) # make a dictionary from look_up tmc_roadInv_dict = {i:j for (i, j) in zip(look_up.tmc, look_up.road_inv_ID)} # make dictionaries from road_seg_capac roadInv_capac_dict_AM = {i:j for (i, j) in zip(road_seg_capac.road_invent, \ road_seg_capac.AB_AM_capac)} roadInv_capac_dict_MD = {i:j for (i, j) in zip(road_seg_capac.road_invent, \ road_seg_capac.AB_MD_capac)} roadInv_capac_dict_PM = {i:j for (i, j) in zip(road_seg_capac.road_invent, \ road_seg_capac.AB_PM_capac)} roadInv_capac_dict_NT = {i:j for (i, j) in zip(road_seg_capac.road_invent, \ road_seg_capac.AB_NT_capac)} 
capac_AM = [] capac_MD = [] capac_PM = [] capac_NT = [] for i in range(len(road_seg_inr.tmc)): capac_AM.append(roadInv_capac_dict_AM[tmc_roadInv_dict[road_seg_inr.tmc[i]]]) capac_MD.append(roadInv_capac_dict_MD[tmc_roadInv_dict[road_seg_inr.tmc[i]]]) capac_PM.append(roadInv_capac_dict_PM[tmc_roadInv_dict[road_seg_inr.tmc[i]]]) capac_NT.append(roadInv_capac_dict_NT[tmc_roadInv_dict[road_seg_inr.tmc[i]]]) # instantiation of RoadSegInrCapac class road_seg_inr_capac = RoadSegInrCapac(tmc_list, road_num_list, shape_length_list, \ capac_AM, capac_MD, capac_PM, capac_NT) zdump(road_seg_inr_capac, '../temp_files/road_seg_inr_capac.pkz') ```
github_jupyter
# pipda A framework for data piping in python Inspired by [siuba][1], [dfply][2], [plydata][3] and [dplython][4], but with simple yet powerful APIs to mimic the `dplyr` and `tidyr` packages in python ## Installation ```shell pip install -U pipda ``` ## Usage Checkout [plyrda][6] for more detailed usages. ### Verbs Verbs are functions next to the piping sign (i.e. `>>`) receiving the data directly. ``` try: import pandas except ImportError: !pip install pandas import pandas as pd from pipda import ( register_verb, register_func, register_operator, evaluate_expr, Operator, Symbolic, Context ) f = Symbolic() df = pd.DataFrame({ 'x': [0, 1, 2, 3], 'y': ['zero', 'one', 'two', 'three'] }) df @register_verb(pd.DataFrame) def head(data, n=5): return data.head(n) df >> head(2) @register_verb(pd.DataFrame, context=Context.EVAL) def mutate(data, **kwargs): data = data.copy() for key, val in kwargs.items(): data[key] = val return data df >> mutate(z=1) df >> mutate(z=f.x) # Verbs that don't compile f.a to data, but just the column name @register_verb(pd.DataFrame, context=Context.SELECT) def select(data, *columns): return data.loc[:, columns] # f.x won't be compiled as df.x but just 'x' df >> mutate(z=2*f.x) >> select(f.x, f.z) # Compile the args inside the verb @register_verb(pd.DataFrame, context=Context.PENDING) def mutate_existing(data, column, value): column = evaluate_expr(column, data, Context.SELECT) value = evaluate_expr(value, data, Context.EVAL) data = data.copy() data[column] = value return data # First f.x compiled as column name, and second as Series data df2 = df >> mutate_existing(f.x, 10 * f.x) df2 # Evaluate the arguments by yourself @register_verb(pd.DataFrame, context=Context.PENDING) def mutate_existing2(data, column, value): column = evaluate_expr(column, data, Context.SELECT) value = evaluate_expr(value, df2, Context.EVAL) data = data.copy() data[column] = value return data df >> mutate_existing2(f.x, 2 * f.x) # register for multiple types 
@register_verb(int, context=Context.EVAL) def add(data, other): return data + other # add is actually a singledispatch generic function @add.register(float, context=Context.EVAL) def _(data, other): return data * other 1 >> add(1) 1.1 >> add(1.0) # As it's a singledispatch generic function, we can do it for multiple types # with the same logic @register_verb(context=Context.EVAL) def mul(data, other): raise NotImplementedError # not valid until types registered @mul.register(int, context=Context.EVAL) @mul.register(float, context=Context.EVAL) # or you could do @mul.register((int, float), context=Context.EVAL) # context is also supported def _(data, other): return data * other 3 >> mul(2) 3.2 >> mul(2) ``` ### Functions used in verb arguments ``` @register_func(context=Context.EVAL) def if_else(data, cond, true, false): cond.loc[cond.isin([True]), ] = true cond.loc[cond.isin([False]), ] = false return cond # The function is then also a singledispatch generic function df >> mutate(z=if_else(f.x>1, 20, 10)) # function without data argument @register_func(None) def length(strings): return [len(s) for s in strings] df >> mutate(z=length(f.y)) # register existing functions from numpy import vectorize len = register_func(None, context=Context.EVAL, func=vectorize(len)) # original function still works print(len('abc')) df >> mutate(z=len(f.y)) ``` ### Operators You may also redefine the behavior of the operators ``` @register_operator class MyOperators(Operator): def xor(self, a, b): """Interpret X ^ Y as pow(X, Y).""" return a ** b df >> mutate(z=f.x ^ 2) ``` ### Context The context defines how a reference (`f.A`, `f['A']`, `f.A.B`) is evaluated ``` from pipda import ContextBase class MyContext(ContextBase): name = 'my' def getattr(self, parent, ref): # double it to distinguish getattr return getattr(parent, ref) def getitem(self, parent, ref): return parent[ref] * 2 @property def ref(self): # how we evaluate the ref in f[ref] return self 
@register_verb(context=MyContext()) def mutate_mycontext(data, **kwargs): for key, val in kwargs.items(): data[key] = val return data df >> mutate_mycontext(z=f.x + f['x']) # when ref in f[ref] is also needed to be evaluated df = df >> mutate(zero=0, one=1, two=2, three=3) df df >> mutate_mycontext(m=f[f.y][:1].values[0]) # f.y returns ['zero', 'one', 'two', 'three'] # f[f.y] gets [[0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6]] # f[f.y][:1].values gets [[0, 4, 8, 16]] # f[f.y][:1].values[0] returns [0, 8, 16, 32] # Note that each subscription ([]) will double the values ``` ### Caveats - You have to use `and_` and `or_` for bitwise and/or (`&`/`|`) operators, as `and` and `or` are python keywords. - Limitations: Any limitations that apply to `executing` to detect the AST node will apply to `pipda`. It may not work in some circumstances where other AST magics apply. - Calling registered verbs/functions regularly: The piping syntax (`>>`) is recommended with `pipda`. Because everything is determined with this syntax. However, `pipda` tries to support regular calling. The ambiguity can come from the situations where the arguments passed in can shift one position right (such that they fit the piping calling), and the first value passed in can also be dispatched and fit in the second argument. For example: ```python @register_verb(int) def add(a: int, b: int = 1): return a + b ``` If you call it like this `add(2)`, then we have no idea if this is calling `add(2, b=1)`, or `add(b=2)` and it's waiting for the data (`a`) to be piped in. In such a case, the function is called in the former way, but a warning will be shown. 
To avoid this, as it states in the warning message, according to the reasons of the ambiguity, we should make sure that the values passed in cannot be shifted one position right (given values for all arguments would do it): ```python add(2, 1) # or add(2, b=1) ``` or try not to use optional arguments while defining the function; or make sure the first value cannot be dispatched: ```python @register_verb(int) def add(a: int, b: float = 1.0): return a + b add(2.0) ``` In such a case, it is certain that it is called like `add(b=2.0)` and waits for `a` to be piped in. You can even have a different type annotation for the second argument, even the same value can be accepted: ```python @register_verb(int) def add(a: int, b: Optional[int] = 1): return a + b add(2) ``` This will force it to call `add(2, b=1)`, but this definitely has some side effects: ```python verb(data, add(2)) ``` Here `add(2)` is intended to be called like `add(b=2)`, but unexpectedly, it will be called like `add(2, b=1)`. Using the piping syntax will perfectly solve this: ```python data >> verb(add(2)) ``` since we know the function called in a verb is supposed to wait for the data to be piped in. See also: [Piping vs regular calling][7] - Use another piping sign ```python from pipda import register_piping register_piping('^') # register verbs and functions df ^ verb1(...) ^ verb2(...) ``` Allowed signs are: `+`, `-`, `*`, `@`, `/`, `//`, `%`, `**`, `<<`, `>>`, `&`, `^` and `|`. ## How it works ### The verbs ```R data %>% verb(arg1, ..., key1=kwarg1, ...) ``` The above is a typical `dplyr`/`tidyr` data piping syntax. The counterpart Python syntax we expect is: ```python data >> verb(arg1, ..., key1=kwarg1, ...) ``` To implement that, we need to defer the execution of the `verb` by turning it into a `Verb` object, which holds all information of the function to be executed later. The `Verb` object won't be executed until the `data` is piped in. 
It is all thanks to the [`executing`][5] package to let us determine the ast nodes where the function is called. So that we are able to determine whether the function is called in a piping mode. If an argument is referring to a column of the data and the column will be involved in the later computation, then it also needs to be deferred. For example, with `dplyr` in `R`: ```R data %>% mutate(z=a) ``` is trying to add a column named `z` with the data from column `a`. In python, we want to do the same with: ```python data >> mutate(z=f.a) ``` where `f.a` is a `Reference` object that carries the column information without fetching the data while python sees it immediately. Here the trick is `f`. Like other packages, we introduced the `Symbolic` object, which will connect the parts in the argument and make the whole argument an `Expression` object. This object is holding the execution information, which we could use later when the piping is detected. ### The functions Then what if we want to use some functions in the arguments of the `verb`? For example: ```python data >> select(starts_with('a')) ``` to select the columns with names starting with `'a'`. No doubt that we need to defer the execution of the function, too. The trick is that we let the function return a `function` object as well, and evaluate it as the argument of the verb. ### The operators `pipda` also opens opportunities to change the behavior of the operators in verb/function arguments. This allows us to mimic something like this: ```python data >> select(-f.a) # select all columns but `a` ``` To do that, we turn it into an `Operator` object. Just like a `Verb` or a `Function` object, the execution is deferred. By default, the operators we used are from the python standard library `operator`. `operator.neg` in the above example. You can also define your own by subclassing the `Operator` class, and then register it to replace the default one by decorating it with `register_operator`. 
[1]: https://github.com/machow/siuba [2]: https://github.com/kieferk/dfply [3]: https://github.com/has2k1/plydata [4]: https://github.com/dodger487/dplython [5]: https://github.com/alexmojaki/executing [6]: https://github.com/pwwang/plyrda [7]: https://pwwang.github.io/datar/piping_vs_regular/
github_jupyter
# Visualize the best RFE conformations using cMDS plots ``` import pandas as pd import numpy as np import sys sys.path.append('../..') from helper_modules.run_or_load import * from helper_modules.MDS import * ``` ### Load protein related data ``` prot_name = 'fxa' DIR = '../1_Download_and_prepare_protein_ensembles' path_to_file = f'{DIR}/TABLA_MTDATA_FXA_136_crys_LIGS_INFO.json' df_prot = pd.read_json(path_to_file) df_prot.head(3) ``` ### Load the dimensionality reduction results ``` df_dims = pd.read_pickle('../3_Protein_Ensembles_Analysis/df_PROTEINS_DIMS_reduced_TABLE.obj') # Update the df with the mds axis # Pocket shape df_prot['vol_x'] = df_dims['mds_vol_pkt_x'] df_prot['vol_y'] = df_dims['mds_vol_pkt_y'] # secondary structure residues RMSD df_prot['secres_x'] = df_dims['mds_sec_x'] df_prot['secres_y'] = df_dims['mds_sec_y'] # pocket residues RMSD df_prot['pkt_x'] = df_dims['mds_pkt_x'] df_prot['pkt_y'] = df_dims['mds_pkt_y'] df_prot.head(3) ``` ### Load POVME3 results and single-conformation docking performances (AUC-ROC) ``` # Extra features to get volume or surface area df_extra = pd.read_pickle(f'../4_Ensemble_docking_results/TABLE_Confs_Features_and_performances_fxa.pkl') # Adding to the main df df_prot['volume'] = df_extra['Pk. Volume'] df_prot['surf_area'] = df_extra['Pk. 
SASA'] # ROC-AUC single performance df_prot['AUC-ROC'] = df_extra['AUC-ROC'] df_prot.head(3) ``` ### Load *Recursive Feature Elimination* results ``` # Open RFE_estimator # Open RFE_estimator dataset = 'MERGED' model_name = 'XGB_tree' split = 'random' filename = f'./cachedir/rfe_selectors/RFE_xgb_{prot_name}.joblib' # Load the RFE selector (computed in the previos notebook) rfe_selector = joblib.load(filename) # Create a dataframe with the protein rankings df_ranks = pd.DataFrame({ 'pdb_id' : df_prot.index, 'rfe_ranking': rfe_selector.ranking_ }) df_ranks = df_ranks.sort_values('rfe_ranking').set_index('pdb_id') # Update the df with the rank values df_prot = df_prot.merge(df_ranks, left_index=True, right_index=True)\ .sort_values('rfe_ranking') df_prot.head(3) ``` ## cMDS plots We will use `ggplot2` for plotting ``` %load_ext rpy2.ipython ``` Just a few modifications for visualization purposes. ``` # To be able to plot confs with no inhibitors => NA == 10 df_prot['Inhib_mass_num'] = pd.to_numeric(df_prot['Inhib_mass']).\ fillna(10) ** 2 df_prot['volume.T'] = (df_prot['volume']/100) ** 1.5 df_selected = df_prot.sort_values('rfe_ranking').head(16) x = 'vol_x' y = 'vol_y' size='volume.T' ``` #### Create the dataframe for plotting ``` # This is the final table for plotting df_volpk = df_prot[['rfe_ranking', 'vol_x', 'vol_y', 'volume']] df_volpk = df_volpk.rename({'vol_x': 'x', 'vol_y': 'y'}, axis = 1) df_volpk %%R -i df_volpk -i prot_name -w 4. -h 4. --units in -r 200 source('../../R_scripts/plot_cMDS.R') prot_name <- prot_name p <- plot_cMDS(df_volpk) # Save the picture space <- 'povme' methodology <- 'MDS_plots/' save_path = '~/Documents/Doctorado/Paper_doctorado/Response_to_reviewers/Figuras_mayor_review/raw_imgs/' filename <- paste0(save_path, methodology, paste(prot_name, space, 'MDS.pdf', sep='_')) ggsave(filename, plot=p, width=4., height= 4.) 
print(p) ``` ## Swarplot with the AUC-ROC values per conformation - The following plot show the distribution of the protein conformations regarding its AUC-ROC value computed from their individual docking results. ``` import matplotlib import seaborn as sns import matplotlib.ticker as ticker from matplotlib.colors import LinearSegmentedColormap top_confs = 8 # Define the colormap cmap = LinearSegmentedColormap.from_list( name ='test', colors = ["red", "orange", "#374E55"], N = top_confs ) matplotlib.cm.register_cmap("mycolormap", cmap) sns.set(font_scale = 1.1, style = 'whitegrid') # Filter the df_ = df_prot.copy() # Get the top 16 df_['top_mask'] = [2 if i <= top_confs else 1 for i in df_['rfe_ranking']] df_ = df_[['AUC-ROC', 'top_mask', 'rfe_ranking']]\ .melt(id_vars=('top_mask', 'rfe_ranking')) fig, ax = plt.subplots(figsize=(2.2, 4.45)) # Blue dots (all conformations) np.random.seed(2) sns.swarmplot(y = 'value', x = 'variable', data = df_, size = 4.6, ax = ax, color = '#87DADE') # Plot the top RFE 16 conformations df_top = df_.query('top_mask == 2') np.random.seed(2) sns.swarmplot(y = 'value', x = 'variable', data = df_top, size = 5, ax = ax, hue ='rfe_ranking', edgecolor = 'black', linewidth = 0.5, palette = 'mycolormap') # Axis and labels ax.set_yticks(np.arange(0.5, 0.70, .05)) ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f')) ax.yaxis.tick_left() ax.get_legend().remove() ax.tick_params(length = 2, color = 'black', axis = 'y') ax.grid(True, linewidth = 0.7) ax.tick_params(axis="y",direction="in", pad=-27) ax.set(xlabel = 'Protein conformations', ylabel = '') for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(0.55) ax.spines[axis].set_edgecolor('black') plt.savefig(f'{prot_name}_swarm_auc.pdf') # Save the picture plt.show() top_confs = 8 # Define the colormap cmap = LinearSegmentedColormap.from_list( name ='test', colors = ["red", "orange", "#374E55"], N = top_confs ) matplotlib.cm.register_cmap("mycolormap", cmap) 
sns.set(font_scale = 0.7, style = 'whitegrid') # Filter the df_ = df_prot.copy() # Get the top 16 df_['top_mask'] = [2 if i <= top_confs else 1 for i in df_['rfe_ranking']] df_ = df_[['AUC-ROC', 'top_mask', 'rfe_ranking']]\ .melt(id_vars=('top_mask', 'rfe_ranking')) # Get the AUC-ROC of the 32 lowest conformation auc_worst_32 = df_['value'].nsmallest(32).max() df_['worst_32'] = df_['value'] <= auc_worst_32 fig, ax = plt.subplots(figsize=(1.7, 3.52)) # Blue dots (all conformations) np.random.seed(2) sns.swarmplot(y = 'value', x = 'variable', data = df_, size = 3.6, ax = ax, alpha = 0.7, hue = 'worst_32', palette = ['#F0B3B2', '#5CA586']) # Axis and labels ax.set_yticks(list(np.arange(0.3, 1.1, .1)) + [auc_worst_32]) ax.get_yticklabels()[-1].set_color("#B24745") ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f')) ax.yaxis.tick_left() ax.get_legend().remove() plt.axhline(y=0.5, color='darkgrey', linewidth = 1.2, linestyle = '--') plt.axhline(y=auc_worst_32, color='#79AF97', linestyle=':', linewidth = 1.2) ax.fill_between([-1,1], [0], [auc_worst_32], color='#79AF97', alpha = 0.3 ) ax.tick_params(length = 3, color = 'black', axis = 'y') ax.grid(True, linewidth = 0.7) # ax.tick_params(axis="y",direction="in", pad=-27) ax.set_xlabel('SCPs from the entire dataset', fontsize = 8) ax.set_ylabel('') for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(0.55) ax.spines[axis].set_edgecolor('black') plt.ylim(0.265, 1.033) plt.savefig(f'{prot_name}_swarm_auc.pdf') # Save the picture plt.show() ``` ## MDS using Secondary structure - Pisani (2016) residues. - The following projection was computed from the pairwise RMSD matrix of the C$\alpha$ of the residues defined by Pisani (2016). 
``` df_secRMSD = df_prot[['rfe_ranking', 'secres_x', 'secres_y', 'volume']] df_secRMSD = df_secRMSD.rename({'secres_x': 'x', 'secres_y': 'y'}, axis = 1) %%R -i df_secRMSD -w 3.5 -h 3.5 --units in -r 200 p <- plot_cMDS(df_secRMSD) # Save the picture space <- 'secRMSD' methodology <- 'MDS_plots/' save_path = '~/Documents/Doctorado/Paper_doctorado/Response_to_reviewers/Figuras_mayor_review/raw_imgs/' filename <- paste0(save_path, methodology, paste(prot_name, space, 'MDS.pdf', sep='_')) ggsave(filename, plot=p, width=4.0, height= 4.0) print(p) ``` ## MDS using pocket residues ``` df_pkRMSD = df_prot[['rfe_ranking', 'pkt_x', 'pkt_y', 'volume']] df_pkRMSD = df_pkRMSD.rename({'pkt_x': 'x', 'pkt_y': 'y'}, axis = 1) %%R -i df_pkRMSD -w 4.1 -h 4.1 --units in -r 200 p <- plot_cMDS(df_pkRMSD) # Save the picture space <- 'pkRMSD' methodology <- 'MDS_plots/' save_path = '~/Documents/Doctorado/Paper_doctorado/Response_to_reviewers/Figuras_mayor_review/raw_imgs/' filename <- paste0(save_path, methodology, paste(prot_name, space, 'MDS.pdf', sep='_')) ggsave(filename, plot=p, width=4.0, height= 4.0) print(p) ```
github_jupyter
# Explaining random forest model predictions with Shapley values Shapley values provide an estimate of how much any particular feature influences the model decision. When Shapley values are averaged they provide a measure of the overall influence of a feature. Shapley values may be used across model types, and so provide a model-agnostic measure of a feature’s influence. This means that the influence of features may be compared across model types, and it allows black box models like neural networks to be explained, at least in part. For more on Shapley values in general see Christoph Molnar’s excellent book chapter: https://christophm.github.io/interpretable-ml-book/shapley.html More information on the shap library, including lots of useful examples, may be found at: https://shap.readthedocs.io/en/latest/index.html Here we provide an example of using shap with random forests. ## Load packages ``` # Turn warnings off to keep notebook tidy import warnings warnings.filterwarnings("ignore") import matplotlib.pyplot as plt import numpy as np import pandas as pd import pickle import shap from sklearn.ensemble import RandomForestClassifier ``` ## Load data We will use the first train/test split. ``` data_loc = '../../data/sam_1/kfold_5fold/' train = pd.read_csv(data_loc + 'train_0.csv') test = pd.read_csv(data_loc + 'test_0.csv') test = test.sample(2500) ``` ## Fit random forest model Fit model and get feature importances. 
``` # Get X and y X_train = train.drop('S2Thrombolysis', axis=1) X_test = test.drop('S2Thrombolysis', axis=1) y_train = train['S2Thrombolysis'] y_test = test['S2Thrombolysis'] # One hot encode hospitals X_train_hosp = pd.get_dummies(X_train['StrokeTeam'], prefix = 'team') X_train = pd.concat([X_train, X_train_hosp], axis=1) X_train.drop('StrokeTeam', axis=1, inplace=True) X_test_hosp = pd.get_dummies(X_test['StrokeTeam'], prefix = 'team') X_test = pd.concat([X_test, X_test_hosp], axis=1) X_test.drop('StrokeTeam', axis=1, inplace=True) # Define and Fit model model = RandomForestClassifier( n_estimators=100, n_jobs=-1, class_weight='balanced', random_state=42) model.fit(X_train, y_train) # Get feature weights features = list(X_train) feature_importances = model.feature_importances_ importances = pd.DataFrame(index=features) importances['importance'] = feature_importances importances['rank'] = importances['importance'].rank(ascending=False).values # Get predicted class and ptrobability y_pred = model.predict(X_test) y_prob = model.predict_proba(X_test)[:, 1] # Measure accuracy accuracy = np.mean(y_pred == y_test) print(f'Accuracy: {accuracy:0.3f}') ``` ## Shap values ### Get Shap values We will get the Shap values for the test set. Can chose to calculate shap and save, or load shap explainer. 
``` calculate_shap_values = True if calculate_shap_values: # Set up explainer using typical feature values from training set # Note: Use a sample of 100-1000 for this if explainer too slow explainer = shap.Explainer(model, X_train.sample(10)) # Get Shapley values along with base and features shap_values_extended = explainer(X_test) # Shap values exist for each classification in a Tree; 1=give thrombolysis shap_values = shap_values_extended.values[:,:,1] # Save using pickle filename = './output/shap_values_extended_rf1.p' with open(filename, 'wb') as filehandler: pickle.dump(shap_values_extended, filehandler) else: # Load preloaded explainer filename = './output/shap_values_extended_rf1.p' with open(filename, 'rb') as filehandler: shap_values_extended = pickle.load(filehandler) shap_values = shap_values_extended.values[:,:,1] # Get mean Shap values for each feature shap_values_mean = pd.DataFrame(index=features) shap_values_mean['mean_shap'] = np.mean(shap_values, axis=0) shap_values_mean['abs_mean_shap'] = np.abs(shap_values_mean) shap_values_mean['mean_abs_shap'] = np.mean(np.abs(shap_values), axis=0) shap_values_mean['rank'] = shap_values_mean['mean_abs_shap'].rank( ascending=False).values ``` ### Compare top 10 weights and Shap values ``` top_10_importances = \ importances.sort_values('importance', ascending=False).head(10) top_10_importances ``` When looking for the most influential Shap values we use the mean of the absolute Shap values for each feature. ``` top_10_shap = shap_values_mean.sort_values( 'mean_abs_shap', ascending=False).head(10) top_10_shap ``` Get intersection between top 10 weights and top 10 Shap values. 
``` intersection = list(top_10_importances.index.intersection(top_10_shap.index)) print(f'Number of intersection values = {len(intersection)}') print('\nIntersecting values:') intersection ``` ### Plot average Shap values against average weights ``` fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(111) ax.scatter(importances['importance'], shap_values_mean['mean_abs_shap']) ax.grid() ax.set_xlabel('Feature importance') ax.set_ylabel('Feature shap') plt.show() ``` ### Plot most influential features This is a plot of the mean absolute Shap values. ``` shap.summary_plot(shap_values, X_test, plot_type='bar') ``` ### Beeswarm plot A Beeswarm plot shows all points. The feature value for each point is shown by the colour, and its position indicates the Shap value for that instance. ``` fig = plt.figure(figsize=(6,6)) shap.summary_plot(shap_values=shap_values, features=X_test, feature_names=features, cmap=plt.get_cmap('nipy_spectral'),show=False) plt.show() ``` ### Plot Waterfall and decision plot plots for instances with low or high probability of receiving thrombolysis Waterfall plot and decision plots are alternative ways of plotting the influence of features for individual cases. ``` # Get the location of an example each where probability of giving thrombolysis # is <0.1 or >0.9 location_low_probability = np.where(y_prob < 0.1)[0][0] location_high_probability = np.where(y_prob > 0.9)[0][0] ``` An example with low probability of receiving thrombolysis. ``` shap.plots.waterfall(shap_values_extended[location_low_probability][:,1], max_display=15) instance = location_low_probability chosen_instance = X_test.iloc[instance] shap_values_instance = shap_values[instance] expected_value = shap_values_extended.base_values[0][1]#explainer.expected_value print(f"The base value is {expected_value:0.2f}") shap.decision_plot(expected_value, shap_values_instance, chosen_instance) ``` An example with high probability of receiving thrombolysis. 
``` shap.plots.waterfall(shap_values_extended[location_high_probability][:,1], max_display=15) instance = location_high_probability chosen_instance = X_test.iloc[instance] shap_values_instance = shap_values[instance] expected_value = shap_values_extended.base_values[0][1]#explainer.expected_value print(f"The base value is {expected_value:0.2f}") shap.decision_plot(expected_value, shap_values_instance, chosen_instance) ``` ### Show the relationship between feature value and Shap value for top 5 influential features. ``` feat_to_show = top_10_shap.index[0:5] for feat in feat_to_show: shap.plots.scatter(shap_values_extended[:, feat][:,1], x_jitter=0) ``` Examine `S2BrainImagingTime_min` in range 0-400 minutes. ``` fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(111) shap.plots.scatter(shap_values_extended[:, 'S2BrainImagingTime_min'][:,1], x_jitter=0, ax=ax, show=False) ax.set_xlim(0,400) ```
github_jupyter
# FloPy ## MNW2 package example ``` from __future__ import print_function import sys import os import numpy as np try: import pandas as pd except: pass # run installed version of flopy or add local path try: import flopy except: fpth = os.path.abspath(os.path.join('..', '..')) sys.path.append(fpth) import flopy print(sys.version) print('numpy version: {}'.format(np.__version__)) try: print('pandas version: {}'.format(pd.__version__)) except: pass print('flopy version: {}'.format(flopy.__version__)) ``` ### Make an MNW2 package from scratch ``` m = flopy.modflow.Modflow('mnw2example', model_ws='data') dis = flopy.modflow.ModflowDis(nrow=5, ncol=5, nlay=3, nper=3, top=10, botm=0, model=m) ``` ### MNW2 information by node (this could be prepared externally from well reconds and read in from a csv or excel file) * this table has two multi-node wells, the first (well1) consisting of two nodes that are manually specified (where the variable **rw** is specified by node) * node that some variables that are constant for the whole well are also included (losstype, zpump, etc.) 
``` node_data = pd.DataFrame([[1, 1, 9.5, 7.1, 'well1', 'skin', -1, 0, 0, 0, 1., 2., 5., 6.2], [1, 1, 7.1, 5.1, 'well1', 'skin', -1, 0, 0, 0, 0.5, 2., 5., 6.2], [3, 3, 9.1, 3.7, 'well2', 'skin', -1, 0, 0, 0, 1., 2., 5., 4.1]], columns=['i', 'j', 'ztop', 'zbotm', 'wellid', 'losstype', 'pumploc', 'qlimit', 'ppflag', 'pumpcap', 'rw', 'rskin', 'kskin', 'zpump']) node_data ``` #### convert the DataFrame to a rec array for compatibility with flopy ``` node_data = node_data.to_records() node_data ``` ### Stress period information (could also be developed externally) ``` stress_period_data = pd.DataFrame([[0, 'well1', 0], [1, 'well1', 100.0], [0, 'well2', 0], [1, 'well2', 1000.]], columns=['per', 'wellid', 'qdes']) stress_period_data pers = stress_period_data.groupby('per') stress_period_data = {i: pers.get_group(i).to_records() for i in [0, 1]} stress_period_data ``` ### Make ``ModflowMnw2`` package object * note that extraneous columns in node_data and stress_period_data are ignored * if itmp is positive, it must equal the number of active wells being specified in ``stress_period_data``, otherwise the package class will raise an error. ``` mnw2 = flopy.modflow.ModflowMnw2(model=m, mnwmax=2, node_data=node_data, stress_period_data=stress_period_data, itmp=[2, 2, -1], # reuse second per pumping for last stress period ) # "nodtot" is computed automatically mnw2.nodtot pd.DataFrame(mnw2.node_data) pd.DataFrame(mnw2.stress_period_data[0]) pd.DataFrame(mnw2.stress_period_data[1]) tmp = flopy.modflow.ModflowMnw2(model=m, itmp=[1, 1, -1], # reuse second per pumping for last stress period ) ``` ### empty ``node_data`` and ``stress_period_data`` tables can also be generated by the package class, and then filled ``` node_data = tmp.get_empty_node_data(3) node_data ``` ### Mnw objects at the base of the flopy mnw2 module is the **Mnw** object class, which describes a single multi-node well. 
A list or dict of **Mnw** objects can be used to build a package (using the example above): ``` flopy.modflow.ModflowMnw2(model=m, mnwmax=2, mnw=<dict or list of Mnw objects>, itmp=[1, 1, -1], # reuse second per pumping for last stress period ) ``` or if node_data and stress_period_data are supplied, the **Mnw** objects are created on initialization of the ModflowMnw2 class instance, and assigned to the ```.mnw``` attribute, as items in a dictionary keyed by ```wellid```. ``` mnw2.mnw mnw2.mnw['well1'].__dict__ ``` Note that Mnw object attributes for variables that vary by node are lists (e.g. ``rw`` above) #### Each Mnw object has its own ``node_data`` and ``stress_period_data`` ``` pd.DataFrame(mnw2.mnw['well1'].node_data) ``` #### Instead of a dict keyed by stress period, Mnw.stress_period_data is a recarray with pumping data listed by stress period for that well * note that data for period 2, where ``itmp`` < 1, is shown (was copied from s.p. 1 during construction of the **Mnw** object) ``` pd.DataFrame(mnw2.mnw['well2'].stress_period_data) ``` ### Build the same package using only the ``Mnw`` objects ``` mnw2fromobj = flopy.modflow.ModflowMnw2(model=m, mnwmax=2, mnw=mnw2.mnw, itmp=[2, 2, -1], # reuse second per pumping for last stress period ) pd.DataFrame(mnw2fromobj.node_data) pd.DataFrame(mnw2fromobj.stress_period_data[0]) pd.DataFrame(mnw2fromobj.stress_period_data[1]) ``` ### By default, the ``node_data`` and ``stress_period_data`` tables attached to the ``ModflowMnw2`` package class are definitive * on writing of the package output (``mnw2.write_file()``), the **Mnw** objects are regenerated from the tables. This setting is controlled by the default argument ``use_tables=True``. To write the package file using the **Mnw** objects (ignoring the tables), use ``mnw2.write_file(use_tables=False)``. 
``` per1 = flopy.modflow.ModflowMnw2.get_empty_stress_period_data(itmp=2) per1 ``` ### Write an MNW2 package file and inspect the results ``` mnw2.write_file(os.path.join('data/test.mnw2')) junk = [print(l.strip('\n')) for l in open('data/test.mnw2').readlines()] ``` ### Load some example MNW2 packages ``` path = os.path.join('..', '..', 'examples', 'data', 'mnw2_examples') cpth = os.path.join('..', '..', 'autotest', 'data') m = flopy.modflow.Modflow('MNW2-Fig28', model_ws=cpth) dis = flopy.modflow.ModflowDis.load(os.path.join(path, 'MNW2-Fig28.dis'), m) m.get_package_list() mnw2pth = os.path.join(path, 'MNW2-Fig28.mnw2') mnw2 = flopy.modflow.ModflowMnw2.load(mnw2pth, m) pd.DataFrame(mnw2.node_data) pd.DataFrame(mnw2.stress_period_data[0]) mnw2.mnw pd.DataFrame(mnw2.mnw['Well-A'].stress_period_data) path = os.path.join('..', '..', 'examples', 'data', 'mnw2_examples') cpth = os.path.join('data') m = flopy.modflow.Modflow('br', model_ws=cpth) mnw2 = flopy.modflow.ModflowMnw2.load(path + '/BadRiver_cal.mnw2', m) df = pd.DataFrame(mnw2.node_data) df.loc[:, df.sum(axis=0) != 0] ```
github_jupyter
# "You get a decision tree! And YOU get a decision tree!" > "Oprah was so close to discovering random forests." - comments: true - categories: [tabular] Our first method for training structured tabular data is to use ensembles of decision trees. --- **Decision trees**: a decision tree asks a series of yes/no questions about the data. After each question, the data at that part splits between yes/no. After one or more questions, predictions can be formed by finding the group the data is part of at the bottom of the tree and returning the average value of the targets in that group. --- To train a decision tree, we follow a greedy approach with six steps: 1. Loop through each column of the data set. 2. For each column, loop through each possible **level** of that column. --- **Level**: for most continuous and some categorical variables, when we say levels, we're referring to variables that can be ordered. For example, sizes like "Small" < "Medium" < "Large". For other categorical variables, we refer to the actual values. --- 3. Try splitting the data into two groups, based on whether they're greater than or less than that value (or equal to or not equal to for other categorical variables). 4. Find the average prediction for each of those two groups and use your metric to see how close that is to the actual value of each of the items in that group. 5. After looping through all the possible columns and levels for each column, pick the split point that gave the best prediction. 6. Now, we have two groups for our data set. Treat each of them as new data sets and repeat from step 1 until each group reaches your minimum size threshold. With decision trees, you have to be careful with how many leaf nodes you end up with. If you have too many (close to the number of data entries), then your model will overfit. ## Overfitting? No problem. One year before his retirement, Leo Breiman published a paper on "bagging". 
Instead of training on the entire training set (or mini-batches), you 1. randomly choose a subset of the rows of your data, 2. train a model using this subset, 3. save the model, and 4. train more models on different subsets of the data. Eventually, you end up with a number of models. To make a prediction, you predict using all of the models and take the average. Each of the models has errors since they're not trained on the full training set, but since different models have different errors (and these errors aren't correlated with each other; i.e., they're independent) the errors end up cancelling out when we take the average. Seven years later, Breiman also coined "random forests" where you apply bagging to decision trees not only by randomly choosing a subset of the *rows* of your data, but also by randomly choosing a subset of the *columns when choosing a split* in each decision tree. --- **Random forests**: a specific type of an *ensemble of decision trees*, where bagging is used to combine the results of several decision trees that were trained on random subsets of the rows of the data where each split is made on a random subset of the columns of the data. --- Since the errors tend to cancel out, it also means the trees are less susceptible to hyperparameter changes. We can also have as many trees as we want; in fact, the error rate usually decreases as we add more trees. ## Interpreting the model Once we trained our model, if the error rate for the validation set is higher than the training set, we want to make sure it's from generalization (or extrapolation) problems and not overfitting. **Out-of-bag error** allows us to check if we're overfitting without the need of a validation set. Since each tree in a random forest is trained on a subset of the data, we can form a validation set for each tree as the rows not included in training for that tree. 
What makes out-of-bag error different from validation set error is that the data in the former is within the range of the training set, while the validation set is usually outside of the range; this range is most important for time series data since the validation set should contain data that's in the future compared to the training set. So, if our out-of-bag error is lower than the validation set error, then the model is not overfitting and is instead having other problems. In general, we want to interpret in our model: - how confident are we in our predictions for a particular row of data? - for making our predictions on a specific row of data, what were the most important columns, and how did they influence the prediction? - which columns are the most important; and which columns can we ignore (remove them from training)? - which columns are *effectively redundant* in terms of prediction? - how do predictions vary as we vary the columns (as in, what kind of relationship do the columns have with the predictions)? ## Confidence for a prediction on a particular row of data When we want to predict for a particular row of data, we pass the data to each tree in our random forest and take the average of the results. To find the *relative* confidence of the prediction, we can take the standard deviation of the predictions instead of the average. So, if the standard deviation is high, we should be more wary of the prediction since the trees disagree more than if the standard deviation was low. ## Feature importance It's important to understand *how* our models are making predictions, not just how accuracte the predictions are. To find the importance of each column (feature) in our data, we can loop through each tree and recursively explore each branch. At each branch, look at what column was used for that split and how much the model improved at that split. 
The improvement, which is weighted by the number of rows in that group, is added to the importance score for that column.
To do so, you calculate the *rank correlation*, where all the values in each column are replaced with their *rank* relative to other values in the same column (think of it like descending `argsort`, where you give each row in a specific column the index it would have for the column to be sorted in descending order).Then, the *correlation* is calculated (kind of like the correlation coefficient $r$, but with rank). Columns with similar rank correlations may be synonyms for each other and one (or more) of them could be removed. When removing redundant columns, retrain the model where you remove only one redundant column at a time. Then, try removing them in groups and eventually altogether. The point of this tedious task is to make sure we're not significantly reducing the accuracy of the model. And, some columns, although they seem redundant, may not be redundant and would be important to keep in the model. Although not necessary, you should remove unimportant and redundant columns when possible since it'll simplify your model. ## Relationship between columns and predictions To find the relationship between a column and prediction, you could guess that we should have a row where we keep all columns constant except for the column in question. But, we can't just take the average of the predictions for a specific level of a column since other variables can change. Instead, we replace every single value in the column with a specific level in the validation set, and record the prediction with the new validation set as the input. Then, we do the same for every other level of that column. With these predictions, we can form a line graph with the levels as the x-axis and the predictions as the y-axis. We call this graph a **partial dependence plot**. Sometimes, you trained your model and - your accuraccy is too good to be true, - some features don't make sense to be predictors, or - *the partial dependence plots looks weird*. 
If so, your data might have **data leakage** where the training set contains information that wouldn't be available in the data you give at inference (i.e., when using the model in practice and/or your validation set). Data leakage are subtleties that give away the correct answer. For example, if you trained a model to predict the weather and the precipitation was in an available column (and/or it was only filled out on rainy days), you bet your model would predict it was "raining" on "rainy days" if there was any precipitation and "sunny" on "sunny days" otherwise. So, when you interpret the model later, you might see really high accuracy, with precipitation being a high predictor. In preventing data leakage, train your model first and then look for data leakage (and then clean or reprocess your data); this process is the same with how you would train your model first before performing data cleaning. ## We can't always use random forests With time series data, you usually want to have a model that can generalize to new data and extrapolate accurately. The downside of random forests is that it can only predict within the range of its training data. So, if the value in the validation set is outside of the range of the training set, the accuracy of the random forest will always be low since it can't predict values that high. Why might this be the case? A random forest returns a prediction based on the average of the predictions of its decision trees, where each tree predicts the average of the targets in the rows in a leaf node. So, a random forest can never predict a value that's outside of the range of the training set. In a general sense, a random forest can't generalize to **out-of-domain data**, so we need to make sure our validation, test, and future data sets contain the same kind of data as our training set. To test if there's out-of-(the training set's)-domain data, we can build a random forest that predicts which row is in the validation or training set. 
To do so, you can concatenate the validation and training set and label the rows by validation or training. Then, through feature importance, if there's a particular column that is more prominent in the validation set, there will be a nonuniform distribution of importance scores. Sometimes, you can remove the columns with high feature importance and improve the accuracy of the model since those columns might be related to another column (hence removing redundant columns). Removing those columns can also make your model more resilient over time since those columns may be affected by **domain shift** where the data put into the model is significantly different from the training data. ## Boosting instead of bagging Instead of random forests, which forms an ensemble of decision trees through *bagging*, we can also make **gradient boosted machines** which uses *boosting* instead of bagging. Bagging takes the average of the predictions from each decision tree. Boosting, on the other hand, *adds* the predictions of each decision tree. So, you also train your decision trees differently: - train a decision tree that *underfits* the targets of your training set, - calculate residuals by subtracting the predictions from the targets, - repeat from the beginning, but train your future models with the residuals as the targets, and - continue training more trees until you reach a certain maximum or your validation metric gets worse. With boosting, we try to minimize the error by having the residuals become as small as possible by underfitting them. Unlike random forests, the trees aren't independent of each other so the more trees we train, the more the overall model will overfit the training set. 
## Free accuracy boost In training a model for tabular data, you can get a boost in accuracy by training a random forest model, doing some analysis like feature importance and partial dependence plots to remove redundant columns, and then training a neural network that uses *embeddings for the categorical variables/columns*. Then, we *retrain* our random forest model, but instead of creating *levels* for the categorical variables, we use the *embeddings trained by the neural network*. So, instead of using a neural network at inference, you can use an improved random forest model. The same can be done for gradient boosted machines, and any model that uses categorical variables. Just use the embeddings trained by the neural network. ## Conclusion We covered a machine learning technique called ensembles of decision trees. Here, we mentioned two methods of ensembling: bagging and boosting. With bagging, you form a *random forest* that's quick and easy to train. Random forests are also resistant to hyperparameter changes and since the trees are independent, it's very difficult to overfit as you increase the number of trees. With boosting, you form a *gradient boosted machine* (or *gradient boosted decision tree*) that are just as fast to train as random forests in theory, but require more hyperparameter tuning and are susceptible to overfitting with the more trees you train since the trees aren't independent of each other. However, gradient boosted machines tend to have higher accuracy than random forests. Overall, because of the limitations of decision trees, both random forests and gradient boosted machines can't extrapolate to out-of-domain data. Therefore, you sometimes have to make a *neural network*. Neural networks take the longest to train and require more preprocessing like batch normalization (which also needs to be done at inference). With neural networks, you have to be careful with your hyperparameters since they can lead to overfitting. 
However, neural networks are great at extrapolating and can have the highest accuracy of the three models. With neural networks, you can also use ensembles of decision trees to do some of the preprocessing to make them faster to train. And, once you train a neural network, you can use the embeddings trained by the neural networks as the inputs for the categorical variables in another ensemble of decision trees on the same data set. Doing so tends to produce much higher accuracy. If the task doesn't require extrpolation (all future predictions are expected to be in the same range as the training set), then you can use the improved ensemble of decision trees since they will be faster at inference compared to neural networks. Moreover, if the response time at inference isn't a major problem, you can even form an ensemble of neural networks and an ensemble of decision trees where you take the average of the predictions of each of the models. Taking the theory behind random forests, since the two (or more) models were trained by two (or more) very different algorithms, the errors each make are independent of each other and will cancel each other out, leading to higher accuracy with less chances of overfitting. Still, it won't make a bad model a good model.
github_jupyter
<a href="https://colab.research.google.com/github/kuriousk516/HIST4916a-Stolen_Bronzes/blob/main/Stolen_Bronzes.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Stolen Bronzes: Western Museums and Repatriation ## Introduction >"*Walk into any European museum today and you will see the curated spoils of Empire. They sit behind plate glass: dignified, tastefully lit. Accompanying pieces of card offer a name, date and place of origin. They do not mention that the objects are all stolen*." > > 'Radicals in Conversation': The Brutish Museums Public history and digital humanities offers a locus point of contending with difficult pasts. Museums, often considered bastions of knowledge, learning, and public good have fallen under an increasingly critical gaze -- and rightfully so. Public museums have been tools of colonialism, racism, and superiority centred around the supremacy of the west and its history. Digital repositories of museum archives and websites can be used to subvert the exclusionary practices employed by museums and provide tools for marginalized peoples --. The purpose of this notebook is to act as a digital tool for real life change, and it is focused on Dan Hick's [Tweet](https://twitter.com/profdanhicks/status/1375421209265983488) and book, *The Brutish Museum*. ``` %%html <iframe src="https://drive.google.com/file/d/1txSH3UkjJgLTeQW47MGLfrht7AHCEkGC/preview" width="640" height="480"></iframe> ``` What I read in Dan Hicks' Tweet was a call to action. Not necessarily for the average citizen to take the bronzes back, but to start an important discussion about the nature of artifact aqcuisition and confronting how museums procure these items in the first place. The appendix' list is a small fraction of the stolen artifacts found in hundreds of museums all over the world but it is a powerful point of focus. 
I want to create something, however small, that can give others the tools to have a visual representation of stolen artifacts distribution and interrogate why (mostly) western museums are the institutions holding these artifacts, what effect this has, and what's being done with them. Can anyone own art? Who has the power to decide? How do we give that power back to those who were stolen from? To learn more about the Benin bronzes and their history, a good place to start is with the ['Radicals in Conversation'](https://www.plutobooks.com/blog/podcast-brutish-museums-benin-bronzes-decolonisation/) podcast. And now, what I have here is a helpful tool for all of us to answer, **"*How close are you right this second to a looted Benin Bronze*?"** # Data I have compiled a dataframe of all the museums listed in Hicks' appendix'; you can see the original above in his Tweet. The data is in a .CSV file stored in my [GitHub repository](https://github.com/kuriousk516/HIST4916a-Stolen_Bronzes), and you can also find screenshots of the errors I encountered and advice I recieved through the HIST4916a Discord server, some of which I will reference here when discussing data limitations. ## Mapping with Folium Folium seemed the best choice for this project since it doesn't rely on Google Maps for the map itself or the data entry process. [This is the tutorial](https://craftingdh.netlify.app/tutorials/folium/) that I used for the majority of the data coding, and this is the [Point Map alternative](https://handsondataviz.org/mymaps.html) I considered but decided against. ``` import lxml import pandas as pd pd.set_option("max_rows", 400) pd.set_option("max_colwidth", 400) import pandas, os os.listdir() ['.config', 'benin_bronze_locations2.csv', 'sample_data'] ``` Here is where I ran into some trouble. I was having great difficulty in loading my .CSV file into the notebook, so I uploaded the file from my computer. 
Here is the alternative code to upload it using the RAW link from GitHub: url = 'copied_raw_GH_link' df1 = pd.read_csv(url) If you have another (simpler) way of getting the job done, I fully encourage you altering the code to make it happen. ``` from google.colab import files uploaded = files.upload() ``` In the .CSV file, I only had the name of the museums, cities, and countries. Manually inputting the necessary data for plotting the locations would be time-consuming and tedious, but I have an example using geopy and Nomatim to pull individual location info for the cases when "NaN" pops up when expanding the entire dataframe. ``` df1=pandas.read_csv('benin_bronze_locations2.csv', encoding = "ISO-8859-1", engine ='python') df1 !pip install geopy from geopy.geocoders import Nominatim geolocator = Nominatim(user_agent="BENIN-BRONZES", timeout=2) location = geolocator.geocode("Ulster Museum United Kingdom") location ``` Great! Now we have the means of finding the relevant map information for individual entires. But to process the large amount of data, I followed [this YouTube tutorial](https://www.youtube.com/watch?v=0IjdfgmWzMk) for some extra help. ``` def find_location(row): place = row['place'] location = geolocator.geocode(place) if location != None: return location.address, location.latitude, location.longitude, location.raw['importance'] else: return "Not Found", "Not Found", "Not Found", "Not Found" ``` To expand on my data, I needed to add a new column to my dataframe -- the addresses of the museums. ``` df1["Address"]=df1["Place"]+", "+df1["City"]+", "+df1["Country"] df1 #Then I added this string to the geocode to create a coordinates column. df1["Coordinates"]=df1["Address"].apply(geolocator.geocode) df1 ``` After compiling the addresses and coordinates, the dataframe needed the latitude and longitudes for Folium to plot the locations on the map. 
``` df1["Latitude"]=df1["Coordinates"].apply(lambda x: x.latitude if x !=None else None) df1["Longitude"]=df1["Coordinates"].apply(lambda x: x.longitude if x !=None else None) df1 !pip install folium import folium beninbronze_map = folium.Map(location=[6.3350, 5.6037], zoom_start=7) beninbronze_map ``` I want Benin City to be the centre of this map, a rough point of origin. The Kingdom of Benin existed in modern day Nigeria, and it's where the looted bronzes belong. Only *nine* locations in Nigeria have collections of the bronzes, as opposed to the 152 others all over Europe, America, Canada, Russia, and Japan. Nigeria needs to be the centre of the conversation of the looted bronzes and repatriation, and so it is the centre of the map being created. ``` def create_map_markers(row, beninbronze_map): folium.Marker(location=[row['lat'], row['lon']], popup=row['place']).add_to(beninbronze_map) folium.Marker(location=[6.3350, 5.6037], popup="Send the bronzes home").add_to(beninbronze_map) beninbronze_map def create_map_markers(row, beninbronze_map): folium.Marker(location=[row['Latitude'], row['Longitude']], popup=row['Place']).add_to(beninbronze_map) ``` Many of the data entries came up as "NaN" when the code was trying to find their latitude and longitude. It's an invalid entry and needs to be dropped in order for the map markers to function. This is very important to note: out of the 156 data entries, only 86 were plotted on the map. The missing coordinates need to be added to the dataframe, but that's a bit beyond the scope of this project. I invite anyone with the time to complete the map markers using the code examples above. 
``` df1.dropna(subset = ["Latitude"], inplace=True) df1.dropna(subset = ["Longitude"], inplace=True) nan_value = float("NaN") df1.replace("",nan_value, inplace=True) df1.dropna(subset = ["Latitude"], inplace=True) df1.dropna(subset = ["Longitude"], inplace=True) df1 df1.apply(lambda row:folium.CircleMarker(location=[row["Latitude"], row["Longitude"]]).add_to(beninbronze_map), axis=1) beninbronze_map beninbronze_map.save("stolen-bronzes-map.html") ``` # Conclusion Now we have a map showing (some of) the locations of the looted Benin bronzes. It needs to be expanded to include the other locations, but I hope it helped you to think about what Dan Hicks' asked: how close are you, right this minute, to a looted Benin bronze? # Recommended Reading and Points of Reference Abt, Jeffrey. “The Origins of the Public Museum.” In A Companion to Museum Studies, 115–134. Malden, MA, USA: Blackwell Publishing Ltd, 2006. Bennett, Tony. 1990. “The Political Rationality of the Museum,” Continuum: The Australian Journal of Media and Culture 2, no. 1 (1990). Bivens, Joy, and Ben Garcia, Porchia Moore, nikhil trivedi, Aletheia Wittman. 2019. ‘Collections: How We Hold the Stuff We Hold in Trust’ in MASSAction, Museums As Site for Social Action, toolkit, https://static1.squarespace.com/static/58fa685dff7c50f78be5f2b2/t/59dcdd27e5dd5b5a1b51d9d8/1507646780650/TOOLKIT_10_2017.pdf DW.com. "'A matter of fairness': New debate about Benin Bronzes in Germany." Published March 26, 2021. https://www.dw.com/en/a-matter-of-fairness-new-debate-about-benin-bronzes-in-germany/a-57013604 Hudson, David J. 2016. “On Dark Continents and Digital Divides: Information Inequality and the Reproduction of Racial Otherness in Library and Information Studies” https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9862. Kreps, Christina. 2008. ‘Non-western Models of Museums and Curation in Cross-cultural Perspective’in Sharon Macdonald, ed. ‘Companion to Museum Studies’. MacDonald, Sharon. 2008. 
“Collecting Practices” in Sharon Macdonald, ed. ‘Companion to Museum Studies’. Sentance, Nathan mudyi. 2018. “Why Do We Collect,” Archival Decolonist blog, August 18, 2018, https://archivaldecolonist.com/2018/08/18/why-do-we-collect/ https://www.danhicks.uk/brutishmuseums https://www.plutobooks.com/blog/podcast-brutish-museums-benin-bronzes-decolonisation/
github_jupyter
##### Copyright 2020 The OpenFermion Developers ``` ``` # Introduction to OpenFermion <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://quantumai.google/openfermion/tutorials/intro_to_openfermion"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/quantumlib/OpenFermion/blob/master/docs/tutorials/intro_to_openfermion.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/quantumlib/OpenFermion/blob/master/docs/tutorials/intro_to_openfermion.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/OpenFermion/docs/tutorials/intro_to_openfermion.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a> </td> </table> Note: The examples below must be run sequentially within a section. ## Setup Install the OpenFermion package: ``` try: import openfermion except ImportError: !pip install git+https://github.com/quantumlib/OpenFermion.git@master#egg=openfermion ``` ## Initializing the FermionOperator data structure Fermionic systems are often treated in second quantization where arbitrary operators can be expressed using the fermionic creation and annihilation operators, $a^\dagger_k$ and $a_k$. The fermionic ladder operators play a similar role to their qubit ladder operator counterparts, $\sigma^+_k$ and $\sigma^-_k$ but are distinguished by the canonical fermionic anticommutation relations, $\{a^\dagger_i, a^\dagger_j\} = \{a_i, a_j\} = 0$ and $\{a_i, a_j^\dagger\} = \delta_{ij}$. Any weighted sums of products of these operators are represented with the FermionOperator data structure in OpenFermion. 
The following are examples of valid FermionOperators: $$ \begin{align} & a_1 \nonumber \\ & 1.7 a^\dagger_3 \nonumber \\ &-1.7 \, a^\dagger_3 a_1 \nonumber \\ &(1 + 2i) \, a^\dagger_4 a^\dagger_3 a_9 a_1 \nonumber \\ &(1 + 2i) \, a^\dagger_4 a^\dagger_3 a_9 a_1 - 1.7 \, a^\dagger_3 a_1 \nonumber \end{align} $$ The FermionOperator class is contained in $\textrm{ops/_fermion_operator.py}$. In order to support fast addition of FermionOperator instances, the class is implemented as hash table (python dictionary). The keys of the dictionary encode the strings of ladder operators and values of the dictionary store the coefficients. The strings of ladder operators are encoded as a tuple of 2-tuples which we refer to as the "terms tuple". Each ladder operator is represented by a 2-tuple. The first element of the 2-tuple is an int indicating the tensor factor on which the ladder operator acts. The second element of the 2-tuple is Boole: 1 represents raising and 0 represents lowering. For instance, $a^\dagger_8$ is represented in a 2-tuple as $(8, 1)$. Note that indices start at 0 and the identity operator is an empty list. Below we give some examples of operators and their terms tuple: $$ \begin{align} I & \mapsto () \nonumber \\ a_1 & \mapsto ((1, 0),) \nonumber \\ a^\dagger_3 & \mapsto ((3, 1),) \nonumber \\ a^\dagger_3 a_1 & \mapsto ((3, 1), (1, 0)) \nonumber \\ a^\dagger_4 a^\dagger_3 a_9 a_1 & \mapsto ((4, 1), (3, 1), (9, 0), (1, 0)) \nonumber \end{align} $$ Note that when initializing a single ladder operator one should be careful to add the comma after the inner pair. This is because in python ((1, 2)) = (1, 2) whereas ((1, 2),) = ((1, 2),). The "terms tuple" is usually convenient when one wishes to initialize a term as part of a coded routine. However, the terms tuple is not particularly intuitive. Accordingly, OpenFermion also supports another user-friendly, string notation below. This representation is rendered when calling "print" on a FermionOperator. 
$$ \begin{align} I & \mapsto \textrm{""} \nonumber \\ a_1 & \mapsto \textrm{"1"} \nonumber \\ a^\dagger_3 & \mapsto \textrm{"3^"} \nonumber \\ a^\dagger_3 a_1 & \mapsto \textrm{"3^}\;\textrm{1"} \nonumber \\ a^\dagger_4 a^\dagger_3 a_9 a_1 & \mapsto \textrm{"4^}\;\textrm{3^}\;\textrm{9}\;\textrm{1"} \nonumber \end{align} $$ Let's initialize our first term! We do it two different ways below. ``` from openfermion.ops import FermionOperator my_term = FermionOperator(((3, 1), (1, 0))) print(my_term) my_term = FermionOperator('3^ 1') print(my_term) ``` The preferred way to specify the coefficient in openfermion is to provide an optional coefficient argument. If not provided, the coefficient defaults to 1. In the code below, the first method is preferred. The multiplication in the second method actually creates a copy of the term, which introduces some additional cost. All inplace operands (such as +=) modify classes whereas binary operands such as + create copies. Important caveats are that the empty tuple FermionOperator(()) and the empty string FermionOperator('') initializes identity. The empty initializer FermionOperator() initializes the zero operator. ``` good_way_to_initialize = FermionOperator('3^ 1', -1.7) print(good_way_to_initialize) bad_way_to_initialize = -1.7 * FermionOperator('3^ 1') print(bad_way_to_initialize) identity = FermionOperator('') print(identity) zero_operator = FermionOperator() print(zero_operator) ``` Note that FermionOperator has only one attribute: .terms. This attribute is the dictionary which stores the term tuples. ``` my_operator = FermionOperator('4^ 1^ 3 9', 1. + 2.j) print(my_operator) print(my_operator.terms) ``` ## Manipulating the FermionOperator data structure So far we have explained how to initialize a single FermionOperator such as $-1.7 \, a^\dagger_3 a_1$. However, in general we will want to represent sums of these operators such as $(1 + 2i) \, a^\dagger_4 a^\dagger_3 a_9 a_1 - 1.7 \, a^\dagger_3 a_1$. 
To do this, just add together two FermionOperators! We demonstrate below. ``` from openfermion.ops import FermionOperator term_1 = FermionOperator('4^ 3^ 9 1', 1. + 2.j) term_2 = FermionOperator('3^ 1', -1.7) my_operator = term_1 + term_2 print(my_operator) my_operator = FermionOperator('4^ 3^ 9 1', 1. + 2.j) term_2 = FermionOperator('3^ 1', -1.7) my_operator += term_2 print('') print(my_operator) ``` The print function prints each term in the operator on a different line. Note that the line my_operator = term_1 + term_2 creates a new object, which involves a copy of term_1 and term_2. The second block of code uses the inplace method +=, which is more efficient. This is especially important when trying to construct a very large FermionOperator. FermionOperators also support a wide range of builtins including, str(), repr(), ==, !=, *=, *, /, /=, +, +=, -, -=, - and **. Note that since FermionOperators involve floats, == and != check for (in)equality up to numerical precision. We demonstrate some of these methods below. ``` term_1 = FermionOperator('4^ 3^ 9 1', 1. + 2.j) term_2 = FermionOperator('3^ 1', -1.7) my_operator = term_1 - 33. * term_2 print(my_operator) my_operator *= 3.17 * (term_2 + term_1) ** 2 print('') print(my_operator) print('') print(term_2 ** 3) print('') print(term_1 == 2.*term_1 - term_1) print(term_1 == my_operator) ``` Additionally, there are a variety of methods that act on the FermionOperator data structure. We demonstrate a small subset of those methods here. ``` from openfermion.utils import commutator, count_qubits, hermitian_conjugated from openfermion.transforms import normal_ordered # Get the Hermitian conjugate of a FermionOperator, count its qubit, check if it is normal-ordered. term_1 = FermionOperator('4^ 3 3^', 1. + 2.j) print(hermitian_conjugated(term_1)) print(term_1.is_normal_ordered()) print(count_qubits(term_1)) # Normal order the term. 
term_2 = normal_ordered(term_1) print('') print(term_2) print(term_2.is_normal_ordered()) # Compute a commutator of the terms. print('') print(commutator(term_1, term_2)) ``` ## The QubitOperator data structure The QubitOperator data structure is another essential part of openfermion. As the name suggests, QubitOperator is used to store qubit operators in almost exactly the same way that FermionOperator is used to store fermion operators. For instance $X_0 Z_3 Y_4$ is a QubitOperator. The internal representation of this as a terms tuple would be $((0, \textrm{"X"}), (3, \textrm{"Z"}), (4, \textrm{"Y"}))$. Note that one important difference between QubitOperator and FermionOperator is that the terms in QubitOperator are always sorted in order of tensor factor. In some cases, this enables faster manipulation. We initialize some QubitOperators below. ``` from openfermion.ops import QubitOperator my_first_qubit_operator = QubitOperator('X1 Y2 Z3') print(my_first_qubit_operator) print(my_first_qubit_operator.terms) operator_2 = QubitOperator('X3 Z4', 3.17) operator_2 -= 77. * my_first_qubit_operator print('') print(operator_2) ``` ## Jordan-Wigner and Bravyi-Kitaev openfermion provides functions for mapping FermionOperators to QubitOperators. ``` from openfermion.ops import FermionOperator from openfermion.transforms import jordan_wigner, bravyi_kitaev from openfermion.utils import hermitian_conjugated from openfermion.linalg import eigenspectrum # Initialize an operator. fermion_operator = FermionOperator('2^ 0', 3.17) fermion_operator += hermitian_conjugated(fermion_operator) print(fermion_operator) # Transform to qubits under the Jordan-Wigner transformation and print its spectrum. jw_operator = jordan_wigner(fermion_operator) print('') print(jw_operator) jw_spectrum = eigenspectrum(jw_operator) print(jw_spectrum) # Transform to qubits under the Bravyi-Kitaev transformation and print its spectrum. 
bk_operator = bravyi_kitaev(fermion_operator) print('') print(bk_operator) bk_spectrum = eigenspectrum(bk_operator) print(bk_spectrum) ``` We see that despite the different representation, these operators are iso-spectral. We can also apply the Jordan-Wigner transform in reverse to map arbitrary QubitOperators to FermionOperators. Note that we also demonstrate the .compress() method (a method on both FermionOperators and QubitOperators) which removes zero entries. ``` from openfermion.transforms import reverse_jordan_wigner # Initialize QubitOperator. my_operator = QubitOperator('X0 Y1 Z2', 88.) my_operator += QubitOperator('Z1 Z4', 3.17) print(my_operator) # Map QubitOperator to a FermionOperator. mapped_operator = reverse_jordan_wigner(my_operator) print('') print(mapped_operator) # Map the operator back to qubits and make sure it is the same. back_to_normal = jordan_wigner(mapped_operator) back_to_normal.compress() print('') print(back_to_normal) ``` ## Sparse matrices and the Hubbard model Often, one would like to obtain a sparse matrix representation of an operator which can be analyzed numerically. There is code in both openfermion.transforms and openfermion.utils which facilitates this. The function get_sparse_operator converts either a FermionOperator, a QubitOperator or other more advanced classes such as InteractionOperator to a scipy.sparse.csc matrix. There are numerous functions in openfermion.utils which one can call on the sparse operators such as "get_gap", "get_hartree_fock_state", "get_ground_state", etc. We show this off by computing the ground state energy of the Hubbard model. To do that, we use code from the openfermion.hamiltonians module which constructs lattice models of fermions such as Hubbard models. ``` from openfermion.hamiltonians import fermi_hubbard from openfermion.linalg import get_sparse_operator, get_ground_state from openfermion.transforms import jordan_wigner # Set model. x_dimension = 2 y_dimension = 2 tunneling = 2. 
coulomb = 1. magnetic_field = 0.5 chemical_potential = 0.25 periodic = 1 spinless = 1 # Get fermion operator. hubbard_model = fermi_hubbard( x_dimension, y_dimension, tunneling, coulomb, chemical_potential, magnetic_field, periodic, spinless) print(hubbard_model) # Get qubit operator under Jordan-Wigner. jw_hamiltonian = jordan_wigner(hubbard_model) jw_hamiltonian.compress() print('') print(jw_hamiltonian) # Get scipy.sparse.csc representation. sparse_operator = get_sparse_operator(hubbard_model) print('') print(sparse_operator) print('\nEnergy of the model is {} in units of T and J.'.format( get_ground_state(sparse_operator)[0])) ``` ## Hamiltonians in the plane wave basis A user can write plugins to openfermion which allow for the use of, e.g., third-party electronic structure package to compute molecular orbitals, Hamiltonians, energies, reduced density matrices, coupled cluster amplitudes, etc using Gaussian basis sets. We may provide scripts which interface between such packages and openfermion in future but do not discuss them in this tutorial. When using simpler basis sets such as plane waves, these packages are not needed. openfermion comes with code which computes Hamiltonians in the plane wave basis. Note that when using plane waves, one is working with the periodized Coulomb operator, best suited for condensed phase calculations such as studying the electronic structure of a solid. To obtain these Hamiltonians one must choose to study the system without a spin degree of freedom (spinless), one must then specify the dimension in which the calculation is performed (n_dimensions, usually 3), one must specify how many plane waves are in each dimension (grid_length) and one must specify the length scale of the plane wave harmonics in each dimension (length_scale) and also the locations and charges of the nuclei. One can generate these models with plane_wave_hamiltonian() found in openfermion.hamiltonians. 
For simplicity, below we compute the Hamiltonian in the case of zero external charge (corresponding to the uniform electron gas, aka jellium). We also demonstrate that one can transform the plane wave Hamiltonian using a Fourier transform without affecting the spectrum of the operator. ``` from openfermion.hamiltonians import jellium_model from openfermion.utils import Grid from openfermion.linalg import eigenspectrum from openfermion.transforms import jordan_wigner, fourier_transform # Let's look at a very small model of jellium in 1D. grid = Grid(dimensions=1, length=3, scale=1.0) spinless = True # Get the momentum Hamiltonian. momentum_hamiltonian = jellium_model(grid, spinless) momentum_qubit_operator = jordan_wigner(momentum_hamiltonian) momentum_qubit_operator.compress() print(momentum_qubit_operator) # Fourier transform the Hamiltonian to the position basis. position_hamiltonian = fourier_transform(momentum_hamiltonian, grid, spinless) position_qubit_operator = jordan_wigner(position_hamiltonian) position_qubit_operator.compress() print('') print (position_qubit_operator) # Check the spectra to make sure these representations are iso-spectral. spectral_difference = eigenspectrum(momentum_qubit_operator) - eigenspectrum(position_qubit_operator) print('') print(spectral_difference) ``` ## Basics of MolecularData class Data from electronic structure calculations can be saved in an OpenFermion data structure called MolecularData, which makes it easy to access within our library. Often, one would like to analyze a chemical series or look at many different Hamiltonians and sometimes the electronic structure calculations are either expensive to compute or difficult to converge (e.g. one needs to mess around with different types of SCF routines to make things converge). 
Accordingly, we anticipate that users will want some way to automatically database the results of their electronic structure calculations so that important data (such as the SCF integrals) can be looked up on-the-fly if the user has computed them in the past. OpenFermion supports a data provenance strategy which saves key results of the electronic structure calculation (including pointers to files containing large amounts of data, such as the molecular integrals) in an HDF5 container. The MolecularData class stores information about molecules. One initializes a MolecularData object by specifying parameters of a molecule such as its geometry, basis, multiplicity, charge and an optional string describing it. One can also initialize MolecularData simply by providing a string giving a filename where a previous MolecularData object was saved in an HDF5 container. One can save a MolecularData instance by calling the class's .save() method. This automatically saves the instance in a data folder specified during OpenFermion installation. The name of the file is generated automatically from the instance attributes and optionally provided description. Alternatively, a filename can also be provided as an optional input if one wishes to manually name the file. When electronic structure calculations are run, the data files for the molecule can be automatically updated. If one wishes to later use that data they either initialize MolecularData with the instance filename or initialize the instance and then later call the .load() method. Basis functions are provided to initialization using a string such as "6-31g". Geometries can be specified using a simple txt input file (see geometry_from_file function in molecular_data.py) or can be passed using a simple python list format demonstrated below. Atoms are specified using a string for their atomic symbol. Distances should be provided in angstrom. 
Below we initialize a simple instance of MolecularData without performing any electronic structure calculations. ``` from openfermion.chem import MolecularData # Set parameters to make a simple molecule. diatomic_bond_length = .7414 geometry = [('H', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))] basis = 'sto-3g' multiplicity = 1 charge = 0 description = str(diatomic_bond_length) # Make molecule and print out a few interesting facts about it. molecule = MolecularData(geometry, basis, multiplicity, charge, description) print('Molecule has automatically generated name {}'.format( molecule.name)) print('Information about this molecule would be saved at:\n{}\n'.format( molecule.filename)) print('This molecule has {} atoms and {} electrons.'.format( molecule.n_atoms, molecule.n_electrons)) for atom, atomic_number in zip(molecule.atoms, molecule.protons): print('Contains {} atom, which has {} protons.'.format( atom, atomic_number)) ``` If we had previously computed this molecule using an electronic structure package, we can call molecule.load() to populate all sorts of interesting fields in the data structure. Though we make no assumptions about what electronic structure packages users might install, we assume that the calculations are saved in OpenFermion's MolecularData objects. Currently plugins are available for [Psi4](http://psicode.org/) [(OpenFermion-Psi4)](http://github.com/quantumlib/OpenFermion-Psi4) and [PySCF](https://github.com/sunqm/pyscf) [(OpenFermion-PySCF)](http://github.com/quantumlib/OpenFermion-PySCF), and there may be more in the future. For the purposes of this example, we will load data that ships with OpenFermion to make a plot of the energy surface of hydrogen. Note that helper functions to initialize some interesting chemical benchmarks are found in openfermion.utils. ``` # Set molecule parameters. basis = 'sto-3g' multiplicity = 1 bond_length_interval = 0.1 n_points = 25 # Generate molecule at different bond lengths. 
hf_energies = [] fci_energies = [] bond_lengths = [] for point in range(3, n_points + 1): bond_length = bond_length_interval * point bond_lengths += [bond_length] description = str(round(bond_length,2)) print(description) geometry = [('H', (0., 0., 0.)), ('H', (0., 0., bond_length))] molecule = MolecularData( geometry, basis, multiplicity, description=description) # Load data. molecule.load() # Print out some results of calculation. print('\nAt bond length of {} angstrom, molecular hydrogen has:'.format( bond_length)) print('Hartree-Fock energy of {} Hartree.'.format(molecule.hf_energy)) print('MP2 energy of {} Hartree.'.format(molecule.mp2_energy)) print('FCI energy of {} Hartree.'.format(molecule.fci_energy)) print('Nuclear repulsion energy between protons is {} Hartree.'.format( molecule.nuclear_repulsion)) for orbital in range(molecule.n_orbitals): print('Spatial orbital {} has energy of {} Hartree.'.format( orbital, molecule.orbital_energies[orbital])) hf_energies += [molecule.hf_energy] fci_energies += [molecule.fci_energy] # Plot. import matplotlib.pyplot as plt %matplotlib inline plt.figure(0) plt.plot(bond_lengths, fci_energies, 'x-') plt.plot(bond_lengths, hf_energies, 'o-') plt.ylabel('Energy in Hartree') plt.xlabel('Bond length in angstrom') plt.show() ``` The geometry data needed to generate MolecularData can also be retrieved from the PubChem online database by inputting the molecule's name. ``` from openfermion.chem import geometry_from_pubchem methane_geometry = geometry_from_pubchem('methane') print(methane_geometry) ``` ## InteractionOperator and InteractionRDM for efficient numerical representations Fermion Hamiltonians can be expressed as $H = h_0 + \sum_{pq} h_{pq}\, a^\dagger_p a_q + \frac{1}{2} \sum_{pqrs} h_{pqrs} \, a^\dagger_p a^\dagger_q a_r a_s$ where $h_0$ is a constant shift due to the nuclear repulsion and $h_{pq}$ and $h_{pqrs}$ are the famous molecular integrals. 
Since fermions interact pairwise, their energy is thus a unique function of the one-particle and two-particle reduced density matrices which are expressed in second quantization as $\rho_{pq} = \left \langle \Psi \mid a^\dagger_p a_q \mid \Psi \right \rangle$ and $\rho_{pqrs} = \left \langle \Psi \mid a^\dagger_p a^\dagger_q a_r a_s \mid \Psi \right \rangle$, respectively. Because the RDMs and molecular Hamiltonians are both compactly represented and manipulated as 2- and 4- index tensors, we can represent them in a particularly efficient form using similar data structures. The InteractionOperator data structure can be initialized for a Hamiltonian by passing the constant $h_0$ (or 0), as well as numpy arrays representing $h_{pq}$ (or $\rho_{pq}$) and $h_{pqrs}$ (or $\rho_{pqrs}$). Importantly, InteractionOperators can also be obtained by calling MolecularData.get_molecular_hamiltonian() or by calling the function get_interaction_operator() (found in openfermion.transforms) on a FermionOperator. The InteractionRDM data structure is similar but represents RDMs. For instance, one can get a molecular RDM by calling MolecularData.get_molecular_rdm(). When generating Hamiltonians from the MolecularData class, one can choose to restrict the system to an active space. These classes inherit from the same base class, PolynomialTensor. This data structure overloads the slice operator [] so that one can get or set the key attributes of the InteractionOperator: $\textrm{.constant}$, $\textrm{.one_body_coefficients}$ and $\textrm{.two_body_coefficients}$ . For instance, InteractionOperator[(p, 1), (q, 1), (r, 0), (s, 0)] would return $h_{pqrs}$ and InteractionRDM would return $\rho_{pqrs}$. Importantly, the class supports fast basis transformations using the method PolynomialTensor.rotate_basis(rotation_matrix). But perhaps most importantly, one can map the InteractionOperator to any of the other data structures we've described here. 
Below, we load MolecularData from a saved calculation of LiH. We then obtain an InteractionOperator representation of this system in an active space. We then map that operator to qubits. We then demonstrate that one can rotate the orbital basis of the InteractionOperator using random angles to obtain a totally different operator that is still iso-spectral. ``` from openfermion.chem import MolecularData from openfermion.transforms import get_fermion_operator, jordan_wigner from openfermion.linalg import get_ground_state, get_sparse_operator import numpy import scipy import scipy.linalg # Load saved file for LiH. diatomic_bond_length = 1.45 geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))] basis = 'sto-3g' multiplicity = 1 # Set Hamiltonian parameters. active_space_start = 1 active_space_stop = 3 # Generate and populate instance of MolecularData. molecule = MolecularData(geometry, basis, multiplicity, description="1.45") molecule.load() # Get the Hamiltonian in an active space. molecular_hamiltonian = molecule.get_molecular_hamiltonian( occupied_indices=range(active_space_start), active_indices=range(active_space_start, active_space_stop)) # Map operator to fermions and qubits. fermion_hamiltonian = get_fermion_operator(molecular_hamiltonian) qubit_hamiltonian = jordan_wigner(fermion_hamiltonian) qubit_hamiltonian.compress() print('The Jordan-Wigner Hamiltonian in canonical basis follows:\n{}'.format(qubit_hamiltonian)) # Get sparse operator and ground state energy. sparse_hamiltonian = get_sparse_operator(qubit_hamiltonian) energy, state = get_ground_state(sparse_hamiltonian) print('Ground state energy before rotation is {} Hartree.\n'.format(energy)) # Randomly rotate. n_orbitals = molecular_hamiltonian.n_qubits // 2 n_variables = int(n_orbitals * (n_orbitals - 1) / 2) numpy.random.seed(1) random_angles = numpy.pi * (1. - 2. 
* numpy.random.rand(n_variables)) kappa = numpy.zeros((n_orbitals, n_orbitals)) index = 0 for p in range(n_orbitals): for q in range(p + 1, n_orbitals): kappa[p, q] = random_angles[index] kappa[q, p] = -numpy.conjugate(random_angles[index]) index += 1 # Build the unitary rotation matrix. difference_matrix = kappa + kappa.transpose() rotation_matrix = scipy.linalg.expm(kappa) # Apply the unitary. molecular_hamiltonian.rotate_basis(rotation_matrix) # Get qubit Hamiltonian in rotated basis. qubit_hamiltonian = jordan_wigner(molecular_hamiltonian) qubit_hamiltonian.compress() print('The Jordan-Wigner Hamiltonian in rotated basis follows:\n{}'.format(qubit_hamiltonian)) # Get sparse Hamiltonian and energy in rotated basis. sparse_hamiltonian = get_sparse_operator(qubit_hamiltonian) energy, state = get_ground_state(sparse_hamiltonian) print('Ground state energy after rotation is {} Hartree.'.format(energy)) ``` ## Quadratic Hamiltonians and Slater determinants The general electronic structure Hamiltonian $H = h_0 + \sum_{pq} h_{pq}\, a^\dagger_p a_q + \frac{1}{2} \sum_{pqrs} h_{pqrs} \, a^\dagger_p a^\dagger_q a_r a_s$ contains terms that act on up to 4 sites, or is quartic in the fermionic creation and annihilation operators. However, in many situations we may fruitfully approximate these Hamiltonians by replacing these quartic terms with terms that act on at most 2 fermionic sites, or quadratic terms, as in mean-field approximation theory. These Hamiltonians have a number of special properties one can exploit for efficient simulation and manipulation of the Hamiltonian, thus warranting a special data structure. We refer to Hamiltonians which only contain terms that are quadratic in the fermionic creation and annihilation operators as quadratic Hamiltonians, and include the general case of non-particle conserving terms as in a general Bogoliubov transformation. 
Eigenstates of quadratic Hamiltonians can be prepared efficiently on both a quantum and classical computer, making them amenable to initial guesses for many more challenging problems. A general quadratic Hamiltonian takes the form $$H = \sum_{p, q} (M_{pq} - \mu \delta_{pq}) a^\dagger_p a_q + \frac{1}{2} \sum_{p, q} (\Delta_{pq} a^\dagger_p a^\dagger_q + \Delta_{pq}^* a_q a_p) + \text{constant},$$ where $M$ is a Hermitian matrix, $\Delta$ is an antisymmetric matrix, $\delta_{pq}$ is the Kronecker delta symbol, and $\mu$ is a chemical potential term which we keep separate from $M$ so that we can use it to adjust the expectation of the total number of particles. In OpenFermion, quadratic Hamiltonians are conveniently represented and manipulated using the QuadraticHamiltonian class, which stores $M$, $\Delta$, $\mu$ and the constant. It is specialized to exploit the properties unique to quadratic Hamiltonians. Like InteractionOperator and InteractionRDM, it inherits from the PolynomialTensor class. The BCS mean-field model of superconductivity is a quadratic Hamiltonian. The following code constructs an instance of this model as a FermionOperator, converts it to a QuadraticHamiltonian, and then computes its ground energy: ``` from openfermion.hamiltonians import mean_field_dwave from openfermion.transforms import get_quadratic_hamiltonian # Set model. x_dimension = 2 y_dimension = 2 tunneling = 2. sc_gap = 1. periodic = True # Get FermionOperator. 
mean_field_model = mean_field_dwave( x_dimension, y_dimension, tunneling, sc_gap, periodic=periodic) # Convert to QuadraticHamiltonian quadratic_hamiltonian = get_quadratic_hamiltonian(mean_field_model) # Compute the ground energy ground_energy = quadratic_hamiltonian.ground_energy() print(ground_energy) ``` Any quadratic Hamiltonian may be rewritten in the form $$H = \sum_p \varepsilon_p b^\dagger_p b_p + \text{constant},$$ where the $b_p$ are new annihilation operators that satisfy the fermionic anticommutation relations, and which are linear combinations of the old creation and annihilation operators. This form of $H$ makes it easy to deduce its eigenvalues; they are sums of subsets of the $\varepsilon_p$, which we call the orbital energies of $H$. The following code computes the orbital energies and the constant: ``` orbital_energies, constant = quadratic_hamiltonian.orbital_energies() print(orbital_energies) print() print(constant) ``` Eigenstates of quadratic hamiltonians are also known as fermionic Gaussian states, and they can be prepared efficiently on a quantum computer. One can use OpenFermion to obtain circuits for preparing these states. 
The following code obtains the description of a circuit which prepares the ground state (operations that can be performed in parallel are grouped together), along with a description of the starting state to which the circuit should be applied: ``` from openfermion.circuits import gaussian_state_preparation_circuit circuit_description, start_orbitals = gaussian_state_preparation_circuit(quadratic_hamiltonian) for parallel_ops in circuit_description: print(parallel_ops) print('') print(start_orbitals) ``` In the circuit description, each elementary operation is either a tuple of the form $(i, j, \theta, \varphi)$, indicating the operation $\exp[i \varphi a_j^\dagger a_j]\exp[\theta (a_i^\dagger a_j - a_j^\dagger a_i)]$, which is a Givens rotation of modes $i$ and $j$, or the string 'pht', indicating the particle-hole transformation on the last fermionic mode, which is the operator $\mathcal{B}$ such that $\mathcal{B} a_N \mathcal{B}^\dagger = a_N^\dagger$ and leaves the rest of the ladder operators unchanged. Operations that can be performed in parallel are grouped together. In the special case that a quadratic Hamiltonian conserves particle number ($\Delta = 0$), its eigenstates take the form $$\lvert \Psi_S \rangle = b^\dagger_{1}\cdots b^\dagger_{N_f}\lvert \text{vac} \rangle,\qquad b^\dagger_{p} = \sum_{q=1}^N Q_{pq}a^\dagger_q,$$ where $Q$ is an $N_f \times N$ matrix with orthonormal rows. These states are also known as Slater determinants. OpenFermion also provides functionality to obtain circuits for preparing Slater determinants starting with the matrix $Q$ as the input.
github_jupyter
# Historical Variance Let's see how we'd be calculating a covariance matrix of assets without the help of a factor model ``` import sys !{sys.executable} -m pip install -r requirements.txt import numpy as np import pandas as pd import time import os import quiz_helper import matplotlib.pyplot as plt %matplotlib inline plt.style.use('ggplot') plt.rcParams['figure.figsize'] = (14, 8) ``` ### data bundle ``` import os import quiz_helper from zipline.data import bundles os.environ['ZIPLINE_ROOT'] = os.path.join(os.getcwd(), '..', '..','data','module_4_quizzes_eod') ingest_func = bundles.csvdir.csvdir_equities(['daily'], quiz_helper.EOD_BUNDLE_NAME) bundles.register(quiz_helper.EOD_BUNDLE_NAME, ingest_func) print('Data Registered') ``` ### Build pipeline engine ``` from zipline.pipeline import Pipeline from zipline.pipeline.factors import AverageDollarVolume from zipline.utils.calendars import get_calendar universe = AverageDollarVolume(window_length=120).top(500) trading_calendar = get_calendar('NYSE') bundle_data = bundles.load(quiz_helper.EOD_BUNDLE_NAME) engine = quiz_helper.build_pipeline_engine(bundle_data, trading_calendar) ``` ### View Data¶ With the pipeline engine built, let's get the stocks at the end of the period in the universe we're using. We'll use these tickers to generate the returns data for the our risk model. 
``` universe_end_date = pd.Timestamp('2016-01-05', tz='UTC') universe_tickers = engine\ .run_pipeline( Pipeline(screen=universe), universe_end_date, universe_end_date)\ .index.get_level_values(1)\ .values.tolist() universe_tickers len(universe_tickers) from zipline.data.data_portal import DataPortal data_portal = DataPortal( bundle_data.asset_finder, trading_calendar=trading_calendar, first_trading_day=bundle_data.equity_daily_bar_reader.first_trading_day, equity_minute_reader=None, equity_daily_reader=bundle_data.equity_daily_bar_reader, adjustment_reader=bundle_data.adjustment_reader) ``` ## Get pricing data helper function ``` from quiz_helper import get_pricing ``` ## get pricing data into a dataframe ``` returns_df = \ get_pricing( data_portal, trading_calendar, universe_tickers, universe_end_date - pd.DateOffset(years=5), universe_end_date)\ .pct_change()[1:].fillna(0) #convert prices into returns returns_df ``` ## Quiz 1 Check out the [numpy.cov documentation](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.cov.html). Then think about what's wrong with the following use of numpy.cov ``` # What's wrong with this? annualization_factor = 252 covariance_assets_not_correct = annualization_factor*np.cov(returns_df) ## TODO: Check the shape of the covariance matrix ``` ## Answer 1 here: ## Quiz 2 How can you adjust the input so that we get the desired covariance matrix of assets? ``` # TODO: calculate the covariance matrix of assets annualization_factor = # ... covariance_assets = # ... covariance_assets.shape ``` ## Answer 2: ## Visualize the covariance matrix ``` import seaborn as sns # view a heatmap of the covariance matrix sns.heatmap(covariance_assets,cmap='Paired'); ## If the colors aren't distinctive, please try a couple of these color schemes: ## cmap = 'tab10' # cmap = 'Accent' ``` ## Quiz 3 Looking at the colormap are covariances more likely to be positive or negative? Are covariances likely to be above 0.10 or below 0.10? 
## Answer 3 here: ## Fun Quiz! Do you know what the [seaborn visualization package](https://seaborn.pydata.org/index.html) was named after? ## Fun Answer! here or just check the solution notebook! ## Solutions The [solution notebook is here](historical_variance_solution.ipynb)
github_jupyter
``` pwd import pandas as pd import numpy as np df_csv= pd.read_pickle("../df_noplus/df5.pkl") all_subjects=df_csv['COURSEID'].value_counts() ##removing any subject enrolled less than 20 times #all_subjects=all_subjects[all_subjects>=20] print df_csv.shape df_csv=df_csv[df_csv["COURSEID"].isin(all_subjects.index)] print df_csv.shape for subject,count in all_subjects.iteritems(): print "%s "%subject, ##load data dfx=df_csv[df_csv['COURSEID']==subject] dfx=dfx.iloc[np.random.permutation(len(dfx))] ##convert to np.array #pattern X=dfx.as_matrix( dfx.columns[4:] ) #label y=dfx.as_matrix( ['GRADE'] ).T[0] ##evaluation indices import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_classification from sklearn.ensemble import ExtraTreesClassifier subject='CS284' print subject def get_importances20(subject,pattern_names): ##load data dfx=df_csv[df_csv['COURSEID']==subject] dfx=dfx.iloc[np.random.permutation(len(dfx))] ##convert to np.array X=dfx.as_matrix( dfx.columns[4:] ) y=dfx.as_matrix( ['GRADE'] ).T[0] # Build a forest and compute the feature importances forest = ExtraTreesClassifier(n_estimators=250, random_state=0) forest.fit(X, y) importances = forest.feature_importances_ #std = np.std([tree.feature_importances_ for tree in forest.estimators_], # axis=0) indices = np.argsort(importances)[::-1] obj=pattern_names[indices[(importances[indices].cumsum()<0.2) & (importances[indices] > 0.05) ]] import_list = [i for i in obj][:3] return (import_list, indices, importances) # Print the feature ranking print("feature important ranking:") pattern_names=df_csv.columns[4:] import_list, indices, importances = get_importances20(subject,pattern_names) print import_list for f in range(X.shape[1]): print("subject: %s, importance:%.3f, acc_sum:%.3f" % ( pattern_names[indices[f]], importances[indices[f]], importances[indices].cumsum()[f])) ##generate importance def add_dot(x): return 'cstu.'+x[:2]+'.'+x pattern_names=df_csv.columns[4:] data3=[] for s in 
all_subjects.index: clist = map( add_dot , get_importances20(s,pattern_names)[0] ) obj={'name':add_dot(s), 'imports': clist } data3.append(obj) data3 import json with open('bundle_files/flare-imports.json','w') as fp: json.dump(data3, fp,indent=2, separators=(', ', ': ')) ##load imported list, json data3 import json with open('bundle_files/flare-imports.json','r') as fp: data3=json.load(fp) ##filtered non-connected subject out from data3 imported_subjects={} for i in data3: for j in i['imports']: if j in imported_subjects: imported_subjects[j]+=1 else: imported_subjects[j]=0 data4=[] for i in data3: if i['name'] in imported_subjects or i['imports']: data4.append(i) else: print i import json with open('bundle_files/flare-imports.json','w') as fp: json.dump(data4, fp,indent=2, separators=(', ', ': ')) data4 import operator sorted_imsub = sorted(imported_subjects.items(), key=operator.itemgetter(1), reverse=True) sorted_imsub y=x['imports'] for k in x.iterkeys(): print k len(data3) ```
github_jupyter
# First look at our dataset In this notebook, we will look at the necessary steps required before any machine learning takes place. It involves: * loading the data; * looking at the variables in the dataset, in particular, differentiate between numerical and categorical variables, which need different preprocessing in most machine learning workflows; * visualizing the distribution of the variables to gain some insights into the dataset. ## Loading the adult census dataset We will use data from the 1994 US census that we downloaded from [OpenML](http://openml.org/). You can look at the OpenML webpage to learn more about this dataset: <http://www.openml.org/d/1590> The dataset is available as a CSV (Comma-Separated Values) file and we will use pandas to read it. <div class="admonition note alert alert-info"> <p class="first admonition-title" style="font-weight: bold;">Note</p> <p class="last"><a class="reference external" href="https://pandas.pydata.org/">Pandas</a> is a Python library used for manipulating 1 and 2 dimensional structured data. If you have never used pandas, we recommend you look at this <a class="reference external" href="https://pandas.pydata.org/docs/user_guide/10min.html">tutorial</a>.</p> </div> ``` import pandas as pd adult_census = pd.read_csv("../datasets/adult-census.csv") ``` The goal with this data is to predict whether a person earns over 50K a year from heterogeneous data such as age, employment, education, family information, etc. ## The variables (columns) in the dataset The data are stored in a pandas dataframe. A dataframe is a type of structured data composed of 2 dimensions. This type of data is also referred as tabular data. Each row represents a sample. In the field of machine learning or descriptive statistics, commonly used equivalent terms are "record", "instance", or "observation". Each column represents a type of information that has been collected and is called a feature. 
In the field of machine learning and descriptive statistics, commonly used equivalent terms are "variable", "attribute", or "covariate". A quick way to inspect the dataframe is to show the first few lines with the `head` method: ``` adult_census.head() ``` The column named **class** is our target variable (i.e., the variable which we want to predict). The two possible classes are `<=50K` (low-revenue) and `>50K` (high-revenue). The resulting prediction problem is therefore a binary classification problem, while we will use the other columns as input variables for our model. ``` target_column = 'class' adult_census[target_column].value_counts() ``` <div class="admonition note alert alert-info"> <p class="first admonition-title" style="font-weight: bold;">Note</p> <p>Classes are slightly imbalanced, meaning there are more samples of one or more classes compared to others. Class imbalance happens often in practice and may need special techniques when building a predictive model.</p> <p class="last">For example in a medical setting, if we are trying to predict whether subjects will develop a rare disease, there will be a lot more healthy subjects than ill subjects in the dataset.</p> </div> The dataset contains both numerical and categorical data. Numerical values take continuous values, for example `age`. Categorical values can have a finite number of values, for example `native-country`. 
``` numerical_columns = [ 'age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week'] categorical_columns = [ 'workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country'] all_columns = numerical_columns + categorical_columns + [ target_column] adult_census = adult_census[all_columns] ``` We can check the number of samples and the number of columns available in the dataset: ``` print(f"The dataset contains {adult_census.shape[0]} samples and " f"{adult_census.shape[1]} columns") ``` We can compute the number of features by counting the number of columns and subtract 1, since one of the columns is the target. ``` print(f"The dataset contains {adult_census.shape[1] - 1} features.") ``` ## Visual inspection of the data Before building a predictive model, it is a good idea to look at the data: * maybe the task you are trying to achieve can be solved without machine learning; * you need to check that the information you need for your task is actually present in the dataset; * inspecting the data is a good way to find peculiarities. These can arise during data collection (for example, malfunctioning sensor or missing values), or from the way the data is processed afterwards (for example capped values). Let's look at the distribution of individual features, to get some insights about the data. We can start by plotting histograms, note that this only works for features containing numerical values: ``` _ = adult_census.hist(figsize=(20, 14)) ``` <div class="admonition tip alert alert-warning"> <p class="first admonition-title" style="font-weight: bold;">Tip</p> <p class="last">In the previous cell, we used the following pattern: <tt class="docutils literal">_ = func()</tt>. We do this to avoid showing the output of <tt class="docutils literal">func()</tt> which in this case is not that useful. 
We actually assign the output of <tt class="docutils literal">func()</tt> into the variable <tt class="docutils literal">_</tt> (called underscore). By convention, in Python the underscore variable is used as a "garbage" variable to store results that we are not interested in.</p> </div> We can already make a few comments about some of the variables: * `age`: there are not that many points for `age > 70`. The dataset description does indicate that retired people have been filtered out (`hours-per-week > 0`); * `education-num`: peak at 10 and 13, hard to tell what it corresponds to without looking much further. We'll do that later in this notebook; * `hours-per-week` peaks at 40, this was very likely the standard number of working hours at the time of the data collection; * most values of `capital-gain` and `capital-loss` are close to zero. For categorical variables, we can look at the distribution of values: ``` adult_census['sex'].value_counts() adult_census['education'].value_counts() ``` As noted above, `education-num` distribution has two clear peaks around 10 and 13. It would be reasonable to expect that `education-num` is the number of years of education. Let's look at the relationship between `education` and `education-num`. ``` pd.crosstab(index=adult_census['education'], columns=adult_census['education-num']) ``` This shows that `education` and `education-num` give you the same information. For example, `education-num=2` is equivalent to `education='1st-4th'`. In practice that means we can remove `education-num` without losing information. Note that having redundant (or highly correlated) columns can be a problem for machine learning algorithms. 
<div class="admonition note alert alert-info"> <p class="first admonition-title" style="font-weight: bold;">Note</p> <p class="last">In the upcoming notebooks, we will only keep the <tt class="docutils literal">education</tt> variable, excluding the <tt class="docutils literal"><span class="pre">education-num</span></tt> variable.</p> </div> Another way to inspect the data is to do a `pairplot` and show how each variable differs according to our target, i.e. `class`. Plots along the diagonal show the distribution of individual variables for each `class`. The plots on the off-diagonal can reveal interesting interactions between variables. ``` import seaborn as sns n_samples_to_plot = 5000 columns = ['age', 'education-num', 'hours-per-week'] _ = sns.pairplot(data=adult_census[:n_samples_to_plot], vars=columns, hue=target_column, plot_kws={'alpha': 0.2}, height=3, diag_kind='hist', diag_kws={'bins': 30}) ``` By looking at the data you could infer some hand-written rules to predict the class: * if you are young (less than 25 year-old roughly), you are in the `<=50K` class; * if you are old (more than 70 year-old roughly), you are in the `<=50K` class; * if you work part-time (less than 40 hours roughly) you are in the `<=50K` class. These hand-written rules could work reasonably well without the need for any machine learning. Note however that it is not very easy to create rules for the region `40 < hours-per-week < 60` and `30 < age < 70`. We can hope that machine learning can help in this region. Also note that visualization can help creating hand-written rules but is limited to 2 dimensions (maybe 3 dimensions), whereas machine learning models can build models in high-dimensional spaces. 
<div class="admonition note alert alert-info"> <p class="first admonition-title" style="font-weight: bold;">Note</p> <p class="last">In a machine-learning setting, a model automatically creates the "rules" from the data in order to make predictions on new unseen data.</p> </div> Another thing worth mentioning in this plot: if you are young (less than 25 year-old roughly) or old (more than 70 year-old roughly) you tend to work less. This is a non-linear relationship between age and hours per week. Linear machine learning models can only capture linear interactions, so this may be a factor when deciding which model to chose. ## An example of machine learning model decision rules The plot below shows the rules of a simple model, called decision tree. This model has been trained using the `age` and `hours-per-week` features, so that we can have a nice graphical representation of its decision rules in two dimensions. We will explain how this model works in a later notebook, for now let us just consider the model predictions when trained on this dataset: ![](../figures/simple_decision_tree_adult_census.png) The data points (circles) show the distribution of `hours-per-week` and `age` in the dataset. Blue points mean `low-income` and orange points mean `high-income`. This part of the plot is the same as the bottom-left plot in the pairplot above. What is new in this plot is that we have added the model decision rules as background colors. The background color in each area represents the probability of the class `high-income` as estimated by the model. Values towards 0 (dark blue) indicates that the model predicts `low-income` with a high probability. Values towards 1 (dark orange) indicates that the model predicts `high-income` with a high probability. Values towards 0.5 (white) indicates that the model is not very sure about its prediction. Looking at the plot, here is what we can gather: * In the region `age < 28.5` (left region) the prediction is `low-income`. 
The dark blue color indicates that the model is quite sure about its prediction. * In the region `age > 28.5 AND hours-per-week < 40.5` (bottom-right region), the prediction is `low-income`. Note that the blue is a bit lighter that for the left region which means that the algorithm is not as certain in this region. * In the region `age > 28.5 AND hours-per-week > 40.5` (top-right region), the prediction is `low-income`. However the probability of the class `low-income` is very close to 0.5 which means the model is not sure at all about its prediction. It is interesting to see that a simple model creates rules similar to the ones that we could have created by hand. Note that machine learning is really interesting when creating rules by hand is not straightforward, for example because we are in high dimension (many features) or because there are no simple and obvious rules that separate the two classes as in the top-right region In this notebook we have: * loaded the data from a CSV file using `pandas`; * looked at the different kind of variables to differentiate between categorical and numerical variables; * inspected the data with `pandas` and `seaborn`. Data inspection can allow you to decide whether using machine learning is appropriate for your data and to highlight potential peculiarities in your data. Ideas which will be discussed more in details later: * if your target variable is imbalanced (e.g., you have more samples from one target category than another), you may need special techniques for training and evaluating your machine learning model; * having redundant (or highly correlated) columns can be a problem for some machine learning algorithms; * contrary to decision tree, linear models can only capture linear interaction, so be aware of non-linear relationships in your data.
github_jupyter
### Import necessary libraries, set options ``` import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import patsy import seaborn as sns import statsmodels.api as sm import warnings from statsmodels.formula.api import glm pd.set_option('display.max_columns', 125) warnings.filterwarnings("ignore") ``` ### Read in pickled datasets ``` path_to_diss = os.path.join("path/to/dissertation/chapter") path_to_data = os.path.join(path_to_diss, "path/to/data/processed-data") ``` ##### Between-individual ``` data_final = pd.read_pickle(path_to_data + "/pkl/data_final.pkl") print(len(data_final)) data_final.head() data_final_coop = pd.read_pickle(path_to_data + "/pkl/data_final_coop.pkl") print(len(data_final_coop)) data_final_coop.head() data_pid_version = pd.read_pickle(path_to_data + "/pkl/data_pid_version.pkl") print(len(data_pid_version)) data_pid_version.head() data_pid = pd.read_pickle(path_to_data + "/pkl/data_pid.pkl") print(len(data_pid)) data_pid.head() ``` ##### Between-session ``` session_data = pd.read_pickle(path_to_data + "/pkl/session_data.pkl") print(len(session_data)) session_data.head() session_data_coop = pd.read_pickle(path_to_data + "/pkl/session_data_coop.pkl") print(len(session_data_coop)) session_data_coop.head() session_data_pid_version = pd.read_pickle(path_to_data + "/pkl/session_data_pid_version.pkl") print(len(session_data_pid_version)) session_data_pid_version.head() ``` ##### Within-individual ``` data_within = pd.read_pickle(path_to_data + "/pkl/data_within.pkl") print(len(data_within)) data_within.head() data_within_coop = pd.read_pickle(path_to_data + "/pkl/data_within_coop.pkl") print(len(data_within_coop)) data_within_coop.head() data_within_pid = pd.read_pickle(path_to_data + "/pkl/data_within_pid.pkl") print(len(data_within_pid)) data_within_pid.head() ``` ##### Within-session ``` session_data_within = pd.read_pickle(path_to_data + "/pkl/session_data_within.pkl") print(len(session_data_within)) 
session_data_within.head() session_data_within_coop = pd.read_pickle(path_to_data + "/pkl/session_data_within_coop.pkl") print(len(session_data_within_coop)) session_data_within_coop.head() session_data_within_pid = pd.read_pickle(path_to_data + "/pkl/session_data_within_pid.pkl") print(len(session_data_within_pid)) session_data_within_pid.head() ``` ### Generate within-subjects interaction term ``` data_within_pid['game1_int'] = data_within_pid['earned_v1'] * data_within_pid['equal_v1'] data_within_pid['game2_int'] = data_within_pid['earned_v2'] * data_within_pid['equal_v2'] data_within_pid['delta_int'] = data_within_pid['game2_int'] - data_within_pid['game1_int'] data_within_coop['game1_int'] = data_within_coop['earned_v1'] * data_within_coop['equal_v1'] data_within_coop['game2_int'] = data_within_coop['earned_v2'] * data_within_coop['equal_v2'] data_within_coop['delta_int'] = data_within_coop['game2_int'] - data_within_coop['game1_int'] session_data_within_pid['game1_int'] = session_data_within_pid['earned1'] * session_data_within_pid['equal1'] session_data_within_pid['game2_int'] = session_data_within_pid['earned2'] * session_data_within_pid['equal2'] session_data_within_pid['delta_int'] = session_data_within_pid['game2_int'] - session_data_within_pid['game1_int'] session_data_within_coop['game1_int'] = session_data_within_coop['earned1'] * session_data_within_coop['equal1'] session_data_within_coop['game2_int'] = session_data_within_coop['earned2'] * session_data_within_coop['equal2'] session_data_within_coop['delta_int'] = session_data_within_coop['game2_int'] - session_data_within_coop['game1_int'] ``` ### Fairness ~ conditions ##### Overview ``` print(np.mean(data_pid_version.f_score)) print() print(data_pid_version.groupby('version')['f_score'].mean()) print() print(data_pid_version.groupby('condition')['f_score'].mean()) print() print(data_pid_version.groupby(['version', 'condition'])['f_score'].mean()) ``` ##### Figures ``` fig, ax = plt.subplots(1, 3, 
figsize = (15, 5)) for i in range(3): if i == 0: temp = data_pid_version[data_pid_version['version'] == 1].groupby('condition')['f_score'].mean().reset_index() ax[i].set_title("First game only") elif i == 1: temp = data_pid_version[data_pid_version['version'] == 2].groupby('condition')['f_score'].mean().reset_index() ax[i].set_title("Second game only") else: temp = data_pid_version.groupby('condition')['f_score'].mean().reset_index() ax[i].set_title("Pooled") groupedvalues = temp.sort_values(by = ['f_score']).reset_index(drop = True) #pal = sns.color_palette("Reds_d", len(groupedvalues)) #rank = groupedvalues["f_score"].argsort().argsort() #sns.barplot(x='condition', y='f_score', data=groupedvalues, palette=np.array(pal[::-1])[rank], ax=ax[i]) clrs = ['tomato', 'royalblue', 'limegreen', 'orange'] sns.barplot(x = 'condition', y = 'f_score', data = groupedvalues, palette = clrs, ax = ax[i]) for index, row in groupedvalues.iterrows(): ax[i].text(row.name, row.f_score, round(row.f_score, 2), color = 'black', ha = "center") ax[i].set(xlabel = '', ylabel = 'Average fairness score') ax[i].set_xticklabels(['RU', 'EE', 'RE', 'EU']) ax[i].set(ylim=(0, 6)) fig.savefig(os.path.join(path_to_diss, "paper/figures/appendices/figureA5.1.png"), bbox_inches = 'tight', pad_inches = 0.25) ``` ##### Pairwise fairness and preference comparisons ``` dic_more_fair = { 'ee_vs_re': [], 'ee_vs_eu': [], 'ee_vs_ru': [], 're_vs_eu': [], 're_vs_ru': [], 'eu_vs_ru': [] } dic_prefer = { 'ee_vs_re': [], 'ee_vs_eu': [], 'ee_vs_ru': [], 're_vs_eu': [], 're_vs_ru': [], 'eu_vs_ru': [] } for index, row in data_pid.iterrows(): if (row["earned1"] == 1 and row["equal1"] == 1 and row["earned2"] == 0 and row["equal2"] == 1) or \ (row["earned1"] == 0 and row["equal1"] == 1 and row["earned2"] == 1 and row["equal2"] == 1): dic_more_fair['ee_vs_re'].append(row['more_fair']) dic_prefer['ee_vs_re'].append(row['prefer']) elif (row["earned1"] == 1 and row["equal1"] == 1 and row["earned2"] == 1 and row["equal2"] == 0) 
or \ (row["earned1"] == 1 and row["equal1"] == 0 and row["earned2"] == 1 and row["equal2"] == 1): dic_more_fair['ee_vs_eu'].append(row['more_fair']) dic_prefer['ee_vs_eu'].append(row['prefer']) elif (row["earned1"] == 1 and row["equal1"] == 1 and row["earned2"] == 0 and row["equal2"] == 0) or \ (row["earned1"] == 0 and row["equal1"] == 0 and row["earned2"] == 1 and row["equal2"] == 1): dic_more_fair['ee_vs_ru'].append(row['more_fair']) dic_prefer['ee_vs_ru'].append(row['prefer']) elif (row["earned1"] == 0 and row["equal1"] == 1 and row["earned2"] == 1 and row["equal2"] == 0) or \ (row["earned1"] == 1 and row["equal1"] == 0 and row["earned2"] == 0 and row["equal2"] == 1): dic_more_fair['re_vs_eu'].append(row['more_fair']) dic_prefer['re_vs_eu'].append(row['prefer']) elif (row["earned1"] == 0 and row["equal1"] == 1 and row["earned2"] == 0 and row["equal2"] == 0) or \ (row["earned1"] == 0 and row["equal1"] == 0 and row["earned2"] == 0 and row["equal2"] == 1): dic_more_fair['re_vs_ru'].append(row['more_fair']) dic_prefer['re_vs_ru'].append(row['prefer']) elif (row["earned1"] == 1 and row["equal1"] == 0 and row["earned2"] == 0 and row["equal2"] == 0) or \ (row["earned1"] == 0 and row["equal1"] == 0 and row["earned2"] == 1 and row["equal2"] == 0): dic_more_fair['eu_vs_ru'].append(row['more_fair']) dic_prefer['eu_vs_ru'].append(row['prefer']) for key in dic_more_fair.keys(): temp_dict = {} for condition in np.unique(dic_more_fair[key]): if condition != '': temp_dict[condition] = [dic_more_fair[key].count(condition)] dic_more_fair[key] = temp_dict tot = 0 for key2 in dic_more_fair[key].keys(): tot += dic_more_fair[key][key2][0] for key2 in dic_more_fair[key].keys(): raw_count = dic_more_fair[key][key2][0] perc = round(raw_count / tot * 100, 2) dic_more_fair[key][key2].append(perc) dic_more_fair for key in dic_prefer.keys(): temp_dict = {} for condition in np.unique(dic_prefer[key]): if condition != '': temp_dict[condition] = [dic_prefer[key].count(condition)] 
dic_prefer[key] = temp_dict tot = 0 for key2 in dic_prefer[key].keys(): tot += dic_prefer[key][key2][0] for key2 in dic_prefer[key].keys(): raw_count = dic_prefer[key][key2][0] perc = round(raw_count / tot * 100, 2) dic_prefer[key][key2].append(perc) dic_prefer ``` ##### Between-individual ``` # Between first only y, X = patsy.dmatrices( 'f_score ~ earned * equal', data_pid_version[data_pid_version['version'] == 1], return_type = 'dataframe' ) session_c = data_pid_version[(data_pid_version['version'] == 1) & (data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) & (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() # Between second only y, X = patsy.dmatrices( 'f_score ~ earned * equal', data_pid_version[data_pid_version['version'] == 2], return_type = 'dataframe' ) session_c = data_pid_version[(data_pid_version['version'] == 2) & (data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) & (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() # Between pooled y, X = patsy.dmatrices( 'f_score ~ earned * equal + C(version)', data_pid_version, return_type = 'dataframe' ) session_c = data_pid_version[(data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) & (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['session_no'] pid_c = data_pid_version[(data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) 
& (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['pid'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() # Between version interaction y, X = patsy.dmatrices( 'f_score ~ earned * equal * C(version)', data_pid_version, return_type = 'dataframe' ) session_c = data_pid_version[(data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) & (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['session_no'] pid_c = data_pid_version[(data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) & (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['pid'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() # Between first only + change_in_score y, X = patsy.dmatrices( 'f_score ~ earned * equal + change_in_score', data_pid_version[data_pid_version['version'] == 1], return_type = 'dataframe' ) session_c = data_pid_version[(data_pid_version['version'] == 1) & (data_pid_version['change_in_score'] == data_pid_version['change_in_score']) & (data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) & (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() # Between second only + change_in_score y, X = patsy.dmatrices( 'f_score ~ earned * equal + change_in_score', data_pid_version[data_pid_version['version'] == 2], return_type = 'dataframe' ) session_c = data_pid_version[(data_pid_version['version'] == 2) & 
(data_pid_version['change_in_score'] == data_pid_version['change_in_score']) & (data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) & (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() # Between pooled + change_in_score y, X = patsy.dmatrices( 'f_score ~ earned * equal + C(version) + change_in_score', data_pid_version, return_type = 'dataframe' ) session_c = data_pid_version[(data_pid_version['change_in_score'] == data_pid_version['change_in_score']) & (data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) & (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['session_no'] pid_c = data_pid_version[(data_pid_version['change_in_score'] == data_pid_version['change_in_score']) & (data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) & (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['pid'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() # Between version interaction + change_in_score y, X = patsy.dmatrices( 'f_score ~ earned * equal * C(version) + change_in_score', data_pid_version, return_type = 'dataframe' ) session_c = data_pid_version[(data_pid_version['change_in_score'] == data_pid_version['change_in_score']) & (data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) & (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['session_no'] pid_c = 
data_pid_version[(data_pid_version['change_in_score'] == data_pid_version['change_in_score']) & (data_pid_version['version'] == data_pid_version['version']) & (data_pid_version['f_score'] == data_pid_version['f_score']) & (data_pid_version['earned'] == data_pid_version['earned']) & (data_pid_version['equal'] == data_pid_version['equal'])]['pid'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() ``` ##### Between-session ``` y, X = patsy.dmatrices( 'f_score ~ earned * equal', session_data_pid_version[session_data_pid_version['version'] == 1], return_type = 'dataframe' ) ols = sm.OLS(y, X) ols.fit().summary() y, X = patsy.dmatrices( 'f_score ~ earned * equal', session_data_pid_version[session_data_pid_version['version'] == 2], return_type = 'dataframe' ) ols = sm.OLS(y, X) ols.fit().summary() y, X = patsy.dmatrices( 'f_score ~ earned * equal + C(version)', session_data_pid_version, return_type = 'dataframe' ) session_c = session_data_pid_version[ (session_data_pid_version['version'] == session_data_pid_version['version']) & (session_data_pid_version['f_score'] == session_data_pid_version['f_score']) & (session_data_pid_version['earned'] == session_data_pid_version['earned']) & (session_data_pid_version['equal'] == session_data_pid_version['equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() y, X = patsy.dmatrices( 'f_score ~ earned * equal * C(version)', session_data_pid_version, return_type = 'dataframe' ) session_c = session_data_pid_version[ (session_data_pid_version['version'] == session_data_pid_version['version']) & (session_data_pid_version['f_score'] == session_data_pid_version['f_score']) & (session_data_pid_version['earned'] == session_data_pid_version['earned']) & (session_data_pid_version['equal'] == session_data_pid_version['equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() ``` 
##### Within-individual ``` # y, X = patsy.dmatrices( # 'delta_f_score ~ delta_earned + delta_equal + delta_int', # # 'delta_f_score ~ delta_earned * delta_equal' # data_within_pid, # return_type = 'dataframe' # ) # session_c = data_within_pid[(data_within_pid['delta_f_score'] == data_within_pid['delta_f_score']) & # (data_within_pid['delta_earned'] == data_within_pid['delta_earned']) & # (data_within_pid['delta_equal'] == data_within_pid['delta_equal'])]['session_no'] # ols = sm.OLS(y, X) # ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() temp = data_within_pid.copy() temp['delta_earned'] = temp['delta_earned'].apply(lambda row: 2 if row == -1 else row) temp['delta_equal'] = temp['delta_equal'].apply(lambda row: 2 if row == -1 else row) y, X = patsy.dmatrices( 'delta_f_score ~ C(delta_earned) * C(delta_equal)', temp, return_type = 'dataframe' ) session_c = temp[(temp['delta_f_score'] == temp['delta_f_score']) & (temp['delta_earned'] == temp['delta_earned']) & (temp['delta_equal'] == temp['delta_equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() y, X = patsy.dmatrices( 'delta_f_score ~ delta_earned + delta_equal + delta_int + delta_change_in_score', # 'delta_f_score ~ delta_earned * delta_equal + delta_change_in_score' data_within_pid, return_type = 'dataframe' ) session_c = data_within_pid[ (data_within_pid['delta_change_in_score'] == data_within_pid['delta_change_in_score']) & (data_within_pid['delta_f_score'] == data_within_pid['delta_f_score']) & (data_within_pid['delta_earned'] == data_within_pid['delta_earned']) & (data_within_pid['delta_equal'] == data_within_pid['delta_equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() ``` ##### Within-session ``` y, X = patsy.dmatrices( 'delta_f_score ~ delta_earned + delta_equal + delta_int', # 'delta_f_score ~ delta_earned * delta_equal' session_data_within_pid, 
return_type = 'dataframe' ) ols = sm.OLS(y, X) ols.fit().summary() temp = session_data_within_pid.copy() temp['delta_earned'] = temp['delta_earned'].apply(lambda row: 2 if row == -1 else row) temp['delta_equal'] = temp['delta_equal'].apply(lambda row: 2 if row == -1 else row) y, X = patsy.dmatrices( 'delta_f_score ~ C(delta_earned) * C(delta_equal)', temp, return_type = 'dataframe' ) ols = sm.OLS(y, X) ols.fit().summary() ``` ### Cooperation ~ conditions ##### Overview ``` print(np.mean(data_final_coop.coopChoice)) print() print(data_final_coop.groupby('version')['coopChoice'].mean()) print() print(data_final_coop.groupby('round')['coopChoice'].mean()) print() print(data_final_coop.groupby('condition')['coopChoice'].mean()) print() print(data_final_coop.groupby(['version', 'condition'])['coopChoice'].mean()) ``` ##### Figures ``` data_final_coop.groupby(['round', 'version']).mean()['coopChoice'].unstack().plot(style = ['-', '--'], color = 'black') plt.xlabel('Round') plt.ylabel('Average Cooperation') plt.title('Average Cooperation by Round') plt.legend(['First game', 'Second game']) plt.xticks(np.arange(1, 11, step=1)) plt.savefig(os.path.join(path_to_diss, "paper/figures/appendices/figureA5.2.png")) plt.show() fig, ax = plt.subplots(1, 3, figsize = (15, 5)) clrs = ['royalblue', 'orange', 'limegreen', 'tomato'] for i in range(3): if i == 0: temp = data_final_coop[ data_final_coop['version'] == 1].groupby(['round', 'condition']).mean()['coopChoice'].unstack().plot(color = clrs, ax = ax[i]) ax[i].set_title('First game only') ax[i].set_xticks(np.arange(1, 10, step = 1)) elif i == 1: temp = data_final_coop[ data_final_coop['version'] == 2].groupby(['round', 'condition']).mean()['coopChoice'].unstack().plot(color = clrs, ax = ax[i]) ax[i].set_title('Second game only') ax[i].set_xticks(np.arange(1, 11, step = 1)) else: temp = data_final_coop[ data_final_coop['round'] != 10].groupby(['round', 'condition']).mean()['coopChoice'].unstack().plot(color = clrs, ax = ax[i]) 
ax[i].set_title('Pooled') ax[i].set_xticks(np.arange(1, 10, step = 1)) ax[i].set_xlabel('Round') ax[i].set_ylabel('Average Cooperation') ax[i].set_yticks(np.arange(0.35, 0.85, step = 0.05)) ax[i].legend(['EE', 'EU', 'RE', 'RU']) fig.savefig(os.path.join(path_to_diss, "paper/figures/appendices/figureA5.3.png"), bbox_inches = 'tight', pad_inches = 0.25) data = {'Experimental condition': ['RU to RU', 'RU to EE', 'RU to RE', 'RU to EU', 'EE to RU', 'EE to EE', 'EE to RE', 'EE to EU', 'RE to RU', 'RE to EE', 'RE to RE', 'RE to EU', 'EU to RU', 'EU to EE', 'EU to RE', 'EU to EU'], 'Change in fairness score': [ 0.4393, 0.2451, 0.4959, 1.5124, -0.1759, 0.0374, 0.2162, 1.4580, -0.3945, -0.4087, 0.5403, 0.7417, -1.2234, -1.0642, -1.0096, -0.2833], 'Change in cooperation': [ 0.0332, -0.0404, -0.0230, -0.0192, 0.0510, -0.0232, -0.0014, -0.0266, 0.0103, -0.0888, 0.0175, -0.0174, 0.0016, 0.0674, 0.0588, 0.0002]} df = pd.DataFrame(data) df.columns = ['g', 'x', 'y'] x = list(df['x']) y = list(df['y']) p1 = np.polyfit(x, y, 1) plt.rcParams["figure.figsize"] = [10, 8] plt.xlim(-1.7, 1.7) plt.ylim(-0.095, 0.095) plt.xlabel('Change in perceived fairness score') plt.ylabel('Change in probability of cooperating') plt.scatter(x, y) xlims = plt.xlim() x.insert(0, xlims[0]) y.insert(0, np.polyval(p1, xlims[0])) x.append(xlims[1]) y.append(np.polyval(p1, xlims[1])) plt.plot(x, np.polyval(p1,x), 'r-', linewidth = 1.5) plt.xlim(xlims) for line in range(0, df.shape[0]): plt.text(df.x[line]-0.1, df.y[line]+0.0025, df.g[line], horizontalalignment='left', size='small', color='black') plt.savefig(os.path.join(path_to_diss, "paper/figures/appendices/figureA4.1.png")) plt.show() ``` ##### Cooperation history ``` def get_coop_history(row): return list(row)[1:] df_wide_v1 = data_final_coop[data_final_coop['version'] == 1][ ['pid', 'round', 'coopChoice'] ].pivot(index = 'pid', columns = 'round', values = 'coopChoice').reset_index().rename_axis(None, axis = 1) df_wide_v1.head() df_wide_v2 = 
data_final_coop[data_final_coop['version'] == 2][ ['pid', 'round', 'coopChoice'] ].pivot(index = 'pid', columns = 'round', values = 'coopChoice').reset_index().rename_axis(None, axis = 1) df_wide_v2.head() df_wide_v1['coop_history'] = df_wide_v1.apply(get_coop_history, axis = 1) df_wide_v1.head() df_wide_v2['coop_history'] = df_wide_v2.apply(get_coop_history, axis = 1) df_wide_v2.head() d1 = {} for ch in df_wide_v1['coop_history']: if str(ch) in d1.keys(): d1[str(ch)] += 1 else: d1[str(ch)] = 1 d1 d2 = {} for ch in df_wide_v2['coop_history']: if str(ch) in d2.keys(): d2[str(ch)] += 1 else: d2[str(ch)] = 1 d2 ni1 = 0 o1 = 0 for ch in df_wide_v1['coop_history']: if sorted(ch, reverse = True) == ch: ni1 += 1 else: o1 += 1 ni1/(ni1+o1) ni2 = 0 o2 = 0 for ch in df_wide_v2['coop_history']: if sorted(ch, reverse = True) == ch: ni2 += 1 else: o2 += 1 ni2/(ni2+o2) ``` ##### Trust as a predictor of cooperation in the second game ``` temp = data_final_coop.groupby(['session_no', 'version']).mean().reset_index()[['session_no', 'version', 'coopChoice', 'trust_score']] temp1 = temp[temp['version'] == 1] temp2 = temp[temp['version'] == 2] temp = temp1.merge(right = temp2, how = "inner", on = ["session_no"], suffixes = ("_v1", "_v2")) temp.head() y, X = patsy.dmatrices( 'coopChoice_v2 ~ trust_score_v1 + coopChoice_v1', temp, return_type = 'dataframe' ) ols = sm.OLS(y, X) ols.fit().summary() y, X = patsy.dmatrices( 'trust_score_v1 ~ coopChoice_v1', temp, return_type = 'dataframe' ) ols = sm.OLS(y, X) ols.fit().summary() np.corrcoef(temp['trust_score_v1'], temp['coopChoice_v1']) ``` ##### Average number of neighbors ``` def count_num_neighbors(row): return len(row['neighborsList']) np.mean(data_final_coop.apply(count_num_neighbors, axis = 1)) ``` ##### Average game scores ``` print(np.mean(data_pid['score1'])) print(np.mean(data_pid['score2'])) print(np.mean(data_pid['score3'])) ``` ##### Why people cooperate ``` dic_why = { "altruism": 0, "encourage": 0, "choseA": 0, "choseB": 0, 
"equal": 0, "more": 0, "less": 0, "fair": 0, "notfair": 0, "other": 0, "other_text": [] } for index, row in data_pid.iterrows(): if row["why_coop_list"] == row["why_coop_list"]: if "other" in list(row["why_coop_list"]): dic_why["other_text"].append(row["why_coop_other"]) for e in row["why_coop_list"]: dic_why[e] += 1 dict((key,value) for key, value in dic_why.items() if key != "other_text") print(len(dic_why["other_text"]) == dic_why["other"]) dic_why["other_text"] ``` ##### Between-individual (using Logit here but same conclusions with OLS) ``` y, X = patsy.dmatrices( 'coopChoice ~ earned * equal + C(round)', data_final_coop[data_final_coop['version'] == 1], return_type = 'dataframe' ) session_c = data_final_coop[(data_final_coop['version'] == 1) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['session_no'] pid_c = data_final_coop[(data_final_coop['version'] == 1) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['pid'] logit = sm.Logit(y, X) logit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() y, X = patsy.dmatrices( 'coopChoice ~ earned * equal + C(round)', data_final_coop[data_final_coop['version'] == 2], return_type = 'dataframe' ) session_c = data_final_coop[(data_final_coop['version'] == 2) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & 
(data_final_coop['equal'] == data_final_coop['equal'])]['session_no'] pid_c = data_final_coop[(data_final_coop['version'] == 2) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['pid'] logit = sm.Logit(y, X) logit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() y, X = patsy.dmatrices( 'coopChoice ~ earned * equal + C(version) + C(round)', data_final_coop[data_final_coop['round'] != 10], return_type = 'dataframe' ) session_c = data_final_coop[(data_final_coop['round'] != 10) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['session_no'] pid_c = data_final_coop[(data_final_coop['round'] != 10) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['pid'] logit = sm.Logit(y, X) logit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() y, X = patsy.dmatrices( 'coopChoice ~ earned * equal * C(version) + C(round)', data_final_coop[data_final_coop['round'] != 10], return_type = 'dataframe' ) session_c = data_final_coop[(data_final_coop['round'] != 10) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == 
data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['session_no'] pid_c = data_final_coop[(data_final_coop['round'] != 10) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['pid'] logit = sm.Logit(y, X) logit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() y, X = patsy.dmatrices( 'coopChoice ~ earned * equal + C(round) + scoreBeforeCoop', data_final_coop[data_final_coop['version'] == 1], return_type = 'dataframe' ) session_c = data_final_coop[(data_final_coop['version'] == 1) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['session_no'] pid_c = data_final_coop[(data_final_coop['version'] == 1) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['pid'] logit = sm.Logit(y, X) logit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() y, X = patsy.dmatrices( 'coopChoice ~ earned * equal + C(round) + scoreBeforeCoop', data_final_coop[data_final_coop['version'] == 2], return_type = 'dataframe' ) session_c = data_final_coop[(data_final_coop['version'] == 2) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & 
(data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['session_no'] pid_c = data_final_coop[(data_final_coop['version'] == 2) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['pid'] logit = sm.Logit(y, X) logit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() y, X = patsy.dmatrices( 'coopChoice ~ earned * equal + C(version) + C(round) + scoreBeforeCoop', data_final_coop, return_type = 'dataframe' ) session_c = data_final_coop[(data_final_coop['scoreBeforeCoop'] == data_final_coop['scoreBeforeCoop']) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['session_no'] pid_c = data_final_coop[(data_final_coop['scoreBeforeCoop'] == data_final_coop['scoreBeforeCoop']) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['pid'] logit = sm.Logit(y, X) logit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() y, X = patsy.dmatrices( 'coopChoice ~ earned * equal * C(version) + C(round) + scoreBeforeCoop', data_final_coop, return_type = 'dataframe' ) session_c = data_final_coop[(data_final_coop['scoreBeforeCoop'] == data_final_coop['scoreBeforeCoop']) & (data_final_coop['version'] == data_final_coop['version']) & 
(data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['session_no'] pid_c = data_final_coop[(data_final_coop['scoreBeforeCoop'] == data_final_coop['scoreBeforeCoop']) & (data_final_coop['version'] == data_final_coop['version']) & (data_final_coop['round'] == data_final_coop['round']) & (data_final_coop['coopChoice'] == data_final_coop['coopChoice']) & (data_final_coop['earned'] == data_final_coop['earned']) & (data_final_coop['equal'] == data_final_coop['equal'])]['pid'] logit = sm.Logit(y, X) logit.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() ``` ##### Between-session ``` y, X = patsy.dmatrices( 'coopChoice ~ earned * equal + C(round)', session_data_coop[session_data_coop['version'] == 1], return_type = 'dataframe' ) session_c = session_data_coop[(session_data_coop['version'] == 1) & (session_data_coop['version'] == session_data_coop['version']) & (session_data_coop['round'] == session_data_coop['round']) & (session_data_coop['coopChoice'] == session_data_coop['coopChoice']) & (session_data_coop['earned'] == session_data_coop['earned']) & (session_data_coop['equal'] == session_data_coop['equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() y, X = patsy.dmatrices( 'coopChoice ~ earned * equal + C(version) + C(round)', session_data_coop[session_data_coop['version'] == 2], return_type = 'dataframe' ) session_c = session_data_coop[(session_data_coop['version'] == 2) & (session_data_coop['version'] == session_data_coop['version']) & (session_data_coop['round'] == session_data_coop['round']) & (session_data_coop['coopChoice'] == session_data_coop['coopChoice']) & (session_data_coop['earned'] == session_data_coop['earned']) & (session_data_coop['equal'] == session_data_coop['equal'])]['session_no'] 
ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() y, X = patsy.dmatrices( 'coopChoice ~ earned * equal + C(version) + C(round)', session_data_coop[session_data_coop['round'] != 10], return_type = 'dataframe' ) session_c = session_data_coop[(session_data_coop['round'] != 10) & (session_data_coop['version'] == session_data_coop['version']) & (session_data_coop['round'] == session_data_coop['round']) & (session_data_coop['coopChoice'] == session_data_coop['coopChoice']) & (session_data_coop['earned'] == session_data_coop['earned']) & (session_data_coop['equal'] == session_data_coop['equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() y, X = patsy.dmatrices( 'coopChoice ~ earned * equal * C(version) + C(round)', session_data_coop[session_data_coop['round'] != 10], return_type = 'dataframe' ) session_c = session_data_coop[(session_data_coop['round'] != 10) & (session_data_coop['version'] == session_data_coop['version']) & (session_data_coop['round'] == session_data_coop['round']) & (session_data_coop['coopChoice'] == session_data_coop['coopChoice']) & (session_data_coop['earned'] == session_data_coop['earned']) & (session_data_coop['equal'] == session_data_coop['equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() ``` ##### Within-individual ``` # y, X = patsy.dmatrices( # 'delta_coopChoice ~ delta_earned + delta_equal + delta_int + C(round)', # # 'delta_coopChoice ~ delta_earned * delta_equal + C(round)' # data_within_coop, # return_type = 'dataframe' # ) # session_c = data_within_coop[(data_within_coop['round'] == data_within_coop['round']) & # (data_within_coop['delta_coopChoice'] == data_within_coop['delta_coopChoice']) & # (data_within_coop['delta_earned'] == data_within_coop['delta_earned']) & # (data_within_coop['delta_equal'] == data_within_coop['delta_equal'])]['session_no'] # pid_c = 
data_within_coop[(data_within_coop['round'] == data_within_coop['round']) & # (data_within_coop['delta_coopChoice'] == data_within_coop['delta_coopChoice']) & # (data_within_coop['delta_earned'] == data_within_coop['delta_earned']) & # (data_within_coop['delta_equal'] == data_within_coop['delta_equal'])]['pid'] # ols = sm.OLS(y, X) # ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() temp = data_within_coop.copy() temp['delta_earned'] = temp['delta_earned'].apply(lambda row: 2 if row == -1 else row) temp['delta_equal'] = temp['delta_equal'].apply(lambda row: 2 if row == -1 else row) y, X = patsy.dmatrices( 'delta_coopChoice ~ C(delta_earned) * C(delta_equal) + C(round)', temp, return_type = 'dataframe' ) session_c = temp[(temp['round'] == temp['round']) & (temp['delta_coopChoice'] == temp['delta_coopChoice']) & (temp['delta_earned'] == temp['delta_earned']) & (temp['delta_equal'] == temp['delta_equal'])]['session_no'] pid_c = temp[(temp['round'] == temp['round']) & (temp['delta_coopChoice'] == temp['delta_coopChoice']) & (temp['delta_earned'] == temp['delta_earned']) & (temp['delta_equal'] == temp['delta_equal'])]['pid'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() y, X = patsy.dmatrices( 'delta_coopChoice ~ delta_earned + delta_equal + delta_int + delta_scoreBeforeCoop + num_other + C(round)', # 'delta_coopChoice ~ delta_earned * delta_equal + delta_scoreBeforeCoop + C(round)' data_within_coop, return_type = 'dataframe' ) session_c = data_within_coop[ (data_within_coop['delta_scoreBeforeCoop'] == data_within_coop['delta_scoreBeforeCoop']) & (data_within_coop['round'] == data_within_coop['round']) & (data_within_coop['delta_coopChoice'] == data_within_coop['delta_coopChoice']) & (data_within_coop['delta_earned'] == data_within_coop['delta_earned']) & (data_within_coop['delta_equal'] == data_within_coop['delta_equal']) & (data_within_coop['num_other'] == 
data_within_coop['num_other'])]['session_no'] pid_c = data_within_coop[ (data_within_coop['delta_scoreBeforeCoop'] == data_within_coop['delta_scoreBeforeCoop']) & (data_within_coop['round'] == data_within_coop['round']) & (data_within_coop['delta_coopChoice'] == data_within_coop['delta_coopChoice']) & (data_within_coop['delta_earned'] == data_within_coop['delta_earned']) & (data_within_coop['delta_equal'] == data_within_coop['delta_equal']) & (data_within_coop['num_other'] == data_within_coop['num_other'])]['pid'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c, pid_c]}).summary() ``` ##### Within-session ``` y, X = patsy.dmatrices( 'delta_coopChoice ~ delta_earned + delta_equal + delta_int + C(round)', # 'delta_coopChoice ~ delta_earned * delta_equal + C(round)' session_data_within_coop, return_type = 'dataframe' ) session_c = session_data_within_coop[ (session_data_within_coop['round'] == session_data_within_coop['round']) & (session_data_within_coop['delta_coopChoice'] == session_data_within_coop['delta_coopChoice']) & (session_data_within_coop['delta_earned'] == session_data_within_coop['delta_earned']) & (session_data_within_coop['delta_equal'] == session_data_within_coop['delta_equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() temp = session_data_within_coop.copy() temp['delta_earned'] = temp['delta_earned'].apply(lambda row: 2 if row == -1 else row) temp['delta_equal'] = temp['delta_equal'].apply(lambda row: 2 if row == -1 else row) y, X = patsy.dmatrices( 'delta_coopChoice ~ C(delta_earned) * C(delta_equal)', temp, return_type = 'dataframe' ) session_c = temp[(temp['round'] == temp['round']) & (temp['delta_coopChoice'] == temp['delta_coopChoice']) & (temp['delta_earned'] == temp['delta_earned']) & (temp['delta_equal'] == temp['delta_equal'])]['session_no'] ols = sm.OLS(y, X) ols.fit(cov_type='cluster', cov_kwds={'groups': [session_c]}).summary() ```
github_jupyter
# Indexing Dataframes ``` #a função set_index é um processo destrutivo e não mantém o index atual #se quisermos manter o index atual, precisamos manualmente criar uma nova coluna e copiá-los para ela #os valores import pandas as pd df = pd.read_csv('resources/week-1/datasets/Admission_Predict.csv', index_col=0) df.head() #vamos fazer de conta que não queremos manter o serial number como index do nosso DF mas sim o chance of admit, porém queremos #manter esse valor do serial number para usar mais tarde #fazemos isso usando o set_index para setar o index na coluna chance of admit #primeiro copiamos o index atua lpara uma nova coluna df['Serial No.'] = df.index #daí setamos o index para uma nova coluna df = df.set_index('Chance of Admit ') df.head() #quando criamos um index a aprtir de uma nova coluna, ele recebe o nome dessa nova coluna #podemos nos desfazer disso usando a função reset_index() para mover o index para uma nova coluna #e cria um index com números default #ele pega o chance of admit que tinha ficado como indexador e coloca ele numa nova coluna e cria um novo indexador df = df.reset_index() df.head() #uma coisa muito legal do pandas é a indexação multi-level que é similar Às chaves compostas nos bancos de dados relacionais #para criar um indexador multi-level simplesmente chamamos o set_index e passamos uma lista com nomes de colunas #que queremos transformar em indexadores df2 = pd.read_csv('resources/week-2/datasets/census.csv') df2.head() df2['SUMLEV'].unique() #excluindo todas as linhas que são sumários a nível estadual e manter apenas os dados do país df2 = df2[df2['SUMLEV'] == 50] #df2.head() df2.columns #vamos reduzir o dataset para mostrar apenas o estimado para a população e o número total de nascimentos #daí criamos uma lista com nomes de colunas que desejamos manter, projetá-las e então #definir o dataframe resultante para nossa variavel df2 columns_to_keep = ['STNAME', 'CTYNAME', 'BIRTHS2010', 'BIRTHS2011', 'BIRTHS2012', 'BIRTHS2013', 
'BIRTHS2014', 'POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015'] df2 = df2[columns_to_keep] df2.head() #cria uma lista com as colunas que dejseamos manter e definimos a nova variavel df2 com essas colunas #o censo separa população por estimado por estado e país #podemos carregar os dados e setar o index para ser a combinação do estado e do país #e daí ver como o pandas trabalha com isso num dataframe #faremos isso criando uma lista com os identificadores de colunas que desejamos indexar. daí chamamos o set_index() #com essa lista e atribuimos o output como apropriado. #vemos acima que temos dois indexadores, o STNAME e CTYNAME df2 = df2.set_index(['STNAME', 'CTYNAME']) df2.head() ```
github_jupyter
## Preprocessing <!-- Was used to generate: <br> *preprocessed_data/cloud_cover_all_days_input_train_1.npy <br> preprocessed_data/cloud_cover_all_days_input_valid_1.npy <br> preprocessed_data/cloud_cover_all_days_output_train_1.npy <br> preprocessed_data/cloud_cover_all_days_output_valid_1.npy* --> ``` import sys import xarray as xr import numpy as np import matplotlib.pyplot as plt import pandas as pd import importlib # from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing import StandardScaler from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense from tensorflow.keras.optimizers import Nadam from tensorflow.keras.callbacks import EarlyStopping base_path = '/pf/b/b309170' path = base_path + '/my_work/NARVAL/data_var_vertinterp/' output_path = base_path + '/my_work/icon-ml_data/cloud_cover_parameterization/grid_cell_based_v3/based_on_var_interpolated_data' model_path = "/pf/b/b309170/workspace_icon-ml/cloud_cover_parameterization/grid_cell_based_v3/saved_models" # Add path with my_classes to sys.path sys.path.insert(0, base_path + '/workspace_icon-ml/cloud_cover_parameterization/') from my_classes import write_infofile from my_classes import load_data NUM = 1 np.random.seed(NUM) ``` ## Reading the data ### Input: - fr_land: Fraction of land - zg: Geometric height at full levels - qv: Specific water vapor content - qi: Specific cloud ice content - temp: Temperature - pres: Pressure ### Output: - clc: Cloud Cover Be careful with the NARVAL file-naming convention when it comes to timestamps when adding 2D-variables. 
``` # Loads the NARVAL data into the data_dict dictionary order_of_vars = ['qv', 'qi', 'temp', 'pres', 'zg', 'fr_land', 'clc'] data_dict = load_data(source='narval', days='all', vert_interp=True, order_of_vars=order_of_vars) #Reshaping into nd-arrays of equaling shapes (have timesteps x vert x hor) data_dict['zg'] = np.repeat(np.expand_dims(data_dict['zg'], 0), data_dict['qv'].shape[0], axis=0) data_dict['fr_land'] = np.repeat(np.expand_dims(data_dict['fr_land'], 0), data_dict['qv'].shape[0], axis=0) data_dict['fr_land'] = np.repeat(np.expand_dims(data_dict['fr_land'], 1), data_dict['qv'].shape[1], axis=1) assert data_dict['fr_land'].shape == data_dict['qv'].shape == data_dict['zg'].shape data_dict.keys() # Reshaping into 1D-arrays and converting dict into a DataFrame-object (the following is based on Aurelien Geron) for key in ['qv', 'qi', 'temp', 'pres', 'zg', 'fr_land', 'clc']: data_dict[key] = np.reshape(data_dict[key], -1) df = pd.DataFrame.from_dict(data_dict) df.head() ``` **Downsampling the data (minority class: clc = 0)** ``` np.max(df.loc[df['clc']>0])['zg'] df = df.loc[df['zg'] < 21000] # There are days with clc > 0 at 20500 meters df_noclc = df.loc[df['clc']==0] len(df_noclc) # We ensure that clc != 0 and clc = 0 have the same size downsample_ratio = (len(df) - len(df_noclc))/len(df_noclc) print(downsample_ratio) shuffled_indices = np.random.permutation(len(df_noclc)) set_size = int(len(df_noclc)*downsample_ratio) downsample_indices = shuffled_indices[:set_size] df = pd.concat([df_noclc.iloc[downsample_indices],df.loc[df['clc']!=0]]) ``` **Splitting the data into a learning and a test set** ``` #Splitting the data into a learning and a test set #Should we use StratifiedShuffleSplit instead to make sure that the test set is representative of the whole dataset? #E.g. 
define categories of specific water vapor and make sure those categories are present in the test set as well #-> Geron, p.69 def split_train_test(df, test_ratio): shuffled_indices = np.random.permutation(len(df)) test_set_size = int(len(df)*test_ratio) test_indices = shuffled_indices[:test_set_size] train_indices = shuffled_indices[test_set_size:] return df.iloc[train_indices], df.iloc[test_indices] learning_set, test_set = split_train_test(df, 0.2) print(len(learning_set), 'training samples, ', len(test_set), 'test samples') scaler = StandardScaler() #Split the training set/learning set into a training set and a validation set and rescale train_set, valid_set = split_train_test(learning_set, 0.1) if 'clc' in valid_set.columns: output_valid = valid_set['clc'] del valid_set['clc'] if 'clc' in train_set.columns: output_train = train_set['clc'] del train_set['clc'] scaler.fit(train_set) input_train = scaler.transform(train_set) input_valid = scaler.transform(valid_set) # Save and scale the test set as well if 'clc' in test_set.columns: output_test = test_set['clc'] del test_set['clc'] input_test = scaler.transform(test_set) # Save the data np.save(output_path + '/cloud_cover_all_days_input_train_%d.npy'%NUM, input_train) np.save(output_path + '/cloud_cover_all_days_input_valid_%d.npy'%NUM, input_valid) np.save(output_path + '/cloud_cover_all_days_output_train_%d.npy'%NUM, output_train) np.save(output_path + '/cloud_cover_all_days_output_valid_%d.npy'%NUM, output_valid) np.save(output_path + '/cloud_cover_all_days_input_test_%d.npy'%NUM, input_test) np.save(output_path + '/cloud_cover_all_days_output_test_%d.npy'%NUM, output_test) with open(model_path+'/scaler_%d.txt'%NUM, 'w') as file: file.write('Standard Scaler mean values:\n') file.write(str(scaler.mean_)) file.write('\nStandard Scaler standard deviation:\n') file.write(str(np.sqrt(scaler.var_))) # Write the accompanying info-file with open(model_path + '/model_grid_cell_based_v3_final_%d.txt'%NUM, 'w') as file: 
write_infofile(file, str(learning_set.columns), str(np.array(np.delete(learning_set.columns, 6))), model_path, output_path, NUM) ```
github_jupyter
<img src="img/python-logo-notext.svg" style="display:block;margin:auto;width:10%"/> <h1 style="text-align:center;">Python: Pandas Data Frames 1</h1> <h2 style="text-align:center;">Coding Akademie München GmbH</h2> <br/> <div style="text-align:center;">Dr. Matthias Hölzl</div> <div style="text-align:center;">Allaithy Raed</div> # Data Frames Data Frames sind die am häufigsten verwendete Datenstruktur von Pandas. Sie ermöglichen das bequeme Einlesen, Verarbeiten und Speichern von Daten. Konzeptionell besteht ein Data Frame aus mehreren `Series`-Instanzen, die einen gemeinsamen Index haben. ``` import numpy as np import pandas as pd ``` ## Erzeugen eines Data Frames ### Aus einem NumPy Array ``` def create_data_frame(): rng = np.random.default_rng(42) array = rng.normal(size=(5, 4), scale=5.0) index = 'A B C D E'.split() columns = 'w x y z'.split() return pd.DataFrame(array, index=index, columns=columns) df = create_data_frame() df type(df) ``` ### Aus einer CSV-Datei ``` df_csv = pd.read_csv("example_data.csv") df_csv df_csv = pd.read_csv("example_data.csv", index_col=0) df_csv ``` ### Aus einer Excel Datei ``` df_excel = pd.read_excel("excel_data.xlsx", index_col=0) df_excel df_excel2 = pd.read_excel("excel_other_sheet.xlsx", index_col=0) df_excel2 df_excel2 = pd.read_excel("excel_other_sheet.xlsx", index_col=0, sheet_name='Another Sheet') df_excel2.head() ``` ### Andere Formate: ``` pd.read_clipboard pd.read_html pd.read_json pd.read_pickle pd.read_sql; # Verwendet SQLAlchemy um auf eine Datenbank zuzugreifen # usw. ``` ### Indizes und Operationen ``` df_csv.head() df_csv.tail() df = create_data_frame() df['w'] type(df['w']) # Sollte nicht verwendet werden... 
df.w df[['w', 'y']] df.index df.index.is_monotonic_increasing df.size df.ndim df.shape ``` ### Erzeugen, Umbenennen und Löschen von Spalten ``` df = create_data_frame() df['Summe aus w und y'] = df['w'] + df['y'] df df.rename(columns={'Summe aus w und y': 'w + y'}) df df.rename(columns={'Summe aus w und y': 'w + y'}, index={'E': 'Z'}, inplace=True) df type(df['y']) del df['y'] df df.drop('A') df df.drop('B', inplace=True) df df.drop('z', axis=1) df df.drop('z', axis=1, inplace=True) df ``` ## Auswahl ``` df = create_data_frame() df df['w'] # Fehler # df['A'] df.loc['B'] type(df.loc['B']) df df.iloc[1] df.loc[['A', 'C']] df.loc[['A', 'C'], ['x', 'y']] df.loc['B', 'z'] df.iloc[[1, 2], [0, 3]] df.iloc[0, 0] ``` ## Bedingte Auswahl ``` df = create_data_frame() df df > 0 df[df > 0] df['w'] > 0 df[df['w'] > 0] df[df['w'] > 0][['x', 'y']] df[(df['w'] > 0) & (df['x'] < 0)] ``` # Information über Data Frames ``` df = pd.DataFrame(array, index=index, columns=columns) df['txt'] = 'a b c d e'.split() df.iloc[1, 1] = np.nan df df.describe() df.info() df.dtypes ``` ## Data Frame Index ``` df = create_data_frame() df['txt'] = 'a b c d e'.split() df df.reset_index() df df.reset_index(inplace=True) df df.rename(columns={'index': 'old_index'}, inplace=True) df df.set_index('txt') df df.set_index('txt', inplace=True) df df.set_index('old_index', inplace=True) df df.info() df.index df.index.name = None df ```
github_jupyter
<a href="https://colab.research.google.com/github/AlejandroBeltranA/OCVED-ML/blob/master/OCVED_Applied_v2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Classifying remaining articles This is the 4th of 4 scripts used in ocved.mx This script uses the LR model trained in the first script to classify the universe of articles collected from EMIS. A total of 188,492 are classified using the model from OCVED_GSR_Trained.v2.4 ``` # Mount Google Drive from google.colab import drive drive.mount('/content/drive') # Install tqdm %cd /content/drive/ !ls !pip install tqdm # Packages used import pandas as pd import numpy as np from tqdm import tqdm_notebook as tqdm from nltk.tokenize import word_tokenize from nltk import pos_tag #from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.preprocessing import LabelEncoder from collections import defaultdict from nltk.corpus import wordnet as wn from sklearn.feature_extraction.text import TfidfVectorizer from sklearn import model_selection, naive_bayes, svm, linear_model from sklearn.metrics import accuracy_score, precision_recall_fscore_support, precision_score, recall_score, f1_score ``` We download the Spacy lemmatizer again to reduce words to their lemma for normalization. ``` %%capture !pip install es-lemmatizer !pip install -U spacy !sudo python -m spacy download es_core_news_sm import re import nltk nltk.download('stopwords') ``` We load in the universe of articles collected from EMIS using the scripts in EMIS_scrape repository. These articles are downloaded from subnational news sources, regional newspapers, and other sources not specified as national newspapers. There's a lot of noise in these articles. I leave the training articles in the universe since the model should perform well on the articles it was trained on. This csv contains 158,496 articles. The majority of these are noise! 
``` emis = pd.read_csv('My Drive/Data/OCVED/Classifier/universe/EMIS_Universe.csv') emis ``` As detailed in script 1, a seperate process collected articles from national newspapers by having RA's manually download these articles. The manual download process took 5 months, students would read each article and determine if it was relevant to the PI's research. In contrast, the scraping and generating traning data took a total of 3 months, with the added advantage that the model can be used for future data collected. This process generated 29,995 articles. ``` nat = pd.read_csv('My Drive/Data/OCVED/National/txt_docs/National_OCVED.csv') nat ``` We combine these two datasets, making a full universe of articles on DTO's in Mexico. All articles used in the training steps are also included given the model should perform well classifying these. ``` data = [] data.append(emis) data.append(nat) df = pd.concat(data, axis=0, ignore_index=True, sort=True).sort_values('file_id', ascending= True) df np.random.seed(1000) ``` Code for removing accents. ``` import unicodedata import string # BEGIN SHAVE_MARKS_LATIN def shave_marks_latin(txt): """Remove all diacritic marks from Latin base characters""" norm_txt = unicodedata.normalize('NFD', txt) # <1> latin_base = False keepers = [] for c in norm_txt: if unicodedata.combining(c) and latin_base: # <2> continue # ignore diacritic on Latin base char keepers.append(c) # <3> # if it isn't combining char, it's a new base char if not unicodedata.combining(c): # <4> latin_base = c in string.ascii_letters shaved = ''.join(keepers) return unicodedata.normalize('NFC', shaved) # <5> # END SHAVE_MARKS_LATIN def shave_marks(txt): """Remove all diacritic marks""" norm_txt = unicodedata.normalize('NFD', txt) # <1> shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c)) # <2> return unicodedata.normalize('NFC', shaved) # <3> # END SHAVE_MARKS ``` Let's load the tokenizer and lemmatizer in. 
``` from es_lemmatizer import lemmatize import es_core_news_sm nlp = es_core_news_sm.load() nlp.add_pipe(lemmatize, after="tagger") ``` Stopwords removed to reduce noise and reduce the number of not useful features. ``` from nltk.corpus import stopwords ##Creating a list of stop words and adding custom stopwords stop_words = set(stopwords.words("spanish")) ##Creating a list of custom stopwords new_words = ["daily", "newspaper", "reforma", "publication", "universal", "iv", "one", "two", "august" , "excelsior", "online", "november", "july", "september", "june", "october", "december", "print", "edition", "news", "milenio", "january", "international", "march", "april", "july", "february", "may", "october", "el occidental", "comments", "powered", "display", "space", "javascript", "trackpageview", "enablelinktracking", "location", "protocol", "weboperations", "settrackerurl", "left", "setsiteid", "createelement", "getelementsbytagname", "parentnode", "insertbefore", "writeposttexto", "everykey", "passwords" "writecolumnaderechanotas", "anteriorsiguente", "anteriorsiguiente", "writefooter", "align", "googletag", "writeaddthis", "writefooteroem", "diario delicias", "diario tampico", "the associated press", "redaccion" , "national", "diario yucatan", "mural", "periodico", "new", "previously", "shown" , "a", "para", "tener" , "haber", "ser" , "mexico city", "states", "city", "and", "elsolde", "recomendamos", "diario chihuahua" , "diario juarez" , "el norte", "voz frontera" , "regional" , "de" , "el sol" , "el" , "sudcaliforniano" , "washington", "union morelos", "milenio" , "notimex", "el financiero" , "financiero" , "forum magazine" , "economista" , "gmail" , "financial", "el" , "de", "la", "del", "de+el" , "a+el" , "shortcode" , "caption", "cfcfcf", "float", "item", "width", "follow", "aaannnnn", "gmannnnn", "dslnnnnn", "jtjnnnnn", "lcgnnnnn", "jgcnnnnn", "vhannnnn", "mtc", "eleconomista", "monitoreoif", "infosel", "gallery", "heaven", "div", "push" , "translate", 
"google"] stop_words = stop_words.union(new_words) stop_words = shave_marks(repr(stop_words)) dataset = df ``` Process for cleaning out the text and generating the corpus. ``` corpus = [] for i in dataset.itertuples(): #for i in tqdm(range(1, 2000)): text = shave_marks_latin(i.text) #Remove punctuations text = re.sub('[^a-zA-Z]', ' ', text) #Convert to lowercase #text = shave_marks_latin(text) #text = text.lower() #remove tags text=re.sub("&lt;/?.*?&gt;"," &lt;&gt; ",text) # remove special characters and digits text=re.sub("(\\d|\\W)+"," ",text) text = re.sub(' +', ' ', text) #Lemmatisation doc = nlp(text) text = [token.lemma_ for token in doc if token.lemma_ not in stop_words] text = " ".join(text) text = shave_marks(text) file_id = i.file_id original = i.text corpus.append({ 'text': text, 'file_id': file_id , "original": original}) print ("done") data = pd.DataFrame(corpus) data gen = data['text'] gen ``` Let's look at how frequent some words are in the universe using Tokenizer. ``` from keras.preprocessing.text import Tokenizer #Using TensorFlow backend. xtrain_count, train_y, xvalid_count tokenizer = Tokenizer(num_words=5000) tokenizer.fit_on_texts(gen) X_gen = tokenizer.texts_to_sequences(gen) #X_test = tokenizer.texts_to_sequences(valid_x) # vocab_size = len(tokenizer.word_index) + 1 # Adding 1 because of reserved 0 index print(gen.iloc[3]) print(X_gen[3]) for word in ['sexual', 'cartel', 'sinaloa', 'violencia']: print('{}: {}'.format(word, tokenizer.word_index[word])) from keras.preprocessing.sequence import pad_sequences maxlen = 100 X_gen = pad_sequences(X_gen, padding='post') #, maxlen=maxlen #X_test = pad_sequences(X_test, padding='post', maxlen=maxlen) print(X_gen[2, :]) ``` # Application Now we load in the encoder, model, and vectorizer from script 1 so we can implement it in the application pipeline. 
``` import pickle pkl_file = open('/content/drive/My Drive/Data/OCVED/Classifier/algorithm/OCVED_encoder_v2.pkl', 'rb') encoder = pickle.load(pkl_file) pkl_file.close() ``` We used the LR model because it produced the best F1 score of all models. See Osorio & Beltran (2020) for more information on why. ``` from sklearn.externals import joblib # save the model to disk filename = '/content/drive/My Drive/Data/OCVED/Classifier/algorithm/logistic_model_v2.sav' # load the model from disk logit_model = joblib.load(filename) ``` It's important we use the same trained tfidf from the first script in this process. Otherwise the length and words used will be different across vectors! ``` # create a count vectorizer object from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer import pickle #count_vect = CountVectorizer(analyzer='word', token_pattern=r'\w{1,}') #count_vect.fit(data['text']) # transform the training and validation data using count vectorizer object #xtrain_count = count_vect.transform(gen) #xtrain_count #pickle.dump(xtrain_count, open("/content/drive/My Drive/Data/Bogota/categorized_articles/tfidf.pickle", "wb")) pkl_file = open("/content/drive/My Drive/Data/OCVED/Classifier/algorithm/Tfidf_vect_3.pickle", 'rb') tfidf = pickle.load(pkl_file) pkl_file.close() tfidf gen_2 = tfidf.transform(gen) gen_2 ``` Now we finally ask the logit model to generate predictions for each article. It reviews the numeric contents and makes a predictions. Anything that has above a .5 probability of being DTO related is classified as such. ``` # make a prediction y_label = logit_model.predict(gen_2) # show the inputs and predicted outputs print("X=%s, Predicted=%s" % (gen_2[0], y_label[0])) # make a prediction y_prob = logit_model.predict_proba(gen_2)[:,1] # show the inputs and predicted outputs y_prob ``` Now we want to save the output, first in a csv. 
``` data['y_label'] = y_label data['y_prob'] = y_prob data.to_csv('My Drive/Data/OCVED/Classifier/predictions_v3/logit_OCVED_pred_v3.csv') ``` Here I save them as .txt files for use in Eventus ID. ``` data = data[data.y_label == 1 ] data for i in tqdm(dataset.itertuples()): text = shave_marks_latin(i.text) #Remove punctuations #text = re.sub('[^a-zA-Z]', ' ', text) #Convert to lowercase #remove tags #text=re.sub("&lt;/?.*?&gt;"," &lt;&gt; ",text) # remove special characters and digits #text=re.sub("(\\d|\\W)+"," ",text) #text = re.sub(' +', ' ', text) file_id = i.file_id original = i.original dirty = 'My Drive/Data/OCVED/Classifier/predictions_v3/dirty/' clean = 'My Drive/Data/OCVED/Classifier/predictions_v3/clean/' dirty_file = dirty + file_id clean_file = clean + file_id with open(dirty_file, 'w') as f: f.write(original) with open(clean_file, 'w') as c: c.write(text) print ("script has completed") ``` This takes a long time so I have it print out the time it finished. ``` !rm /etc/localtime !ln -s /usr/share/zoneinfo/America/Phoenix /etc/localtime !date ```
github_jupyter
# Artificial Intelligence Nanodegree ## Voice User Interfaces ## Project: Speech Recognition with Neural Networks --- In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following blocks of code will require additional functionality which you must provide. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n", "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode. The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook. 
--- ## Introduction In this notebook, you will build a deep neural network that functions as part of an end-to-end automatic speech recognition (ASR) pipeline! Your completed pipeline will accept raw audio as input and return a predicted transcription of the spoken language. The full pipeline is summarized in the figure below. <img src="images/pipeline.png"> - **STEP 1** is a pre-processing step that converts raw audio to one of two feature representations that are commonly used for ASR. - **STEP 2** is an acoustic model which accepts audio features as input and returns a probability distribution over all potential transcriptions. After learning about the basic types of neural networks that are often used for acoustic modeling, you will engage in your own investigations, to design your own acoustic model! - **STEP 3** in the pipeline takes the output from the acoustic model and returns a predicted transcription. Feel free to use the links below to navigate the notebook: - [The Data](#thedata) - [**STEP 1**](#step1): Acoustic Features for Speech Recognition - [**STEP 2**](#step2): Deep Neural Networks for Acoustic Modeling - [Model 0](#model0): RNN - [Model 1](#model1): RNN + TimeDistributed Dense - [Model 2](#model2): CNN + RNN + TimeDistributed Dense - [Model 3](#model3): Deeper RNN + TimeDistributed Dense - [Model 4](#model4): Bidirectional RNN + TimeDistributed Dense - [Models 5+](#model5) - [Compare the Models](#compare) - [Final Model](#final) - [**STEP 3**](#step3): Obtain Predictions <a id='thedata'></a> ## The Data We begin by investigating the dataset that will be used to train and evaluate your pipeline. [LibriSpeech](http://www.danielpovey.com/files/2015_icassp_librispeech.pdf) is a large corpus of English-read speech, designed for training and evaluating models for ASR. The dataset contains 1000 hours of speech derived from audiobooks. We will work with a small subset in this project, since larger-scale data would take a long while to train. 
However, after completing this project, if you are interested in exploring further, you are encouraged to work with more of the data that is provided [online](http://www.openslr.org/12/). In the code cells below, you will use the `vis_train_features` module to visualize a training example. The supplied argument `index=0` tells the module to extract the first example in the training set. (You are welcome to change `index=0` to point to a different training example, if you like, but please **DO NOT** amend any other code in the cell.) The returned variables are: - `vis_text` - transcribed text (label) for the training example. - `vis_raw_audio` - raw audio waveform for the training example. - `vis_mfcc_feature` - mel-frequency cepstral coefficients (MFCCs) for the training example. - `vis_spectrogram_feature` - spectrogram for the training example. - `vis_audio_path` - the file path to the training example. ``` from data_generator import vis_train_features # extract label and audio features for a single training example vis_text, vis_raw_audio, vis_mfcc_feature, vis_spectrogram_feature, vis_audio_path = vis_train_features() ``` The following code cell visualizes the audio waveform for your chosen example, along with the corresponding transcript. You also have the option to play the audio in the notebook! ``` from IPython.display import Markdown, display from data_generator import vis_train_features, plot_raw_audio from IPython.display import Audio %matplotlib inline # plot audio signal plot_raw_audio(vis_raw_audio) # print length of audio signal display(Markdown('**Shape of Audio Signal** : ' + str(vis_raw_audio.shape))) # print transcript corresponding to audio clip display(Markdown('**Transcript** : ' + str(vis_text))) # play the audio file Audio(vis_audio_path) ``` <a id='step1'></a> ## STEP 1: Acoustic Features for Speech Recognition For this project, you won't use the raw audio waveform as input to your model. 
Instead, we provide code that first performs a pre-processing step to convert the raw audio to a feature representation that has historically proven successful for ASR models. Your acoustic model will accept the feature representation as input. In this project, you will explore two possible feature representations. _After completing the project_, if you'd like to read more about deep learning architectures that can accept raw audio input, you are encouraged to explore this [research paper](https://pdfs.semanticscholar.org/a566/cd4a8623d661a4931814d9dffc72ecbf63c4.pdf). ### Spectrograms The first option for an audio feature representation is the [spectrogram](https://www.youtube.com/watch?v=_FatxGN3vAM). In order to complete this project, you will **not** need to dig deeply into the details of how a spectrogram is calculated; but, if you are curious, the code for calculating the spectrogram was borrowed from [this repository](https://github.com/baidu-research/ba-dls-deepspeech). The implementation appears in the `utils.py` file in your repository. The code that we give you returns the spectrogram as a 2D tensor, where the first (_vertical_) dimension indexes time, and the second (_horizontal_) dimension indexes frequency. To speed the convergence of your algorithm, we have also normalized the spectrogram. (You can see this quickly in the visualization below by noting that the mean value hovers around zero, and most entries in the tensor assume values close to zero.) ``` from data_generator import plot_spectrogram_feature # plot normalized spectrogram plot_spectrogram_feature(vis_spectrogram_feature) # print shape of spectrogram display(Markdown('**Shape of Spectrogram** : ' + str(vis_spectrogram_feature.shape))) ``` ### Mel-Frequency Cepstral Coefficients (MFCCs) The second option for an audio feature representation is [MFCCs](https://en.wikipedia.org/wiki/Mel-frequency_cepstrum). 
You do **not** need to dig deeply into the details of how MFCCs are calculated, but if you would like more information, you are welcome to peruse the [documentation](https://github.com/jameslyons/python_speech_features) of the `python_speech_features` Python package. Just as with the spectrogram features, the MFCCs are normalized in the supplied code. The main idea behind MFCC features is the same as spectrogram features: at each time window, the MFCC feature yields a feature vector that characterizes the sound within the window. Note that the MFCC feature is much lower-dimensional than the spectrogram feature, which could help an acoustic model to avoid overfitting to the training dataset. ``` from data_generator import plot_mfcc_feature # plot normalized MFCC plot_mfcc_feature(vis_mfcc_feature) # print shape of MFCC display(Markdown('**Shape of MFCC** : ' + str(vis_mfcc_feature.shape))) ``` When you construct your pipeline, you will be able to choose to use either spectrogram or MFCC features. If you would like to see different implementations that make use of MFCCs and/or spectrograms, please check out the links below: - This [repository](https://github.com/baidu-research/ba-dls-deepspeech) uses spectrograms. - This [repository](https://github.com/mozilla/DeepSpeech) uses MFCCs. - This [repository](https://github.com/buriburisuri/speech-to-text-wavenet) also uses MFCCs. - This [repository](https://github.com/pannous/tensorflow-speech-recognition/blob/master/speech_data.py) experiments with raw audio, spectrograms, and MFCCs as features. <a id='step2'></a> ## STEP 2: Deep Neural Networks for Acoustic Modeling In this section, you will experiment with various neural network architectures for acoustic modeling. You will begin by training five relatively simple architectures. **Model 0** is provided for you. You will write code to implement **Models 1**, **2**, **3**, and **4**. 
If you would like to experiment further, you are welcome to create and train more models under the **Models 5+** heading. All models will be specified in the `sample_models.py` file. After importing the `sample_models` module, you will train your architectures in the notebook. After experimenting with the five simple architectures, you will have the opportunity to compare their performance. Based on your findings, you will construct a deeper architecture that is designed to outperform all of the shallow models. For your convenience, we have designed the notebook so that each model can be specified and trained on separate occasions. That is, say you decide to take a break from the notebook after training **Model 1**. Then, you need not re-execute all prior code cells in the notebook before training **Model 2**. You need only re-execute the code cell below, that is marked with **`RUN THIS CODE CELL IF YOU ARE RESUMING THE NOTEBOOK AFTER A BREAK`**, before transitioning to the code cells corresponding to **Model 2**. ``` ##################################################################### # RUN THIS CODE CELL IF YOU ARE RESUMING THE NOTEBOOK AFTER A BREAK # ##################################################################### # allocate 50% of GPU memory (if you like, feel free to change this) from keras.backend.tensorflow_backend import set_session import tensorflow as tf config = tf.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.5 set_session(tf.Session(config=config)) # watch for any changes in the sample_models module, and reload it automatically %load_ext autoreload %autoreload 2 # import NN architectures for speech recognition from sample_models import * # import function for training acoustic model from train_utils import train_model ``` <a id='model0'></a> ### Model 0: RNN Given their effectiveness in modeling sequential data, the first acoustic model you will use is an RNN. 
As shown in the figure below, the RNN we supply to you will take the time sequence of audio features as input. <img src="images/simple_rnn.png" width="50%"> At each time step, the speaker pronounces one of 28 possible characters, including each of the 26 letters in the English alphabet, along with a space character (" "), and an apostrophe ('). The output of the RNN at each time step is a vector of probabilities with 29 entries, where the $i$-th entry encodes the probability that the $i$-th character is spoken in the time sequence. (The extra 29th character is an empty "character" used to pad training examples within batches containing uneven lengths.) If you would like to peek under the hood at how characters are mapped to indices in the probability vector, look at the `char_map.py` file in the repository. The figure below shows an equivalent, rolled depiction of the RNN that shows the output layer in greater detail. <img src="images/simple_rnn_unrolled.png" width="60%"> The model has already been specified for you in Keras. To import it, you need only run the code cell below. ``` model_0 = simple_rnn_model(input_dim=161) # change to 13 if you would like to use MFCC features ``` As explored in the lesson, you will train the acoustic model with the [CTC loss](http://www.cs.toronto.edu/~graves/icml_2006.pdf) criterion. Custom loss functions take a bit of hacking in Keras, and so we have implemented the CTC loss function for you, so that you can focus on trying out as many deep learning architectures as possible :). If you'd like to peek at the implementation details, look at the `add_ctc_loss` function within the `train_utils.py` file in the repository. To train your architecture, you will use the `train_model` function within the `train_utils` module; it has already been imported in one of the above code cells. The `train_model` function takes three **required** arguments: - `input_to_softmax` - a Keras model instance. 
- `pickle_path` - the name of the pickle file where the loss history will be saved. - `save_model_path` - the name of the HDF5 file where the model will be saved. If we have already supplied values for `input_to_softmax`, `pickle_path`, and `save_model_path`, please **DO NOT** modify these values. There are several **optional** arguments that allow you to have more control over the training process. You are welcome to, but not required to, supply your own values for these arguments. - `minibatch_size` - the size of the minibatches that are generated while training the model (default: `20`). - `spectrogram` - Boolean value dictating whether spectrogram (`True`) or MFCC (`False`) features are used for training (default: `True`). - `mfcc_dim` - the size of the feature dimension to use when generating MFCC features (default: `13`). - `optimizer` - the Keras optimizer used to train the model (default: `SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)`). - `epochs` - the number of epochs to use to train the model (default: `20`). If you choose to modify this parameter, make sure that it is *at least* 20. - `verbose` - controls the verbosity of the training output in the `model.fit_generator` method (default: `1`). - `sort_by_duration` - Boolean value dictating whether the training and validation sets are sorted by (increasing) duration before the start of the first epoch (default: `False`). The `train_model` function defaults to using spectrogram features; if you choose to use these features, note that the acoustic model in `simple_rnn_model` should have `input_dim=161`. Otherwise, if you choose to use MFCC features, the acoustic model should have `input_dim=13`. We have chosen to use `GRU` units in the supplied RNN. If you would like to experiment with `LSTM` or `SimpleRNN` cells, feel free to do so here. 
If you change the `GRU` units to `SimpleRNN` cells in `simple_rnn_model`, you may notice that the loss quickly becomes undefined (`nan`) - you are strongly encouraged to check this for yourself! This is due to the [exploding gradients problem](http://www.wildml.com/2015/10/recurrent-neural-networks-tutorial-part-3-backpropagation-through-time-and-vanishing-gradients/). We have already implemented [gradient clipping](https://arxiv.org/pdf/1211.5063.pdf) in your optimizer to help you avoid this issue. __IMPORTANT NOTE:__ If you notice that your gradient has exploded in any of the models below, feel free to explore more with gradient clipping (the `clipnorm` argument in your optimizer) or swap out any `SimpleRNN` cells for `LSTM` or `GRU` cells. You can also try restarting the kernel to restart the training process. ``` train_model(input_to_softmax=model_0, pickle_path='model_0.pickle', save_model_path='model_0.h5', spectrogram=True) # change to False if you would like to use MFCC features ``` <a id='model1'></a> ### (IMPLEMENTATION) Model 1: RNN + TimeDistributed Dense Read about the [TimeDistributed](https://keras.io/layers/wrappers/) wrapper and the [BatchNormalization](https://keras.io/layers/normalization/) layer in the Keras documentation. For your next architecture, you will add [batch normalization](https://arxiv.org/pdf/1510.01378.pdf) to the recurrent layer to reduce training times. The `TimeDistributed` layer will be used to find more complex patterns in the dataset. The unrolled snapshot of the architecture is depicted below. <img src="images/rnn_model.png" width="60%"> The next figure shows an equivalent, rolled depiction of the RNN that shows the (`TimeDistrbuted`) dense and output layers in greater detail. <img src="images/rnn_model_unrolled.png" width="60%"> Use your research to complete the `rnn_model` function within the `sample_models.py` file. 
The function should specify an architecture that satisfies the following requirements: - The first layer of the neural network should be an RNN (`SimpleRNN`, `LSTM`, or `GRU`) that takes the time sequence of audio features as input. We have added `GRU` units for you, but feel free to change `GRU` to `SimpleRNN` or `LSTM`, if you like! - Whereas the architecture in `simple_rnn_model` treated the RNN output as the final layer of the model, you will use the output of your RNN as a hidden layer. Use `TimeDistributed` to apply a `Dense` layer to each of the time steps in the RNN output. Ensure that each `Dense` layer has `output_dim` units. Use the code cell below to load your model into the `model_1` variable. Use a value for `input_dim` that matches your chosen audio features, and feel free to change the values for `units` and `activation` to tweak the behavior of your recurrent layer. ``` model_1 = rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features units=200, activation='relu') ``` Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_1.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_1.pickle`. You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required. ``` train_model(input_to_softmax=model_1, pickle_path='model_1.pickle', save_model_path='model_1.h5', spectrogram=True) # change to False if you would like to use MFCC features ``` <a id='model2'></a> ### (IMPLEMENTATION) Model 2: CNN + RNN + TimeDistributed Dense The architecture in `cnn_rnn_model` adds an additional level of complexity, by introducing a [1D convolution layer](https://keras.io/layers/convolutional/#conv1d). 
<img src="images/cnn_rnn_model.png" width="100%"> This layer incorporates many arguments that can be (optionally) tuned when calling the `cnn_rnn_model` module. We provide sample starting parameters, which you might find useful if you choose to use spectrogram audio features. If you instead want to use MFCC features, these arguments will have to be tuned. Note that the current architecture only supports values of `'same'` or `'valid'` for the `conv_border_mode` argument. When tuning the parameters, be careful not to choose settings that make the convolutional layer overly small. If the temporal length of the CNN layer is shorter than the length of the transcribed text label, your code will throw an error. Before running the code cell below, you must modify the `cnn_rnn_model` function in `sample_models.py`. Please add batch normalization to the recurrent layer, and provide the same `TimeDistributed` layer as before. ``` model_2 = cnn_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features filters=200, kernel_size=11, conv_stride=2, conv_border_mode='valid', units=200) ``` Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_2.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_2.pickle`. You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required. ``` train_model(input_to_softmax=model_2, pickle_path='model_2.pickle', save_model_path='model_2.h5', spectrogram=True) # change to False if you would like to use MFCC features ``` <a id='model3'></a> ### (IMPLEMENTATION) Model 3: Deeper RNN + TimeDistributed Dense Review the code in `rnn_model`, which makes use of a single recurrent layer. 
Now, specify an architecture in `deep_rnn_model` that utilizes a variable number `recur_layers` of recurrent layers. The figure below shows the architecture that should be returned if `recur_layers=2`. In the figure, the output sequence of the first recurrent layer is used as input for the next recurrent layer. <img src="images/deep_rnn_model.png" width="80%"> Feel free to change the supplied values of `units` to whatever you think performs best. You can change the value of `recur_layers`, as long as your final value is greater than 1. (As a quick check that you have implemented the additional functionality in `deep_rnn_model` correctly, make sure that the architecture that you specify here is identical to `rnn_model` if `recur_layers=1`.) ``` model_3 = deep_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features units=200, recur_layers=2) ``` Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_3.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_3.pickle`. You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required. ``` train_model(input_to_softmax=model_3, pickle_path='model_3.pickle', save_model_path='model_3.h5', spectrogram=True) # change to False if you would like to use MFCC features ``` <a id='model4'></a> ### (IMPLEMENTATION) Model 4: Bidirectional RNN + TimeDistributed Dense Read about the [Bidirectional](https://keras.io/layers/wrappers/) wrapper in the Keras documentation. For your next architecture, you will specify an architecture that uses a single bidirectional RNN layer, before a (`TimeDistributed`) dense layer. 
The added value of a bidirectional RNN is described well in [this paper](http://www.cs.toronto.edu/~hinton/absps/DRNN_speech.pdf). > One shortcoming of conventional RNNs is that they are only able to make use of previous context. In speech recognition, where whole utterances are transcribed at once, there is no reason not to exploit future context as well. Bidirectional RNNs (BRNNs) do this by processing the data in both directions with two separate hidden layers which are then fed forwards to the same output layer. <img src="images/bidirectional_rnn_model.png" width="80%"> Before running the code cell below, you must complete the `bidirectional_rnn_model` function in `sample_models.py`. Feel free to use `SimpleRNN`, `LSTM`, or `GRU` units. When specifying the `Bidirectional` wrapper, use `merge_mode='concat'`. ``` model_4 = bidirectional_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features units=200) ``` Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_4.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_4.pickle`. You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required. ``` train_model(input_to_softmax=model_4, pickle_path='model_4.pickle', save_model_path='model_4.h5', spectrogram=True) # change to False if you would like to use MFCC features ``` <a id='model5'></a> ### (OPTIONAL IMPLEMENTATION) Models 5+ If you would like to try out more architectures than the ones above, please use the code cell below. Please continue to follow the same convention for saving the models; for the $i$-th sample model, please save the loss at **`model_i.pickle`** and saving the trained model at **`model_i.h5`**. 
``` ## (Optional) TODO: Try out some more models! ### Feel free to use as many code cells as needed. model_5 = deep_bidirectional_rnn(input_dim=161, # change to 13 if you would like to use MFCC features units=200, recur_layers=2) train_model(input_to_softmax=model_5, pickle_path='model_5.pickle', save_model_path='model_5.h5', spectrogram=True) # change to False if you would like to use MFCC features ``` <a id='compare'></a> ### Compare the Models Execute the code cell below to evaluate the performance of the drafted deep learning models. The training and validation loss are plotted for each model. ``` from glob import glob import numpy as np import _pickle as pickle import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline sns.set_style(style='white') # obtain the paths for the saved model history all_pickles = sorted(glob("results/*.pickle")) # extract the name of each model model_names = [item[8:-7] for item in all_pickles] # extract the loss history for each model valid_loss = [pickle.load( open( i, "rb" ) )['val_loss'] for i in all_pickles] train_loss = [pickle.load( open( i, "rb" ) )['loss'] for i in all_pickles] # save the number of epochs used to train each model num_epochs = [len(valid_loss[i]) for i in range(len(valid_loss))] fig = plt.figure(figsize=(16,5)) # plot the training loss vs. epoch for each model ax1 = fig.add_subplot(121) for i in range(len(all_pickles)): ax1.plot(np.linspace(1, num_epochs[i], num_epochs[i]), train_loss[i], label=model_names[i]) # clean up the plot ax1.legend() ax1.set_xlim([1, max(num_epochs)]) plt.xlabel('Epoch') plt.ylabel('Training Loss') # plot the validation loss vs. 
epoch for each model ax2 = fig.add_subplot(122) for i in range(len(all_pickles)): ax2.plot(np.linspace(1, num_epochs[i], num_epochs[i]), valid_loss[i], label=model_names[i]) # clean up the plot ax2.legend() ax2.set_xlim([1, max(num_epochs)]) plt.xlabel('Epoch') plt.ylabel('Validation Loss') plt.show() ``` __Question 1:__ Use the plot above to analyze the performance of each of the attempted architectures. Which performs best? Provide an explanation regarding why you think some models perform better than others. __Answer:__ - Model_0 is a simple RNN network with one layer and 16,617 trainable parameters. Its model performance is the worst compared to the other trained networks. Because of its simple structure, this performance is pretty expected. <br> - Model_1 becomes more complex by having a simple RNN followed by Batch Normalization and a Dense layer in Time Distributed architecture. The total number of trainable parameters jumps to 223,429 here and we observe a significant improvement in model performance compared to model_0. - Model_2 adds a convolution layer before RNN layer in model_1 and increases the complexity of the model. This additional layer leads to 441,229 trainable parameters and improves the performance of the model significantly compared to other previous models. Model_2 is also the best model compared to all the six trained models. Having one convolution layer also decreases the training time significantly, while it converges pretty fast. - Model_3 is another version of model_1 with two recurrent layers and total 464,429 trainable parameters. Due to its more complex structure compared to model_0 and model_1, it has a better performance. However, it could not outperform model_2 in terms of performance, speed, and convergence. In other words, having convolution layers in the network could significantly improve the final model. 
- Model_4 is another version of model_1 while it calls bidirectional architecture before a Dense layer in Time Distributed architecture with 446,029 trainable parameters. It outperforms model_0 but not the rest. Surprisingly, model_1 performs better than model_4 with less trainable parameters. Because of the complex structure of this model, the speed and converges rate are pretty low. - Model_5 is another version of model_3 with two recurrent bidirectional layers with 1,168,829 trainable parameters. Still, we observe similar patterns of model_3 on it, while it requires two times of training time in model_3. <a id='final'></a> ### (IMPLEMENTATION) Final Model Now that you've tried out many sample models, use what you've learned to draft your own architecture! While your final acoustic model should not be identical to any of the architectures explored above, you are welcome to merely combine the explored layers above into a deeper architecture. It is **NOT** necessary to include new layer types that were not explored in the notebook. However, if you would like some ideas for even more layer types, check out these ideas for some additional, optional extensions to your model: - If you notice your model is overfitting to the training dataset, consider adding **dropout**! To add dropout to [recurrent layers](https://faroit.github.io/keras-docs/1.0.2/layers/recurrent/), pay special attention to the `dropout_W` and `dropout_U` arguments. This [paper](http://arxiv.org/abs/1512.05287) may also provide some interesting theoretical background. - If you choose to include a convolutional layer in your model, you may get better results by working with **dilated convolutions**. If you choose to use dilated convolutions, make sure that you are able to accurately calculate the length of the acoustic model's output in the `model.output_length` lambda function. You can read more about dilated convolutions in Google's [WaveNet paper](https://arxiv.org/abs/1609.03499). 
For an example of a speech-to-text system that makes use of dilated convolutions, check out this GitHub [repository](https://github.com/buriburisuri/speech-to-text-wavenet). You can work with dilated convolutions [in Keras](https://keras.io/layers/convolutional/) by paying special attention to the `padding` argument when you specify a convolutional layer. - If your model makes use of convolutional layers, why not also experiment with adding **max pooling**? Check out [this paper](https://arxiv.org/pdf/1701.02720.pdf) for example architecture that makes use of max pooling in an acoustic model. - So far, you have experimented with a single bidirectional RNN layer. Consider stacking the bidirectional layers, to produce a [deep bidirectional RNN](https://www.cs.toronto.edu/~graves/asru_2013.pdf)! All models that you specify in this repository should have `output_length` defined as an attribute. This attribute is a lambda function that maps the (temporal) length of the input acoustic features to the (temporal) length of the output softmax layer. This function is used in the computation of CTC loss; to see this, look at the `add_ctc_loss` function in `train_utils.py`. To see where the `output_length` attribute is defined for the models in the code, take a look at the `sample_models.py` file. You will notice this line of code within most models: ``` model.output_length = lambda x: x ``` The acoustic model that incorporates a convolutional layer (`cnn_rnn_model`) has a line that is a bit different: ``` model.output_length = lambda x: cnn_output_length( x, kernel_size, conv_border_mode, conv_stride) ``` In the case of models that use purely recurrent layers, the lambda function is the identity function, as the recurrent layers do not modify the (temporal) length of their input tensors. However, convolutional layers are more complicated and require a specialized function (`cnn_output_length` in `sample_models.py`) to determine the temporal length of their output. 
You will have to add the `output_length` attribute to your final model before running the code cell below. Feel free to use the `cnn_output_length` function, if it suits your model. ``` # specify the model model_end = final_model(input_dim=161, filters=200, kernel_size=11, conv_stride=2, recur_layers=2,conv_border_mode='valid', units=200) ``` Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_end.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_end.pickle`. You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required. ``` train_model(input_to_softmax=model_end, pickle_path='model_end.pickle', save_model_path='model_end.h5', spectrogram=True) # change to False if you would like to use MFCC features ``` __Question 2:__ Describe your final model architecture and your reasoning at each step. __Answer:__ - The final model includes both a convolutional layer and also multi recurrent layers with 842,629 trainable parameters. - The convolutional layer tries to encode more complex features from spectrogram to the recurrent model. The 'relu' activation function is called at this layer, and the it is followed by Batch Normalization. - Next, two recurrent layers with GRU architecture and Batch Normalization are added to the network. Also, the dropout ratio is set 0.3 to make sure the overfitting is not going to happen. The gap between loss and validation loss validates it. - Finally, a Time Distributed Dense layer followed by softmax activation is added to the network to perform logits calculation. - The final model outperforms all the previous trained models. The train and validation predictions seem to be pretty close to what we expect. 
Still, the model could be improved by adding more convolutional layers, more recurrent layers, more training epochs, and a larger dataset.
Must be "train" or "validation"') # obtain and decode the acoustic model's predictions input_to_softmax.load_weights(model_path) prediction = input_to_softmax.predict(np.expand_dims(data_point, axis=0)) output_length = [input_to_softmax.output_length(data_point.shape[0])] pred_ints = (K.eval(K.ctc_decode( prediction, output_length)[0][0])+1).flatten().tolist() # play the audio file, and display the true and predicted transcriptions print('-'*80) Audio(audio_path) print('True transcription:\n' + '\n' + transcr) print('-'*80) print('Predicted transcription:\n' + '\n' + ''.join(int_sequence_to_text(pred_ints))) print('-'*80) ``` Use the code cell below to obtain the transcription predicted by your final model for the first example in the training dataset. ``` get_predictions(index=0, partition='train', input_to_softmax = final_model(input_dim=161, filters=200, kernel_size=11, conv_stride=2, recur_layers=2,conv_border_mode='valid', units=200), model_path='results/model_end.h5') ``` Use the next code cell to visualize the model's prediction for the first example in the validation dataset. ``` get_predictions(index=0, partition='validation', input_to_softmax=final_model(input_dim=161, filters=200, kernel_size=11, conv_stride=2, recur_layers=2,conv_border_mode='valid', units=200), model_path='results/model_end.h5') ``` One standard way to improve the results of the decoder is to incorporate a language model. We won't pursue this in the notebook, but you are welcome to do so as an _optional extension_. If you are interested in creating models that provide improved transcriptions, you are encouraged to download [more data](http://www.openslr.org/12/) and train bigger, deeper models. But beware - the model will likely take a long while to train. For instance, training this [state-of-the-art](https://arxiv.org/pdf/1512.02595v1.pdf) model would take 3-6 weeks on a single GPU!
github_jupyter
``` %load_ext autoreload %autoreload 2 import numpy as np import matplotlib.pyplot as plt from libwallerlab.opticsalgorithms.motiondeblur import blurkernel ``` # Overview This notebook explores a SNR vs. acquisition time analysis for strobed illumination, stop and stare, and coded illumination acquisition strategies. First, we determine a relationship between t_frame (frame rate) and t_exposure (exposure time). Then, we relate t_exposure to SNR for each method. These relationships should be smooth but non-linear. ``` # Define constants ps = 6.5e-3 #mm mag = 20 ps_eff = ps / mag #um n_px = np.asarray([2100, 2500]) fov = n_px * ps_eff motion_axis = 0 motion_velocity_mm_s = 20 motion_acceleration_mm_s_s = 1e4 t_settle = 0.1 #s t_ro = 0.01 #s figure_directory = '/Users/zfphil/Desktop/figures/' !mkdir -p /Users/zfphil/Desktop/figures/ np.random.choice(10) def genBlurVector_rand(kernel_length, beta=0.5, n_tests=10, metric='dnf'): ''' This is a helper function for solving for a blur vector in terms of it's condition # ''' kernel_list = [] n_elements_max = math.floor(beta * kernel_length) for test in range(n_tests): indicies = np.random.permutation(kernel_length) kernel = np.zeros(kernel_length) kernel[indicies[:n_elements_max]] = 1.0 # indicies = np.arange(kernel_length) # for index in range(n_elements_max): # rand_index = np.random.randint(0, high=np.size(indicies)-1, size=1) # kernel[indicies[rand_index]] = 1. 
# indicies = np.delete(indicies, rand_index) rand_index = np.random.permutation(kernel_length)[n_elements_max] kernel[rand_index] = beta * kernel_length - np.sum(kernel) assert beta * kernel_length - np.sum(kernel) <= 1 kernel_list.append(kernel) if metric == 'cond': # Determine kernel with best conditioon # metric_best = 1e10 kernel_best = [] for kernel in kernel_list: spectra = np.abs(np.fft.fft(kernel)) kappa = np.max(spectra) / np.min(spectra) if kappa < metric_best: kernel_best = kernel metric_best = kappa else: # Determine kernel with best conditioon # metric_best = 1e10 kernel_best = [] for kernel in kernel_list: dnf = (np.sum(1 / np.abs(scipy.fftpack.fft(kernel)) ** 2)) if dnf < metric_best: kernel_best = kernel metric_best = dnf return (metric_best, kernel_best) # import math # def condNumToDnf(cond, blur_length, image_size, beta=0.1): # dnf = ((blur_length * beta) ** 2 / cond ** 2) * math.sqrt(np.prod(image_size)) # return dnf # # condNumToDnf(40, 50, (1000,1000)) import scipy def calcDnfFromKernel(x): from libwallerlab.utilities.opticstools import Ft, iFt return (np.sum(1 / np.abs(scipy.fftpack.fft(x)) ** 2)) def getOptimalDnf(kernel_size, beta=0.5, n_tests=100, metric = 'dnf'): dnf, x = genBlurVector_rand(100, beta=beta, n_tests=n_tests, metric=metric) return(calcDnfFromKernel(x)) getOptimalDnf(100, n_tests=200, metric='dnf') def frameRateToExposure(t_frame, acquisition_strategy, motion_velocity_mm_s=10, motion_acceleration_mm_s_s=1e4, t_readout=0.01, t_settle=0.1, fov=[1,1], motion_axis=0, ps_eff_mm=6.5e-3/20, beta_coded=0.5, min_strobe_time_s=10e-6): if 'strobe' in acquisition_strategy: t_exp_camera = t_frame - t_readout v = fov[motion_axis] / t_frame t_illum_strobe = ps_eff / v if t_illum_strobe < min_strobe_time_s: t_exp = 0 else: t_exp = t_illum_strobe # No deconvolution here dnf = 1 elif 'stop_and_stare' in acquisition_strategy: t_start_stop = motion_velocity_mm_s / motion_acceleration_mm_s_s d_start_stop = 0.5 * motion_acceleration_mm_s_s * 
t_start_stop ** 2 t_move = (fov[motion_axis] - d_start_stop) / motion_velocity_mm_s t_exp_camera = t_frame - t_move - t_start_stop + t_readout t_exp = t_exp_camera # Illumination is on the whole time # No deconvolution here dnf = 1 elif 'code' in acquisition_strategy: t_exp_camera = t_frame - t_readout # Determine kernel length kernel_length = int(np.ceil(t_exp_camera / t_frame * fov[motion_axis] / ps_eff)) kernel_length = max(kernel_length, 1) if kernel_length == 1: dnf = 1 else: # dnf = blurkernel.dnfUpperBound(kernel_length, beta_coded) dnf = getOptimalDnf(kernel_length, beta=beta_coded, n_tests=10) t_exp_camera = t_frame - t_readout v = fov[motion_axis] / t_frame t_illum_strobe = ps_eff / v if t_illum_strobe < min_strobe_time_s: t_exp = 0 else: t_exp = t_exp_camera * beta_coded # # assert t_exp > 0 if t_exp <= 0 or t_exp_camera <= 0: t_exp = 0 return(t_exp, dnf) frame_time = 0.1 t_strobe, dnf_strobd = frameRateToExposure(frame_time, 'strobe', fov=fov) snr_strobe = blurkernel.dnf2snr(dnf_strobd, t_strobe*1000) print("Strobed illumination will have exposure time %.5f seconds and SNR %.5f" % (t_strobe, snr_strobe)) t_sns, dnf_sns = frameRateToExposure(frame_time, 'stop_and_stare', fov=fov) snr_sns = blurkernel.dnf2snr(dnf_sns, t_sns*1000) print("Stop-and-stare illumination will have exposure time %.5f seconds and SNR %.5f" % (t_sns, snr_sns)) t_coded, dnf_coded = frameRateToExposure(frame_time, 'code', fov=fov) snr_coded = blurkernel.dnf2snr(dnf_coded, t_coded*1000) print("Coded illumination will have exposure time %.5f seconds and SNR %.5f" % (t_coded, snr_coded)) ``` ## Plot SNR vs Frame Rate ``` frame_rates = np.arange(1,80,0.1) snr_strobe_list = [] snr_sns_list = [] snr_coded_list_25 = [] snr_coded_list_10 = [] snr_coded_list_50 = [] snr_coded_list_75 = [] snr_coded_list_99 = [] for index, rate in enumerate(frame_rates): t_frame = 1 / rate t_strobe, dnf_strobe = frameRateToExposure(t_frame, 'strobe', fov=fov) 
snr_strobe_list.append(blurkernel.dnf2snr(dnf_strobe, t_strobe*1000)) t_sns, dnf_sns = frameRateToExposure(t_frame, 'stop_and_stare', fov=fov) snr_sns_list.append(blurkernel.dnf2snr(dnf_sns, t_sns*1000)) t_coded_10, dnf_coded_10 = frameRateToExposure(t_frame, 'code', fov=fov, beta_coded=0.05) snr_coded_list_10.append(blurkernel.dnf2snr(dnf_coded_10, t_coded_10*1000)) t_coded_50, dnf_coded_50 = frameRateToExposure(t_frame, 'code', fov=fov, beta_coded=0.5) snr_coded_list_50.append(blurkernel.dnf2snr(dnf_coded_50, t_coded_50*1000)) # t_coded_75, dnf_coded_75 = frameRateToExposure(t_frame, 'code', fov=fov, beta_coded=0.75) # snr_coded_list_75.append(blurkernel.dnf2snr(dnf_coded_75, t_coded_75)) t_coded_99, dnf_coded_99 = frameRateToExposure(t_frame, 'code', fov=fov, beta_coded=0.95) snr_coded_list_99.append(blurkernel.dnf2snr(dnf_coded_99, t_coded_99*1000)) # snr_coded_list.append(0) # print("Coded illumination will have exposure time %.3f seconds and SNR %.2f" % (t_coded, snr_coded)) # print("Finished rate %d of %d" % (index, len(frame_rates))) # plt.style.use('seaborn-dark') jtplot.style() # plt.style.use('classic') plt.figure(figsize=(12,8)) plt.semilogy(frame_rates, snr_coded_list_10, 'b-') plt.semilogy(frame_rates, snr_coded_list_50, 'g-') plt.semilogy(frame_rates, snr_coded_list_99, 'y') plt.semilogy(frame_rates, snr_sns_list, 'r-', linewidth=2) plt.semilogy(frame_rates, snr_strobe_list, 'w-', linewidth=2) plt.ylim((0.5, 5000)) plt.xlim((0,75)) plt.legend(('Coded, 5% Illuminated', 'Coded, 50% Illuminated', 'Coded, 95% Illuminated', 'Stop-and-Stare', 'Strobed'), fontsize=24) plt.xlabel('Frame Rate (Hz)', fontsize=28) plt.ylabel('SNR', fontsize=28) ax = plt.gca() for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(24) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(24) plt.grid('on', which='both') plt.tight_layout() plt.savefig(figure_directory + 'strobe_sns_coded.png', transparent=True) # plt.style.use('seaborn-dark') jtplot.style() # 
plt.style.use('classic') plt.figure(figsize=(12,8)) plt.semilogy(frame_rates, snr_sns_list, 'r-', linewidth=2) plt.semilogy(frame_rates, snr_strobe_list, 'w-', linewidth=2) plt.ylim((0.5, 5000)) plt.xlim((0,75)) plt.legend(('Stop-and-Stare', 'Strobed'), fontsize=24) plt.xlabel('Frame Rate (Hz)', fontsize=28) plt.ylabel('SNR', fontsize=28) ax = plt.gca() for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(24) for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(24) plt.grid('on', which='both') plt.tight_layout() plt.savefig(figure_directory + 'strobe_sns.png', transparent=True) ``` # Blur Kernel Optimization ``` data = np.load('single_illums.npz') kernel_vector = data['kernel_vector'] kernel_random = data['kernel_random'] blur_kernel_map = np.zeros(object_size) for position_index, position in enumerate(point_list): blur_kernel_map[position[0], position[1]] = kernel_vector[position_index] num_frames = iterates.shape[1] iterates = np.array(result['history']['x']) #.T print(iterates.shape) total_its = iterates.shape[1] interval = total_its / num_frames #interval=2 #ax = plt.subplot2grid((6, 1), (1, 5)) #ax = plt.subplot2grid((6, 1), (1, 0), colspan=5) initial_power_spectrum = 0; blur_operator = W * 0.5*np.sum(kernel_map, 0).astype(np.complex64).reshape(-1) static_power_spectrum = np.sum(np.abs(wotf.Ft(blur_operator.reshape(image_size))), axis=0) sigma_min_static = np.amin(static_power_spectrum) sigma_min_static = np.amax(static_power_spectrum) # Generate spatial frequency coordintes ps = 6.5 fov = 2000 * 6.5e-3/20 dk = 1/fov freqs = np.arange(-len(static_power_spectrum) // 2, len(static_power_spectrum) // 2) * dk assert len(freqs) == len(static_power_spectrum) kernel_random = iterates[:,0] for i in range(num_frames): illum = iterates[:,int(interval*i)] blur_operator_illum = W * (kernel_map.T.dot(iterates[:,int(interval*i)])).T.astype(np.complex64).reshape(-1) power_spectrum = np.sum(np.abs(wotf.Ft(blur_operator_illum.reshape(image_size))), 
axis=0) sigma_min = np.amin(power_spectrum) sigma_max = np.amax(power_spectrum) condition = sigma_max/sigma_min if i==0: initial_power_spectrum = power_spectrum fig = plt.figure(figsize=(10,5)) ax1 = plt.subplot2grid((8, 1), (0, 0), rowspan=4) ax2 = plt.subplot2grid((8, 1), (6, 0), rowspan=2) ax2.step(illum, 'orange', linewidth=3) ax2.set_ylim([-0.1,1.1]) ax2.set_xlim([0,24]) ax2.set_title('Illumination Pattern', fontsize=24, color='w') ax1.set_title('Power Spectrum', fontsize=24, color='w') # ax1.set_xlim([0,127]) # ax1.set_ylim([10,10^4]) # ax2.set_xticklabels([]) ax1.set_ylabel('Energy', color='w') ax1.set_xlabel('Spatial Frequencey (cycles/mm)', color='w') ax2.set_ylabel('Intensity', color='w') ax2.set_xlabel('Position', color='w') ax2.xaxis.set_ticks_position('none') ax2.yaxis.set_ticks_position('none') #ax2.axison = False ax2.set_yticklabels([0,0,1]) # ax1.semilogy(initial_power_spectrum, '--', color='white') # ax1.semilogy(static_power_spectrum, '--', color='white') ax1.semilogy(freqs, sigma_min*np.ones(power_spectrum.size), color='r', linewidth=3) ax1.semilogy(freqs, sigma_max*np.ones(power_spectrum.size), color='r', linewidth=3) ax1.semilogy(freqs, power_spectrum, color='blue', linewidth=3) ax1.set_ylim((10,6000)) # ax1.set_xticklabels([]) #ax1.set_yticklabels([]) #plt.suptitle('iteration '+str(int(interval*i))+',\t$\kappa=$'+str(np.round(condition,3))) plt.text(0.6,4.7,'iteration '+str(int(interval*i))+', $\kappa=$'+str(np.round(condition,3)),fontsize=15, color='w') # Set Axis Colors for ax in [ax1, ax2]: ax.tick_params(axis='both', which='major', labelsize=14, color='w') ax.tick_params(axis='both', which='minor', labelsize=14, color='w') [i.set_color("w") for i in ax.get_xticklabels()] [i.set_color("w") for i in ax.get_yticklabels()] plt.savefig("images/power_spectrum_optimization" + str(i) + ".png") ```
github_jupyter
**Pix-2-Pix Model using TensorFlow and Keras** A port of pix-2-pix model built using TensorFlow's high level `tf.keras` API. Note: GPU is required to make this model train quickly. Otherwise it could take hours. Original : https://www.kaggle.com/vikramtiwari/pix-2-pix-model-using-tensorflow-and-keras/notebook ## Installations ``` requirements = """ tensorflow drawSvg matplotlib numpy scipy pillow #urllib #skimage scikit-image #gzip #pickle """ %store requirements > requirements.txt !pip install -r requirements.txt ``` ## Data Import ``` # !mkdir datasets # URL="https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/facade.tar.gz" # TAR_FILE="./datasets/facade.tar.gz" # TARGET_DIR="./datasets/facade/" # !wget -N URL -O TAR_FILE # !mkdir TARGET_DIR # !tar -zxvf TAR_FILE -C ./datasets/ # !rm TAR_FILE #_URL = 'https://drive.google.com/uc?export=download&id=1dnLTTT19YROjpjwZIZpJ1fxAd91cGBJv' #path_to_zip = tf.keras.utils.get_file('pix2pix.zip', origin=_URL,extract=True) #PATH = os.path.join(os.path.dirname(path_to_zip), 'pix2pix/') ``` ## Imports ``` import os import datetime import imageio import skimage import scipy # # from PIL import Image as Img import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from glob import glob from IPython.display import Image tf.logging.set_verbosity(tf.logging.ERROR) datafolderpath = "/content/drive/My Drive/ToDos/Research/MidcurveNN/code/data/" datasetpath = datafolderpath+ "pix2pix/datasets/pix2pix/" # # datasetpath = "./" # Run this cell to mount your Google Drive. 
from google.colab import drive drive.mount('/content/drive') !ls $datafolderpath class DataLoader(): def __init__(self, dataset_name, img_res=(256, 256)): self.dataset_name = dataset_name self.img_res = img_res def binarize(self, image): h, w = image.shape for i in range(h): for j in range(w): if image[i][j] < 195: image[i][j] = 0 return image def load_data(self, batch_size=1, is_testing=False): data_type = "train" if not is_testing else "test" path = glob(datafolderpath+'%s/datasets/%s/%s/*' % (self.dataset_name, self.dataset_name, data_type)) #path = glob(PATH + '%s/*' % (data_type)) batch_images = np.random.choice(path, size=batch_size) imgs_A = [] imgs_B = [] for img_path in batch_images: img = self.imread(img_path) img = self.binarize(img) img = np.expand_dims(img, axis=-1) h, w, _ = img.shape _w = int(w/2) img_A, img_B = img[:, :_w, :], img[:, _w:, :] # img_A = scipy.misc.imresize(img_A, self.img_res) # img_A = np.array(Img.fromarray(img_A).resize(self.img_res)) #img_A = np.array(skimage.transform.resize(img_A,self.img_res)) # img_B = scipy.misc.imresize(img_B, self.img_res) # img_B = np.array(Img.fromarray(img_B).resize(self.img_res)) #img_B = np.array(skimage.transform.resize(img_B,self.img_res)) # If training => do random flip if not is_testing and np.random.random() < 0.5: img_A = np.fliplr(img_A) img_B = np.fliplr(img_B) imgs_A.append(img_A) imgs_B.append(img_B) imgs_A = np.array(imgs_A)/127.5 - 1. imgs_B = np.array(imgs_B)/127.5 - 1. 
return imgs_A, imgs_B def load_batch(self, batch_size=1, is_testing=False): data_type = "train" if not is_testing else "val" path = glob(datafolderpath+'%s/datasets/%s/%s/*' % (self.dataset_name, self.dataset_name, data_type)) #path = glob(PATH + '%s/*' % (data_type)) self.n_batches = int(len(path) / batch_size) for i in range(self.n_batches-1): batch = path[i*batch_size:(i+1)*batch_size] imgs_A, imgs_B = [], [] for img in batch: img = self.imread(img) img = self.binarize(img) img = np.expand_dims(img, axis=-1) h, w, _ = img.shape half_w = int(w/2) img_A = img[:, :half_w, :] img_B = img[:, half_w:, :] # img_A = scipy.misc.imresize(img_A, self.img_res) # img_A = np.array(Img.fromarray(img_A).resize(self.img_res)) #img_A = np.array(skimage.transform.resize(img_A,self.img_res)) # img_B = scipy.misc.imresize(img_B, self.img_res) # img_B = np.array(Img.fromarray(img_B).resize(self.img_res)) #img_B = np.array(skimage.transform.resize(img_B,self.img_res)) if not is_testing and np.random.random() > 0.5: img_A = np.fliplr(img_A) img_B = np.fliplr(img_B) imgs_A.append(img_A) imgs_B.append(img_B) imgs_A = np.array(imgs_A)/127.5 - 1. imgs_B = np.array(imgs_B)/127.5 - 1. 
yield imgs_A, imgs_B def imread(self, path): return imageio.imread(path).astype(np.float) class Pix2Pix(): def __init__(self): # Input shape self.img_rows = 256 self.img_cols = 256 self.channels = 1 self.img_shape = (self.img_rows, self.img_cols, self.channels) # Configure data loader self.dataset_name = 'pix2pix' self.data_loader = DataLoader(dataset_name=self.dataset_name, img_res=(self.img_rows, self.img_cols)) # Calculate output shape of D (PatchGAN) patch = int(self.img_rows / 2**4) self.disc_patch = (patch, patch, 1) # Number of filters in the first layer of G and D self.gf = int(self.img_rows/4) # 64 self.df = int(self.img_rows/4) # 64 optimizer = tf.keras.optimizers.Adam(0.0002, 0.5) # Build and compile the discriminator self.discriminator = self.build_discriminator() self.discriminator.compile(loss='mse', optimizer=optimizer, metrics=['accuracy']) #------------------------- # Construct Computational # Graph of Generator #------------------------- # Build the generator self.generator = self.build_generator() # Input images and their conditioning images img_A = tf.keras.layers.Input(shape=self.img_shape) img_B = tf.keras.layers.Input(shape=self.img_shape) # By conditioning on B generate a fake version of A #fake_A = self.generator(img_B) #By conditioning on A generate a fake version of B fake_B = self.generator(img_A) # For the combined model we will only train the generator self.discriminator.trainable = False # Discriminators determines validity of translated images / condition pairs #valid = self.discriminator([fake_A, img_B]) valid = self.discriminator([img_A, fake_B]) self.combined = tf.keras.models.Model(inputs=[img_A, img_B], outputs=[valid, fake_B]) self.combined.compile(loss=['mse', 'mae'], loss_weights=[1, 100], optimizer=optimizer) def build_generator(self): """U-Net Generator""" def conv2d(layer_input, filters, f_size=4, bn=True): """Layers used during downsampling""" d = tf.keras.layers.Conv2D(filters, kernel_size=f_size, strides=2, 
padding='same')(layer_input) d = tf.keras.layers.LeakyReLU(alpha=0.2)(d) if bn: d = tf.keras.layers.BatchNormalization(momentum=0.8)(d) return d def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0): """Layers used during upsampling""" u = tf.keras.layers.UpSampling2D(size=2)(layer_input) u = tf.keras.layers.Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u) if dropout_rate: u = tf.keras.layers.Dropout(dropout_rate)(u) u = tf.keras.layers.BatchNormalization(momentum=0.8)(u) u = tf.keras.layers.Concatenate()([u, skip_input]) return u # Image input d0 = tf.keras.layers.Input(shape=self.img_shape) # Downsampling d1 = conv2d(d0, self.gf, bn=False) d2 = conv2d(d1, self.gf*2) d3 = conv2d(d2, self.gf*4) d4 = conv2d(d3, self.gf*8) d5 = conv2d(d4, self.gf*8) d6 = conv2d(d5, self.gf*8) d7 = conv2d(d6, self.gf*8) # Upsampling u1 = deconv2d(d7, d6, self.gf*8) u2 = deconv2d(u1, d5, self.gf*8) u3 = deconv2d(u2, d4, self.gf*8) u4 = deconv2d(u3, d3, self.gf*4) u5 = deconv2d(u4, d2, self.gf*2) u6 = deconv2d(u5, d1, self.gf) u7 = tf.keras.layers.UpSampling2D(size=2)(u6) output_img = tf.keras.layers.Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u7) return tf.keras.models.Model(d0, output_img) def build_discriminator(self): def d_layer(layer_input, filters, f_size=4, bn=True): """Discriminator layer""" d = tf.keras.layers.Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input) d = tf.keras.layers.LeakyReLU(alpha=0.2)(d) if bn: d = tf.keras.layers.BatchNormalization(momentum=0.8)(d) return d img_A = tf.keras.layers.Input(shape=self.img_shape) img_B = tf.keras.layers.Input(shape=self.img_shape) # Concatenate image and conditioning image by channels to produce input combined_imgs = tf.keras.layers.Concatenate(axis=-1)([img_A, img_B]) d1 = d_layer(combined_imgs, self.df, bn=False) d2 = d_layer(d1, self.df*2) d3 = d_layer(d2, self.df*4) d4 = d_layer(d3, self.df*8) validity = 
tf.keras.layers.Conv2D(1, kernel_size=4, strides=1, padding='same')(d4) return tf.keras.models.Model([img_A, img_B], validity) def train(self, epochs, batch_size=1, sample_interval=50): start_time = datetime.datetime.now() # Adversarial loss ground truths valid = np.ones((batch_size,) + self.disc_patch) fake = np.zeros((batch_size,) + self.disc_patch) for epoch in range(epochs): for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size)): # --------------------- # Train Discriminator # --------------------- # Condition on B and generate a translated version #fake_A = self.generator.predict(imgs_B) #Condition on A and generate a translated version fake_B = self.generator.predict(imgs_A) # Train the discriminators (original images = real / generated = Fake) d_loss_real = self.discriminator.train_on_batch([imgs_A, imgs_B], valid) d_loss_fake = self.discriminator.train_on_batch([imgs_A, fake_B], fake) d_loss = 0.5 * np.add(d_loss_real, d_loss_fake) # ----------------- # Train Generator # ----------------- # Train the generators g_loss = self.combined.train_on_batch([imgs_A, imgs_B], [valid, imgs_B]) elapsed_time = datetime.datetime.now() - start_time # Plot the progress print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %f] time: %s" % (epoch, epochs, batch_i, self.data_loader.n_batches, d_loss[0], 100*d_loss[1], g_loss[0], elapsed_time)) # If at save interval => save generated image samples if batch_i % sample_interval == 0: self.sample_images(epoch, batch_i) def sample_images(self, epoch, batch_i): os.makedirs(datafolderpath+'images/%s' % self.dataset_name, exist_ok=True) r, c = 3, 3 imgs_A, imgs_B = self.data_loader.load_data(batch_size=3, is_testing=True) fake_B = self.generator.predict(imgs_A) gen_imgs = np.concatenate([imgs_A, fake_B, imgs_B]) # Rescale images 0 - 1 gen_imgs = 0.5 * gen_imgs + 0.5 titles = ['Condition', 'Generated', 'Original'] fig, axs = plt.subplots(r, c, figsize=(15,15)) cnt = 0 for i in range(r): for 
j in range(c): axs[i,j].imshow(gen_imgs[cnt][:,:,0], cmap='gray') axs[i, j].set_title(titles[i]) axs[i,j].axis('off') cnt += 1 fig.savefig(datafolderpath+"images/%s/%d_%d.png" % (self.dataset_name, epoch, batch_i)) plt.close() gan = Pix2Pix() # gan.train(epochs=200, batch_size=1, sample_interval=200) gan.train(epochs=2, batch_size=1, sample_interval=200) # training logs are hidden in published notebook ``` Let's see how our model performed over time. ``` from PIL import Image as Img Image('/content/drive/My Drive/ToDos/Research/MidcurveNN/code/data/images/pix2pix/0_0.png') Img('/content/drive/My Drive/ToDos/Research/MidcurveNN/code/data/images/pix2pix/0_200.png') ``` This is the result of 2 iterations. You can train the model for more than 2 iterations and it will produce better results. Also, try this model with different datasets. ``` ```
github_jupyter
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> $ \newcommand{\bra}[1]{\langle #1|} $ $ \newcommand{\ket}[1]{|#1\rangle} $ $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ $ \newcommand{\dot}[2]{ #1 \cdot #2} $ $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ $ \newcommand{\mypar}[1]{\left( #1 \right)} $ $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ $ \newcommand{\onehalf}{\frac{1}{2}} $ $ \newcommand{\donehalf}{\dfrac{1}{2}} $ $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ $ \newcommand{\vzero}{\myvector{1\\0}} $ $ \newcommand{\vone}{\myvector{0\\1}} $ $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $ $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $ $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $ $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $ 
$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $ $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $ <font style="font-size:28px;" align="left"><b> <font color="blue"> Solutions for </font>Reflections </b></font> <br> _prepared by Abuzer Yakaryilmaz_ <br><br> <a id="task1"></a> <h3> Task 1</h3> Create a quantum ciruit with 5 qubits. Apply h-gate (Hadamard operator) to each qubit. Apply z-gate ($Z$ operator) to randomly picked qubits. (i.e., $ mycircuit.z(qreg[i]) $) Apply h-gate to each qubit. Measure each qubit. Execute your program 1000 times. Compare the outcomes of the qubits affected by z-gates, and the outcomes of the qubits not affected by z-gates. Does z-gate change the outcome? Why? <h3> Solution </h3> ``` # import all necessary objects and methods for quantum circuits from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer # import randrange for random choices from random import randrange number_of_qubit = 5 # define a quantum register with 5 qubits q = QuantumRegister(number_of_qubit) # define a classical register with 5 bits c = ClassicalRegister(number_of_qubit) # define our quantum circuit qc = QuantumCircuit(q,c) # apply h-gate to all qubits for i in range(number_of_qubit): qc.h(q[i]) # apply z-gate to randomly picked qubits for i in range(number_of_qubit): if randrange(2) == 0: # the qubit with index i is picked to apply z-gate qc.z(q[i]) # apply h-gate to all qubits for i in range(number_of_qubit): qc.h(q[i]) qc.barrier() # measure all qubits qc.measure(q,c) # draw the circuit display(qc.draw(output='mpl')) # execute the circuit 1000 times in the local simulator job = execute(qc,Aer.get_backend('qasm_simulator'),shots=1000) counts = job.result().get_counts(qc) print(counts) ``` <a id="task2"></a> <h3> Task 2 </h3> Randomly create a quantum state and multiply it with Hadamard matrix to find its reflection. Draw both states. Repeat the task for a few times. 
This is the operator NOT
github_jupyter
<a href="https://colab.research.google.com/github/lucianaribeiro/filmood/blob/master/SentimentDetectionRNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` # Installing Tensorflow ! pip install --upgrade tensorflow # Installing Keras ! pip install --upgrade keras # Install other packages ! pip install --upgrade pip nltk numpy # Importing the libraries from keras.datasets import imdb from keras.preprocessing import sequence from keras import Sequential from keras.layers import Embedding, LSTM, Dense, Dropout from numpy import array # Disable tensor flow warnings for better view from tensorflow.python.util import deprecation deprecation._PRINT_DEPRECATION_WARNINGS = False # Loading dataset from IMDB vocabulary_size = 10000 (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words = vocabulary_size) # Inspect a sample review and its label print('Review') print(X_train[6]) print('Label') print(y_train[6]) # Review back to the original words word2id = imdb.get_word_index() id2word = {i: word for word, i in word2id.items()} print('Review with words') print([id2word.get(i, ' ') for i in X_train[6]]) print('Label') print(y_train[6]) # Ensure that all sequences in a list have the same length X_train = sequence.pad_sequences(X_train, maxlen=500) X_test = sequence.pad_sequences(X_test, maxlen=500) # Initialising the RNN regressor=Sequential() # Adding a first Embedding layer and some Dropout regularization regressor.add(Embedding(vocabulary_size, 32, input_length=500)) regressor.add(Dropout(0.2)) # Adding a second LSTM layer and some Dropout regularization regressor.add(LSTM(units = 50, return_sequences = True)) regressor.add(Dropout(0.2)) # Adding a third LSTM layer and some Dropout regularization regressor.add(LSTM(units = 50, return_sequences = True)) regressor.add(Dropout(0.2)) # Adding a fourth LSTM layer and some Dropout regularization regressor.add(LSTM(units = 50)) 
regressor.add(Dropout(0.2)) # Adding the output layer regressor.add(Dense(1, activation='sigmoid')) # Compiling the RNN regressor.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) X_valid, y_valid = X_train[:64], y_train[:64] X_train2, y_train2 = X_train[64:], y_train[64:] regressor.fit(X_train2, y_train2, validation_data=(X_valid, y_valid), batch_size=64, epochs=25) ! pip install --upgrade nltk import nltk nltk.download('punkt') from nltk import word_tokenize # A value close to 0 means the sentiment was negative and a value close to 1 means its a positive review word2id = imdb.get_word_index() test=[] for word in word_tokenize("this is simply one of the best films ever made"): test.append(word2id[word]) test=sequence.pad_sequences([test],maxlen=500) regressor.predict(test) # A value close to 0 means the sentiment was negative and a value close to 1 means its a positive review word2id = imdb.get_word_index() test=[] for word in word_tokenize( "the script is a real insult to the intelligence of those watching"): test.append(word2id[word]) test=sequence.pad_sequences([test],maxlen=500) regressor.predict(test) ```
github_jupyter
``` !pip install torch torchtext !git clone https://github.com/neubig/nn4nlp-code.git from collections import defaultdict import math import time import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F N=2 #length of window on each side (so N=2 gives a total window size of 5, as in t-2 t-1 t t+1 t+2) EMB_SIZE = 128 # The size of the embedding embeddings_location = "embeddings.txt" #the file to write the word embeddings to labels_location = "labels.txt" #the file to write the labels to # We reuse the data reading from the language modeling class w2i = defaultdict(lambda: len(w2i)) S = w2i["<s>"] UNK = w2i["<unk>"] def read_dataset(filename): with open(filename, "r") as f: for line in f: yield [w2i[x] for x in line.strip().split(" ")] # Read in the data train = list(read_dataset("nn4nlp-code/data/ptb/train.txt")) w2i = defaultdict(lambda: UNK, w2i) dev = list(read_dataset("nn4nlp-code/data/ptb/valid.txt")) i2w = {v: k for k, v in w2i.items()} nwords = len(w2i) with open(labels_location, 'w') as labels_file: for i in range(nwords): labels_file.write(i2w[i] + '\n') class CBOW(nn.Module): def __init__(self, vocab_size, embed_dim): super(CBOW, self).__init__() self.embeddings_bag = nn.EmbeddingBag(vocab_size, embed_dim, mode='sum') self.fcl = nn.Linear(embed_dim, vocab_size, bias=False) def forward(self, x): x = self.embeddings_bag(x.view(1, -1)) return self.fcl(x) model = CBOW(nwords, EMB_SIZE) loss_fn = nn.CrossEntropyLoss() opt = torch.optim.SGD(model.parameters(), lr=0.1) # Calculate the loss value for the entire sentence def calc_sent_loss(sent): #add padding to the sentence equal to the size of the window #as we need to predict the eos as well, the future window at that point is N past it padded_sent = [S] * N + sent + [S] * N # Step through the sentence all_losses = [] for i in range(N,len(sent)+N): model.zero_grad() logits = model(torch.LongTensor(padded_sent[i-N:i] + padded_sent[i+1:i+N+1])) loss = 
F.cross_entropy(logits, torch.tensor(padded_sent[i]).view(1)) loss.backward() opt.step() all_losses.append(loss.cpu().detach().numpy()) return sum(all_losses) MAX_LEN = 100 for ITER in range(100): print("started iter %r" % ITER) # Perform training random.shuffle(train) train_words, train_loss = 0, 0.0 start = time.time() for sent_id, sent in enumerate(train): my_loss = calc_sent_loss(sent) train_loss += my_loss train_words += len(sent) # my_loss.backward() # trainer.update() if (sent_id+1) % 5000 == 0: print("--finished %r sentences" % (sent_id+1)) print("iter %r: train loss/word=%.4f, ppl=%.4f, time=%.2fs" % (ITER, train_loss/train_words, math.exp(train_loss/train_words), time.time()-start)) # Evaluate on dev set dev_words, dev_loss = 0, 0.0 start = time.time() for sent_id, sent in enumerate(dev): my_loss = calc_sent_loss(sent) dev_loss += my_loss dev_words += len(sent) # trainer.update() print("iter %r: dev loss/word=%.4f, ppl=%.4f, time=%.2fs" % (ITER, dev_loss/dev_words, math.exp(dev_loss/dev_words), time.time()-start)) print("saving embedding files") with open(embeddings_location, 'w') as embeddings_file: W_w_np = W_w_p.as_array() for i in range(nwords): ith_embedding = '\t'.join(map(str, W_w_np[i])) embeddings_file.write(ith_embedding + '\n') ```
github_jupyter
``` import os import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import roc_curve, roc_auc_score, precision_score, recall_score, f1_score, accuracy_score, confusion_matrix import glob import cv2 import random import tensorflow as tf #print versions print('tensorflow version',tf.__version__) labels = ['PNEUMONIA', 'NORMAL'] img_size = 180 def get_training_data(data_dir): data = [] for label in labels: path = os.path.join(data_dir, label) class_num = labels.index(label) for img in os.listdir(path): try: img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE) resized_arr = cv2.resize(img_arr, (img_size, img_size)) # Reshaping images to preferred size data.append([resized_arr, class_num]) except Exception as e: print(e) return np.array(data) train = get_training_data('C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/train') test = get_training_data('C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/test') val = get_training_data('C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/val') train_path = 'C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/train' test_path = 'C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/test' val_path = 'C:/Users/Administrateur/OneDrive/Formation DATASCIENCE/Projet DATA SCIENCE/chest_xray/val' train_files_original = glob.glob(train_path+'/*/*') val_files_original = glob.glob(val_path+'/*/*') test_files = glob.glob(test_path+'/*/*') print('number of train samples across classes:', len(train_files_original)) print('number of val samples across classes:', len(val_files_original)) print('number of test samples across classes:', len(test_files)) files = np.unique(train_files_original + val_files_original) train_files, val_files = train_test_split(files, 
test_size=0.3, shuffle=True) print('number of train samples:', len(train_files)) print('number of val samples:', len(val_files)) count_normal = len([x for x in train_files if 'NORMAL' in x]) count_pneumonia = len([x for x in train_files if 'PNEUMONIA' in x]) print('Count of NORMAL images in train:', count_normal) print('Count of PNEUMONIA images in train:', count_pneumonia) IMG_SIZE = 180 x_train = [] y_train = [] x_val = [] y_val = [] x_test = [] y_test = [] for feature, label in train: x_train.append(feature) y_train.append(label) for feature, label in val: x_val.append(feature) y_val.append(label) for feature, label in test: x_test.append(feature) y_test.append(label) x_train = np.array(x_train) / 255 x_val = np.array(x_val) / 255 x_test = np.array(x_test) / 255 x_train = x_train.reshape(-1, IMG_SIZE, IMG_SIZE, 1) y_train = np.array(y_train) x_val = x_val.reshape(-1, IMG_SIZE, IMG_SIZE, 1) y_val = np.array(y_val) x_test = x_test.reshape(-1, IMG_SIZE, IMG_SIZE, 1) y_test = np.array(y_test) plt.imshow(x_train[0].reshape(180,180), cmap='gray') print('label = ', y_train[0]) print(len(x_train)) print(len(x_val)) print(len(x_test)) fig, ax = plt.subplots(3, 3, figsize=(10, 7)) ax = ax.ravel() plt.tight_layout() for i in range(3): random_index = random.randint(0, min(len(x_train), len(x_val), len(x_test))) ax[i].imshow(x_train[random_index].reshape(180,180), cmap='gray') ax[i].set_title('Set: train, label (Pneumonia =) {}'.format(y_train[random_index])) ax[i+3].imshow(x_val[random_index].reshape(180,180), cmap='gray') ax[i+3].set_title('Set: val, label (Pneumonia =) {}'.format(y_val[random_index])) ax[i+6].imshow(x_test[random_index].reshape(180,180), cmap='gray') ax[i+6].set_title('Set: test, label (Pneumonia =) {}'.format(y_test[random_index])) def conv_block(filters): block = tf.keras.Sequential([ tf.keras.layers.SeparableConv2D(filters, (3,3), activation='relu', padding='same'), tf.keras.layers.SeparableConv2D(filters, (3,3), activation='relu', padding='same'), 
tf.keras.layers.BatchNormalization(), tf.keras.layers.MaxPool2D(), ]) return block def dense_block(units, dropout_rate): block = tf.keras.Sequential([ tf.keras.layers.Dense(units, activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dropout(dropout_rate), ]) return block def build_model(): model = tf.keras.Sequential([ tf.keras.Input(shape=(IMG_SIZE, IMG_SIZE, 1)), tf.keras.layers.Conv2D(16, (3,3), activation='relu', padding='same'), tf.keras.layers.Conv2D(16, (3,3), activation='relu', padding='same'), tf.keras.layers.MaxPool2D(), conv_block(32), conv_block(64), conv_block(128), tf.keras.layers.Dropout(0.2), conv_block(256), tf.keras.layers.Dropout(0.2), tf.keras.layers.Flatten(), dense_block(256, 0.7), dense_block(128, 0.5), dense_block(64, 0.3), tf.keras.layers.Dense(1, activation='sigmoid') ]) return model weight_for_normal = len(x_train) / (2 * count_normal) weight_for_pneumonia = len(x_train) / (2 * count_pneumonia) class_weight = {0:weight_for_normal, 1:weight_for_pneumonia} print('weight for class 0 (normal): {:.3f}'.format(weight_for_normal)) print('weight for class 1 (pneumonia): {:.3f}'.format(weight_for_pneumonia)) model_vanilla = build_model() metrics = [ 'accuracy', tf.keras.metrics.Precision(name='precision'), tf.keras.metrics.Recall(name='recall'), ] model_vanilla.compile(optimizer='adam', loss='binary_crossentropy', metrics=metrics) epochs = 10 batch_size = 100 history_vanilla = model_vanilla.fit( x=x_train, y=y_train, epochs=epochs, batch_size=batch_size, validation_data=(x_val,y_val), class_weight=class_weight ) epochs_array = [i for i in range(epochs)] fig, ax = plt.subplots(1,3) train_precision = history_vanilla.history['precision'] train_recall = history_vanilla.history['recall'] train_loss = history_vanilla.history['loss'] val_precision = history_vanilla.history['val_precision'] val_recall = history_vanilla.history['val_recall'] val_loss = history_vanilla.history['val_loss'] fig.set_size_inches(20,5) 
ax[0].plot(epochs_array, train_loss, 'g-o', label='Training Loss') ax[0].plot(epochs_array, val_loss, 'r-o', label='Validation Loss') ax[0].set_title('Training & Validation Loss') ax[0].legend() ax[0].set_xlabel('Epochs') ax[0].set_ylabel('Loss') ax[0].grid(True) ax[1].plot(epochs_array, train_precision, 'go-', label='Training Precision') ax[1].plot(epochs_array, val_precision, 'ro-', label='Validation Precision') ax[1].set_title('Training & Validation Precision') ax[1].legend() ax[1].set_xlabel('Epochs') ax[1].set_ylabel('Precision') ax[1].grid(True) ax[2].plot(epochs_array, train_recall, 'go-', label='Training Recall') ax[2].plot(epochs_array, val_recall, 'ro-', label='Validation Recall') ax[2].set_title('Training & Validation Recall') ax[2].legend() ax[2].set_xlabel('Epochs') ax[2].set_ylabel('Recall') ax[2].grid(True) plt.show() predictions = model_vanilla.predict(x=x_test) y_pred = np.round(predictions).reshape(1,-1)[0] def print_results(y_test, y_pred): print('Accuracy : {:.5f}'.format(accuracy_score(y_pred , y_test))) print('AUC : {:.5f}'.format(roc_auc_score(y_test , y_pred))) print('Precision : {:.5f}'.format(precision_score(y_test , y_pred))) print('Recall : {:.5f}'.format(recall_score(y_test , y_pred))) print('F1 : {:.5f}'.format(f1_score(y_test , y_pred))) print('Confusion Matrix : \n', confusion_matrix(y_test, y_pred)) print_results(y_test, y_pred) # compile fine tuned model model_ft = build_model() metrics = [ 'accuracy', tf.keras.metrics.Precision(name='precision'), tf.keras.metrics.Recall(name='recall'), ] model_ft.compile(optimizer='adam', loss='binary_crossentropy', metrics=metrics) checkpoint_cb = tf.keras.callbacks.ModelCheckpoint('xray_model.h5', save_best_only=True) early_stopping_cb = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=15, mode='min', verbose=1, restore_best_weights=True) def exponential_decay(lr0, s): def exponential_decay_fn(epoch): return lr0 * 0.1 **(epoch / s) return exponential_decay_fn exponential_decay_fn = 
exponential_decay(0.01, 20) lr_scheduler = tf.keras.callbacks.LearningRateScheduler(exponential_decay_fn) epochs = 10 batch_size = 64 history_ft = model_ft.fit( x=x_train, y=y_train, epochs=epochs, batch_size=batch_size, validation_data=(x_val,y_val), class_weight=class_weight, callbacks = [checkpoint_cb, early_stopping_cb, lr_scheduler] ) epochs_array = [i for i in range(len(history_ft.history['accuracy']))] fig, ax = plt.subplots(1,3) train_precision = history_ft.history['precision'] train_recall = history_ft.history['recall'] train_loss = history_ft.history['loss'] val_precision = history_ft.history['val_precision'] val_recall = history_ft.history['val_recall'] val_loss = history_ft.history['val_loss'] fig.set_size_inches(20,5) ax[0].plot(epochs_array, train_loss, 'g-o', label='Training Loss') ax[0].plot(epochs_array, val_loss, 'r-o', label='Validation Loss') ax[0].set_title('Training & Validation Loss') ax[0].legend() ax[0].set_xlabel('Epochs') ax[0].set_ylabel('Loss') ax[0].grid(True) ax[1].plot(epochs_array, train_precision, 'go-', label='Training Precision') ax[1].plot(epochs_array, val_precision, 'ro-', label='Validation Precision') ax[1].set_title('Training & Validation Precision') ax[1].legend() ax[1].set_xlabel('Epochs') ax[1].set_ylabel('Precision') ax[1].grid(True) ax[2].plot(epochs_array, train_recall, 'go-', label='Training Recall') ax[2].plot(epochs_array, val_recall, 'ro-', label='Validation Recall') ax[2].set_title('Training & Validation Recall') ax[2].legend() ax[2].set_xlabel('Epochs') ax[2].set_ylabel('Recall') ax[2].grid(True) plt.show() predictions = model_ft.predict(x=x_test) y_pred = np.round(predictions).reshape(1,-1)[0] print_results(y_test, y_pred) incorrect = np.nonzero(y_test != y_pred)[0] fig, ax = plt.subplots(3, 2, figsize=(15,15)) ax = ax.ravel() plt.subplots_adjust(wspace=0.25, hspace=0.75) plt.tight_layout() i = 0 for c in incorrect[:6]: ax[i].set_xticks([]) ax[i].set_yticks([]) ax[i].imshow(x_test[c].reshape(IMG_SIZE,IMG_SIZE), 
cmap='gray', interpolation='none') ax[i].set_title('Predicted Class: {}, Actual Class: {}'.format(y_pred[c], y_test[c])) i += 1 ```
github_jupyter
This IPython notebook is provided for reproduction of Figures 3,
across whole-population genome sequences is: ``` seq_dn_df[(seq_dn_df.clone!='')].groupby(['selection','population','time','variant_type']).size() seq_dn_df[(seq_dn_df.time==0) & (seq_dn_df.clone!='') & (seq_dn_df.ploidy=='haploid')].groupby(['selection','mutation_type','variant_type']).size() seq_dn_df[(seq_dn_df.time==32) & (seq_dn_df.clone!='')].groupby(['selection','mutation_type','variant_type']).size() ``` Bottom panels - Import phenotype evolution ``` # Load data pheno_df = pd.read_csv(dir_data+'pheno/populations/pheno_populations.csv.gz', encoding='utf-8', keep_default_na=False, na_values='NaN') # Filter out strains used for spatial control pheno_df = pheno_df[(pheno_df.group == 'ancestral')|\ (pheno_df.group == 'evolved')] groups_ph = pheno_df.groupby(['group','cross','cross_rep','selection','selection_rep']) pheno_df = pheno_df[pheno_df.selection_rep != ''] for (ii,((group,cross,cross_rep,selection,selection_rep),g1)) in enumerate(groups_ph): if group=='evolved': df = groups_ph.get_group(('ancestral',cross,cross_rep,selection,'')) df.loc[:,'selection_rep'] = df.selection_rep.replace([''],[selection_rep]) df.loc[:,'population'] = df['background']+'_'+df['cross']+'_'+df['cross_rep'].apply(str)+'_'+df['selection']+'_'+df['selection_rep'].apply(str) pheno_df = pheno_df.append(df) pheno_df = pheno_df.reset_index(drop=True) # Set reference as mean phenotype of the ancestral hybrid def normalize_phenotype(df, param_abs='norm_growth_rate', param_rel='rel_growth_rate'): df[param_rel] = df[param_abs] - df[df.group=='ancestral'][param_abs].mean() return df pheno_df = pheno_df.groupby(['selection','environment','population'], as_index=False).apply(normalize_phenotype, param_abs='norm_growth_rate', param_rel='rel_growth_rate') pheno_df = pheno_df.groupby(['selection','environment','population'], as_index=False).apply(normalize_phenotype, param_abs='norm_doubling_time', param_rel='rel_doubling_time') # # Filter out measurement replicates with >5% measurement error # 
pheno_df['pct'] = pheno_df.groupby(['selection','environment','population','group','isolate','gene','genotype_long'])['rel_growth_rate']\ # .apply(lambda x: (x-x.mean())/float(x.mean())) # pheno_df = pheno_df[abs(pheno_df['pct'])<10] pheno_df.head() # show dataframe header to stdout ``` ## Figure 3 - Subclonal heterogeneity ``` param = 'rel_growth_rate' panels = { 'HU': { 'WAxNA_F12_1_HU_2':0, 'WAxNA_F12_1_HU_3':1, 'WAxNA_F12_2_HU_3':2 }, 'RM': { 'WAxNA_F12_1_RM_3':0, 'WAxNA_F12_1_RM_4':1, 'WAxNA_F12_2_RM_2':2 } } populations = panels['HU'].keys()+panels['RM'].keys() groups_st = seq_st_df[seq_st_df.population.isin(populations)] groups_dn = seq_dn_df[(seq_dn_df.population.isin(populations))& \ (seq_dn_df.clone=='')& \ (seq_dn_df.gene!='non-coding')] groups_ph = pheno_df[pheno_df.population.isin(populations)& \ np.isfinite(pheno_df[param])] # Take rows where param is finite groups_st = groups_st.groupby('selection') groups_dn = groups_dn.groupby('selection') groups_ph = groups_ph.groupby(['selection','environment']) for (ii, environment) in enumerate(['HU','RM']): fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(6, 4), sharey='row') fig.subplots_adjust(left=0.07,bottom=0.07,right=0.85,top=0.95,hspace=0.3,wspace=0.1) # Set scales for ax in axes[0]: ax.set_xlim(0, 32) ax.set_ylim(0, 1) for ax in axes[1]: if environment=='HU': ax.set_xlim(-0.3, 0.5) ax.set_ylim(0, 0.15) elif environment=='RM': ax.set_xlim(-0.5, 1.9) ax.set_ylim(0, 0.12) ### Top panels ### # De novo mutations # for (jj, (population, gdn)) in enumerate(groups_dn.get_group(environment).groupby('population')): # Retrieve axes ax1 = axes[0][panels[environment][population]] for (gene, cds_pos, sub, protein_pos, amino_acids, consequence), gdx in \ gdn.groupby(['gene','cds_position','substitution','protein_position','amino_acids','consequence_short']): assignment = gdx.assignment.unique()[0] mutation_type = gdx.mutation_type.unique()[0] gdx.time = gdx.time.apply(int) gdx = 
gdx.sort_values('time').reset_index(drop=True) gdx = gdx.sort_index() ax1.plot( gdx.index.values, gdx.frequency.values, color=config.lineages[assignment]['fill'], **utils.merge_two_dicts(config.mutation_type[mutation_type], config.consequence_short[consequence]) ) if mutation_type=='driver': index = np.argmax(gdx.frequency) ax1.annotate(gene, xy=(index,gdx.frequency[index]), style='italic', fontsize=6, textcoords='offset points', xytext=(0, 13), ha = 'center', va = 'top', path_effects=[path_effects.withStroke(linewidth=0.5, foreground="w")], zorder=3) ax1.annotate(amino_acids.split('/')[0]+protein_pos+amino_acids.split('/')[1], xy=(index,gdx.frequency[index]), fontsize=5, textcoords='offset points', xytext=(0, 7), ha = 'center', va = 'top', path_effects=[path_effects.withStroke(linewidth=0.4, foreground="w")], zorder=3) # Subclonal frequency # for (jj, (population,gst)) in enumerate(groups_st.get_group(environment).groupby('population')): # Retrieve axes ax2 = axes[0][panels[environment][population]] # Set title ax2.set_title(population.replace('_',' '), fontsize=7, weight='bold') # gst.set_index('time', inplace=True) colors=[config.lineages[x]['fill'] for x in ['subclone A','subclone B','bulk']] gst[['subclone A','subclone B','bulk']].plot( ax=ax2, kind='bar', legend=False, stacked=True, rot=0, width=0.75, position=0.5, color=colors ) # Rotate the x-axis ticks ax2.set_xlabel('', rotation=0) ### Bottom panels ### for (jj, (population, gph)) in enumerate(groups_ph.get_group((environment,environment)).groupby('population')): # Retrieve axes ax3 = axes[1][panels[environment][population]] utils.simple_axes(ax3) for (kk, (time, gt)) in enumerate(gph.groupby('group')): print(environment, population, time) x, y = plot.histogram_binned_data(ax, gt[param], bins=34) ax3.plot(x, y, color=config.population['color'][time], linewidth=0.75) ax3.fill_between(x, 0, y, label=config.population['long_label'][time], alpha=0.45, facecolor=config.population['color'][time]) # Mean of all 
isolates gt_all = gt.groupby(['isolate','gene','genotype_long','assignment']) gt_all = gt_all[param].agg(np.mean)#.mean() # Mean of random isolates gt_random = gt[(gt['assignment']=='')].groupby(['isolate','gene','genotype_long','assignment']) gt_random = gt_random[param].agg(np.mean)#.mean() # Mean of targeted isolates gt_target = gt[(gt['assignment']!='')].groupby(['isolate','gene','genotype_long','assignment']) gt_target = gt_target[param].agg(np.mean)#.mean() # Gaussian mixture model X = gt_random[:, np.newaxis] N = np.arange(1, 4) models = gmm.gmm_fit(X, N) # Compute the AIC and the BIC AIC = [m.aic(X) for m in models] BIC = [m.bic(X) for m in models] M_best = models[np.argmin(BIC)] print BIC # Mean of the distribution for m, v in zip(abs(M_best.means_.ravel()), M_best.covariances_.ravel()): print('Mean: %.6f, Variance: %.6f' % (m, v,)) ax3.plot([m,m], ax3.get_ylim(), color=config.population['color'][time], linestyle='--', dashes=(4,3), linewidth=1) pos = ax3.get_ylim()[0] * 0.75 + ax3.get_ylim()[1] * 0.25 trans = ax3.get_xaxis_transform() # x in data units, y in axes fraction ax3.annotate( np.around(m, 2), xy=(m, 0.85), xycoords=trans, fontsize=6, color='k', va='center', ha=('right' if time=='ancestral' else 'left'), xytext=((-5 if time=='ancestral' else 5),0), textcoords='offset points', path_effects=[path_effects.withStroke(linewidth=0.5, foreground="w")] ) x_data = np.array(gt_all) y_data = np.repeat([0.03*(ax3.get_ylim()[1]-ax3.get_ylim()[0])], len(x_data)) markerline, stemlines, baseline = ax3.stem(x_data, y_data) plt.setp(markerline, 'markerfacecolor', config.population['color'][time], markersize = 0) plt.setp(stemlines, linewidth=1, color=config.population['color'][time], path_effects=[path_effects.withStroke(linewidth=0.75, foreground="w")]) plt.setp(baseline, 'color', 'none') if len(gt_target)>0: x_data = np.array(gt_target) y_data = np.repeat([0.2*(ax3.get_ylim()[1]-ax3.get_ylim()[0])], len(x_data)) markerline, stemlines, baseline = ax3.stem(x_data, 
y_data) plt.setp(markerline, 'color', config.population['color'][time], markersize = 2.75, markeredgewidth=.75, markeredgecolor='k', zorder=3) plt.setp(stemlines, linewidth=.75, color=config.population['color'][time], path_effects=[path_effects.withStroke(linewidth=1.25, foreground='k')], zorder=2) plt.setp(baseline, 'color', 'none', zorder=1) for (isolate, gene, genotype, assignment), mean in gt_target.iteritems(): ax3.annotate( gene, xy = (mean, 0.2), xycoords=('data','axes fraction'), xytext = (0, 8), textcoords = 'offset points', ha = 'center', va = 'top', fontsize = 6, style = 'italic', path_effects=[path_effects.withStroke(linewidth=0.5, foreground="w")] ) # Set axes labels axes[0, 1].set_xlabel(r'Time, $t$ (days)') axes[0, 0].set_ylabel('Cumulative subclone\n frequency, $f_j$ (bars)') axes[0, 2].twinx().set_ylabel('Allele frequency (lines)', rotation=270, va='baseline') axes[1, 1].set_xlabel(r'Rel. growth rate, $\lambda_{k}(t)$') axes[1, 0].set_ylabel('Density') # Set legends leg1 = axes[0, 2].legend(bbox_to_anchor=(1.3, 0.75), frameon=False, loc='center left', borderaxespad=0., handlelength=0.75, title='Lineage', prop={'size':6}) driver_artist = lines.Line2D((0,1),(0,0), color=config.lineages['bulk']['fill'], **config.mutation_type['driver']) passenger_artist = lines.Line2D((0,1),(0,0), color=config.lineages['bulk']['fill'], **config.mutation_type['passenger']) nonsyn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'], linestyle='', linewidth=1.5, path_effects=[path_effects.withStroke(linewidth=2, foreground="k")], **config.consequence_short['non-synonymous']) syn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'], linestyle='', linewidth=1.5, path_effects=[path_effects.withStroke(linewidth=2, foreground="k")], **config.consequence_short['synonymous']) leg2 = axes[0, 2].legend([driver_artist,passenger_artist,nonsyn_artist,syn_artist], ['driver','passenger','non-synonymous','synonymous'], bbox_to_anchor=(1.3, 0.25), 
ncol=1, frameon=False, loc='lower left', borderaxespad=0, handlelength=1.75, title='Mutation', prop={'size':6}) axes[0, 2].add_artist(leg1) axes[0, 2].get_legend().get_title().set_fontsize('7') leg3 = axes[1, 2].legend(bbox_to_anchor=(1.3, 0.5), frameon=False, loc='center left', borderaxespad=0., framealpha=1, handlelength=0.75, title='Time', prop={'size':6}) axes[1, 2].get_legend().get_title().set_fontsize('7') for leg in [leg1,leg2]: plt.setp(leg.get_title(), fontsize=7) # Set panel labels axes[0,0].text(-0.24, 1.1, chr(2*ii + ord('A')), transform=axes[0,0].transAxes, fontsize=9, fontweight='bold', va='top', ha='right') axes[0,1].text(0.5, 1.2, 'Selection: %s' % config.selection['long_label'][environment], transform=axes[0,1].transAxes, fontsize=8, va='center', ha='center') axes[1,0].text(-0.24, 1.1, chr(2*ii + ord('B')), transform=axes[1,0].transAxes, fontsize=9, fontweight='bold', va='top', ha='right') # Axes limits for ax in fig.get_axes(): ax.xaxis.label.set_size(6) ax.yaxis.label.set_size(6) ax.tick_params(axis='both', which='major', size=2, labelsize=6) ax.tick_params(axis='both', which='minor', size=0, labelsize=0) plt.setp(ax.get_xticklabels(), fontsize=6) plt.setp(ax.get_yticklabels(), fontsize=6) for loc in ['top','bottom','left','right']: ax.spines[loc].set_linewidth(0.75) if ax.is_last_row(): if environment=='HU': ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=5)) ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=5)) elif environment=='RM': ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=5)) ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=4)) plot.save_figure(dir_paper+'figures/figure3/figure3_%s' % environment) plt.show() ``` **Fig. 3:** Reconstruction of subclonal dynamics. (**A** and **C**), Competing subclones evolved in (*A*) hydroxyurea and (*C*) rapamycin experienced a variety of fates. Time is on the $x$-axis, starting after crossing when the population has no competing subclones. 
Cumulative haplotype frequency of subclones (bars) and allele frequency of *de novo* mutants (lines) are on the $y$-axis. Most commonly, selective sweeps were observed where a spontaneous mutation arose and increased in frequency. Driver mutations are solid lines and passenger mutations are dashed lines, colored by subclone assignment; circles and squares denote non-synonymous and synonymous mutations, respectively. (**B** and **D**) Variability in intra-population growth rate , estimated by random sampling of 96 individuals at initial ($t = 0$ days, blue) and final ($t = 32$ days, red) time points ($n = 32$ technical replicates per individual). Mean growth rates by individual are shown at the foot of the histogram (Fig. S7). The posterior means of the distribution modes fitted by a Gaussian mixture model are indicated as dashed lines. The fitter individuals (pins) carry driver mutations, measured by targeted sampling and sequencing. ## Figure S3 - Sequence evolution of WAxNA founders ``` panels = { 'HU': { 'WAxNA_F12_1_HU_1':(0,1), 'WAxNA_F12_1_HU_2':(0,2), 'WAxNA_F12_1_HU_3':(0,3), 'WAxNA_F12_2_HU_1':(1,1), 'WAxNA_F12_2_HU_2':(1,2), 'WAxNA_F12_2_HU_3':(1,3) }, 'RM': { 'WAxNA_F2_1_RM_1':(0,0), 'WAxNA_F12_1_RM_1':(0,1), 'WAxNA_F12_1_RM_2':(0,2), 'WAxNA_F12_1_RM_3':(0,3), 'WAxNA_F12_1_RM_4':(0,4), 'WAxNA_F2_1_RM_2':(1,0), 'WAxNA_F12_2_RM_1':(1,1), 'WAxNA_F12_2_RM_2':(1,2), 'WAxNA_F12_2_RM_3':(1,3), 'WAxNA_F12_2_RM_4':(1,4) } } populations = panels['HU'].keys()+panels['RM'].keys() groups_st = seq_st_df[seq_st_df.population.isin(populations)].groupby(['selection','population']) groups_dn = seq_dn_df[(seq_dn_df.population.isin(populations))&\ (seq_dn_df.clone=='')&\ (seq_dn_df.gene!='non-coding')].groupby(['selection','population']) # Create a figure with subplots fig = plt.figure(figsize=(10, 10)) grid = gridspec.GridSpec(2, 1) gs = {} for (ii, e) in enumerate(['HU','RM']): nrows = 2 ncols = 5 gs[e] = gridspec.GridSpecFromSubplotSpec(nrows, ncols, 
subplot_spec=grid[ii], hspace=0.3, wspace=0.15) for (jj, p) in enumerate(panels[e]): # Retrieve axes ax1 = plt.subplot(gs[e][panels[e][p]]) ax2 = ax1.twinx() ### Subclone frequency ### gst = groups_st.get_group((e,p)) # Set title ax1.set_title(p.replace('_',' '), fontsize=7, weight='bold') # Bar plot gst = gst.set_index('time') gst = gst[['subclone A','subclone B','subclone C','subclone D','bulk']] gst.plot(ax=ax1, kind='bar', legend=False, stacked=True, width=0.75, position=0.5, color=[config.lineages[c]['fill'] for c in gst.columns]) ### De novo mutations ### if (e,p) in groups_dn.groups.keys(): gdn = groups_dn.get_group((e,p)) for (gene, pos, cds, sub, protein_pos, amino_acids, consequence), gdx \ in gdn.groupby(['gene','pos','cds_position','substitution',\ 'protein_position','amino_acids','consequence_short']): assignment = gdx.assignment.unique()[0] mutation_type = gdx.mutation_type.unique()[0] gdx = gdx.sort_values('time').reset_index(drop=True) gdx = gdx.sort_index() ax2.plot(gdx.index.values, gdx.frequency.values, color=config.lineages[assignment]['line'], **utils.merge_two_dicts(config.mutation_type[mutation_type], config.consequence_short[consequence])) if mutation_type=='driver': index = np.argmax(gdx.frequency) ax2.annotate( gene, xy=(index,gdx.frequency[index]), style='italic', fontsize=6, textcoords='offset points', xytext=(0, 13), ha = 'center', va = 'top', path_effects=[path_effects.withStroke(linewidth=0.5, foreground="w")], zorder=3 ) ax2.annotate( amino_acids.split('/')[0]+protein_pos+amino_acids.split('/')[1], xy=(index,gdx.frequency[index]), fontsize=5, textcoords='offset points', xytext=(0, 7), ha = 'center', va = 'top', path_effects=[path_effects.withStroke(linewidth=0.4, foreground="w")], zorder=3 ) # Set legends if (e,p) in [('HU','WAxNA_F12_1_HU_3'),('RM','WAxNA_F12_1_RM_4')]: leg1 = ax1.legend(bbox_to_anchor=(1.3, -0.125), ncol=1, frameon=False, loc='lower left', borderaxespad=0., handlelength=0.7, title='Lineage', prop={'size':6}) if 
(e,p) in [('HU','WAxNA_F12_2_HU_3'),('RM','WAxNA_F12_2_RM_4')]: driver_artist = lines.Line2D((0,1),(0,0), color=config.lineages['bulk']['fill'], **config.mutation_type['driver']) passenger_artist = lines.Line2D((0,1),(0,0), color=config.lineages['bulk']['fill'], **config.mutation_type['passenger']) nonsyn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'], linestyle='', path_effects=[path_effects.withStroke(linewidth=2, foreground="k")], **config.consequence_short['non-synonymous']) syn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'], linestyle='', path_effects=[path_effects.withStroke(linewidth=2, foreground="k")], **config.consequence_short['synonymous']) leg2 = ax1.legend([driver_artist,passenger_artist,nonsyn_artist,syn_artist], ['driver','passenger','non-synonymous','synonymous'], bbox_to_anchor=(1.3, 1.125), ncol=1, frameon=False, loc='upper left', borderaxespad=0, handlelength=1.75, title='Mutation', prop={'size':6}) for leg in [leg1,leg2]: plt.setp(leg.get_title(),fontsize=6) # Set axes labels if (e,p) in [('HU','WAxNA_F12_2_HU_2'),('RM','WAxNA_F12_2_RM_2')]: ax1.set_xlabel(r'Time, $t$ (days)') else: ax1.set_xlabel('') if (e,p) in [('HU','WAxNA_F12_1_HU_1'),('RM','WAxNA_F2_1_RM_1'), ('HU','WAxNA_F12_2_HU_1'),('RM','WAxNA_F2_1_RM_2')]: ax1.set_ylabel('Cumulative subclone\n frequency, $f_j$ (bars)') else: ax1.set_yticklabels([]) if (e,p) in [('HU','WAxNA_F12_1_HU_3'),('RM','WAxNA_F12_1_RM_4'), ('HU','WAxNA_F12_2_HU_3'),('RM','WAxNA_F12_2_RM_4')]: ax2.set_ylabel('Allele frequency (lines)', rotation=270, va='baseline') else: ax2.set_yticklabels([]) plt.setp(ax1.xaxis.get_majorticklabels(), rotation=0) # rotate the x-axis ticks # Set panel labels if (e,p) in [('HU','WAxNA_F12_1_HU_1'),('RM','WAxNA_F2_1_RM_1')]: ax1.text(-0.25, 1.2, chr(ii + ord('A')), transform=ax1.transAxes, fontsize=9, fontweight='bold', va='center', ha='right') if (e,p) in [('HU','WAxNA_F12_1_HU_2'),('RM','WAxNA_F12_1_RM_2')]: ax1.text(0.5, 1.2, 
'Selection: %s' % config.selection['long_label'][e], transform=ax1.transAxes, fontsize=8, va='center', ha='center') for ax in fig.get_axes(): ax.set_ylim(0, 1) # axes limits ax.xaxis.label.set_size(6) ax.yaxis.label.set_size(6) ax.tick_params(axis='both', which='major', size=2, labelsize=6) ax.tick_params(axis='both', which='minor', size=0, labelsize=0) plt.setp(ax.get_xticklabels(), fontsize=6) plt.setp(ax.get_yticklabels(), fontsize=6) for tick in ax.get_xticklabels(): tick.set_visible(True) for loc in ['top','bottom','left','right']: ax.spines[loc].set_linewidth(.75) plot.save_figure(dir_supp+'figures/supp_figure_seq_subclonal_dynamics/supp_figure_seq_subclonal_dynamics_cross') plt.show() ``` **Fig. S3:** Subclonal dynamics in time for WAxNA founders evolved in (**A**) hydroxyurea and (**B**) rapamycin, measured by whole-population sequencing. Time is on the $x$-axis, starting after crossing when the population has no competing subclones. Cumulative haplotype frequency of subclones (bars) and allele frequency of *de novo* mutants (lines) are on the $y$-axis. Driver mutations are solid lines and passenger mutations are dashed lines, colored by subclone assignment; circles and squares denote non-synonymous and synonymous mutations, respectively. No macroscopic subclones or *de novo* mutations were detected in any of the control replicates in YPD. 
## Figure S4 - Sequence evolution of WA, NA founders ``` panels = { 'HU': { 'WA_HU_1':(0,0), 'WA_HU_2':(0,1), 'NA_HU_1':(0,2), 'NA_HU_2':(0,3), }, 'RM': { 'WA_RM_1':(0,0), 'WA_RM_2':(0,1), 'NA_RM_1':(0,2), 'NA_RM_2':(0,3), } } populations = panels['HU'].keys()+panels['RM'].keys() groups_dn = seq_dn_df[(seq_dn_df.population.isin(populations)) & \ (seq_dn_df.clone=='') & \ (seq_dn_df.gene!='non-coding')].groupby(['selection','population']) # Get a figure with a lot of subplots fig = plt.figure(figsize=(8, 5)) grid = gridspec.GridSpec(2, 1, hspace=0.5) gs = {} for (ii, e) in enumerate(['HU','RM']): nrows = 1 ncols = 4 gs[e] = gridspec.GridSpecFromSubplotSpec(nrows, ncols, subplot_spec=grid[ii], wspace=0.15) ### De novo mutations ### for (jj, p) in enumerate(panels[e].keys()): # Retrieve axes ax = plt.subplot(gs[e][panels[e][p]]) # Set title ax.set_title(p.replace('_',' '), fontsize=7, weight='bold') # Set axes labels if (e,p) in [('HU','WA_HU_1'),('RM','WA_RM_1')]: ax.set_ylabel('Allele frequency') ax.text(-0.15, 1.2, chr(ii + ord('A')), transform=ax.transAxes, fontsize=9, fontweight='bold', va='center', ha='right') ax.text(0., 1.2, 'Selection: %s' % config.selection['long_label'][e], transform=ax.transAxes, fontsize=8, va='center', ha='left') ax.set_yticklabels([0.0,0.2,0.4,0.6,0.8,1.0]) else: ax.set_yticklabels([]) ax.set_xlabel(r'Time, $t$ (days)') # Set legend if (e,p) in [('HU','NA_HU_2')]: driver_artist = lines.Line2D((0,1),(0,0), color=config.lineages['bulk']['fill'], **config.mutation_type['driver']) passenger_artist = lines.Line2D((0,1),(0,0), color=config.lineages['bulk']['fill'], **config.mutation_type['passenger']) nonsyn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'], linestyle='', path_effects=[path_effects.withStroke(linewidth=2, foreground="k")], **config.consequence_short['non-synonymous']) syn_artist = lines.Line2D((0,1),(0,0), mfc=config.lineages['bulk']['fill'], linestyle='', 
path_effects=[path_effects.withStroke(linewidth=2, foreground="k")], **config.consequence_short['synonymous']) leg1 = ax.legend([driver_artist,passenger_artist,nonsyn_artist,syn_artist], ['driver','passenger','non-synonymous','synonymous'], bbox_to_anchor=(1.1, -0.25), ncol=1, frameon=False, loc='center left', borderaxespad=0, handlelength=1.75, title='Mutation', prop={'size':6}) plt.setp(leg1.get_title(),fontsize=6) # Set empty panels if (e,p) in groups_dn.groups.keys(): gdn = groups_dn.get_group((e,p)) else: ax.axvspan(8, 32, facecolor='w', edgecolor='0.5', alpha=0.5, hatch='//') ax.annotate('Extinct', xy=(16,0.5), fontsize=6, ha='center', path_effects=[path_effects.withStroke(linewidth=0.5, foreground="w")]) continue for (gene, cds_pos, sub, protein_pos, amino_acids, consequence), gdx in \ gdn.groupby(['gene','cds_position','substitution','protein_position','amino_acids','consequence_short']): assignment = gdx.assignment.unique()[0] mutation_type = gdx.mutation_type.unique()[0] gdx.time = gdx.time.apply(int) gdx = gdx.sort_values('time').reset_index(drop=True) gdx = gdx.sort_index() gdx = gdx.set_index('time') ax.plot(gdx.index, gdx.frequency, color=config.lineages['bulk']['line'], **utils.merge_two_dicts(config.mutation_type[mutation_type], config.consequence_short[consequence])) if mutation_type=='driver': index = np.argmax(gdx.frequency) ax.annotate( gene, xy=(index,gdx.frequency[index]), style='italic', fontsize=6, textcoords='offset points', xytext=(0, 13), ha = 'center', va = 'top', path_effects=[path_effects.withStroke(linewidth=0.5, foreground="w")], zorder=3 ) ax.annotate( amino_acids.split('/')[0]+protein_pos+amino_acids.split('/')[1], xy=(index,gdx.frequency[index]), fontsize=5, textcoords='offset points', xytext=(0, 7), ha = 'center', va = 'top', path_effects=[path_effects.withStroke(linewidth=0.4, foreground="w")], zorder=3 ) for ax in fig.get_axes(): ax.set_xlim(2, 32) # axes limits ax.set_ylim(0, 1) ax.xaxis.label.set_size(6) 
ax.yaxis.label.set_size(6) ax.tick_params(axis='both', which='major', size=2, labelsize=6) ax.tick_params(axis='both', which='minor', size=0, labelsize=0) plt.setp(ax.get_xticklabels(), fontsize=6) plt.setp(ax.get_yticklabels(), fontsize=6) ax.set_xscale('log', base=2) ax.set_xticks([2, 4, 8, 16, 32]) ax.xaxis.set_major_formatter(ticker.ScalarFormatter()) ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=5)) for loc in ['top','bottom','left','right']: ax.spines[loc].set_linewidth(0.75) plot.save_figure(dir_supp+'figures/supp_figure_seq_subclonal_dynamics/supp_figure_seq_subclonal_dynamics_parents') plt.show() ``` **Fig. S4:** Subclonal dynamics in time for WA and NA founders evolved in (**A**) hydroxyurea and (**B**) rapamycin, measured by whole-population sequencing. WA founders evolved in hydroxyurea did not survive after 4 days. Driver mutations are solid lines and passenger mutations are dashed lines, colored by subclone assignment; circles and squares denote non-synonymous and synonymous mutations, respectively. No *de novo* mutations were detected in any of the control replicates in YPD. ## Figure S9 - Phenotype evolution We are inferring the model's components ($F, \lambda_1, \sigma_{\lambda_1}, \lambda_2, \sigma_{\lambda_2}$) using a Gaussian mixture model. 
``` param='rel_growth_rate' scatter_panels = { 'WAxNA_F12_1_HU_2':0, 'WAxNA_F12_1_HU_3':1, 'WAxNA_F12_2_HU_3':2, 'WAxNA_F12_1_RM_3':3, 'WAxNA_F12_1_RM_4':4, 'WAxNA_F12_2_RM_2':5, } data = pheno_df[pheno_df.population.isin(scatter_panels.keys())& \ np.isfinite(pheno_df[param])] # Take rows where param is finite data = pd.pivot_table( data, index=['selection','population','group','isolate','gene','genotype_long','assignment'], columns='environment', values=param, aggfunc=np.mean ) corr = pheno_df[pheno_df.population.isin(scatter_panels.keys())& \ np.isfinite(pheno_df[param])] # Take rows where param is finite corr = pd.pivot_table( corr, index=['isolate','gene','genotype_long','assignment'], columns=['selection','population','group','environment'], values=param, aggfunc=np.mean ) corr = corr.groupby(level=['selection','population','group'], axis=1, group_keys=False) corr = corr.apply(lambda x: x.corr(method='spearman')) corr = corr.query('environment==\'SC\'') corr = pd.melt(corr).dropna() corr = corr.pivot_table(columns=['group'], index=['selection','population','environment'], values='value') fig = plt.figure(figsize=(7.5,5.25)) fig.subplots_adjust(left=0.02, right=0.98, bottom=0.02, top=0.98) # Make outer gridspec grid = gridspec.GridSpec(nrows=2, ncols=3, width_ratios=[2, 2, 2], hspace=.5, wspace=.25) gs = {} for ii, ((s, p), gp) in enumerate(data.groupby(level=['selection','population'])): print(s, p) # Use gridspec to assign different formats to panels in one plot gs[(s,p)] = gridspec.GridSpecFromSubplotSpec(nrows=2, ncols=2, hspace=.05, wspace=.05, width_ratios=[4,1], height_ratios=[1,4], subplot_spec=grid[scatter_panels[p]]) ax = plt.subplot(gs[(s,p)][:]) ax_scatter = plt.subplot(gs[(s,p)][1,0]) ax_x = plt.subplot(gs[(s,p)][0,0]) ax_y = plt.subplot(gs[(s,p)][1,1]) # Define plot ranges at beginning, since used often later x = gp['SC'].values y = gp[s].values if s=='HU': x_range = [-0.2, 0.45] y_range = [-0.175, 0.225] x_count_range = [0, 0.4] y_count_range = 
[0, 0.3] elif s=='RM': x_range = [-0.4, 1.6] y_range = [-0.2, 0.19] x_count_range = [0, 0.4] y_count_range = [0, 0.2] # Set title ax_x.set_title(p.replace('_',' '), fontsize=7, weight='bold') ax_scatter.annotate( 'Ancestral (t = 0d)\n' r'$\rho$ = {:.2f}'.format(corr.ix[s, p, s]['ancestral']), xy=(1.25, 1.15), xycoords='axes fraction', fontsize=6, color=config.population['color']['ancestral'], ha='center', va='bottom' ) ax_scatter.annotate( 'Evolved (t = 32d)\n' r'$\rho$ = {:.2f}'.format(corr.ix[s, p, s]['evolved']), xy=(1.25, 1.025), xycoords='axes fraction', fontsize=6, color=config.population['color']['evolved'], ha='center', va='bottom' ) ax_scatter.axvline(x=0, ls='--', lw=1.5, color='lightgray', zorder=0) ax_scatter.axhline(y=0, ls='--', lw=1.5, color='lightgray', zorder=0) for jj, (t, gt) in enumerate(gp.groupby(level='group')): gt_all = gt.groupby(level=['isolate','gene','genotype_long','assignment']).agg([np.mean]) gt_random = gt.query('assignment==\'\'').groupby(level=['isolate','gene','genotype_long','assignment']).agg([np.mean]) gt_target = gt.query('assignment!=\'\'').groupby(level=['isolate','gene','genotype_long','assignment']).agg([np.mean]) print gt_target x_a = gt_all[s] y_a = gt_all['SC'] x_r = gt_random[s] y_r = gt_random['SC'] color = config.population['color'][t] # Scatter plot plot.scatter_plot(x_r, y_r, ax=ax_scatter, marker='.', color=color, ms=3) ax_scatter.set_xlim(x_range) ax_scatter.set_ylim(y_range) # ax_scatter.annotate(corr.ix[s, p, 'SC'][t], # xy=(0.95, 0.05), xycoords='axes fraction', fontsize=8, # color=color, ha='right', va='bottom') for (isolate, gene, genotype, assignment), data in gt_target.iterrows(): x_t = gt_target[s] y_t = gt_target['SC'] plot.scatter_plot(x_t, y_t, ax=ax_scatter, marker='o', ms=3, mec='k', mfc=color) ax_scatter.annotate( gene, xy = (data[s], data['SC']), xycoords='data', xytext = (0, 8), textcoords = 'offset points', ha = 'center', va = 'top', fontsize = 6, style = 'italic', 
path_effects=[path_effects.withStroke(linewidth=0.5, foreground="w")] ) # x-axis plot.histogram_x(x_r, ax=ax_x, time=t) ax_x.set_xlim(x_range) ax_x.set_ylim(y_count_range) # Mean of sequenced isolates # lollipops(x_s, ax_x) # y-axis plot.histogram_y(y_r, ax=ax_y, time=t) ax_y.set_xlim(x_count_range) ax_y.set_ylim(y_range) # Set axes labels ax = plt.subplot(gs[('HU','WAxNA_F12_1_HU_3')][1,0]) ax.set_xlabel('%s\nRel. growth rate, $\lambda_k(t)$' % config.environment['long_label']['HU']) ax = plt.subplot(gs[('HU','WAxNA_F12_1_HU_2')][1,0]) ax.set_ylabel('Rel. growth rate, $\lambda_k(t)$\n%s' % config.environment['long_label']['SC']) ax = plt.subplot(gs[('RM','WAxNA_F12_1_RM_4')][1,0]) ax.set_xlabel('%s\nRel. growth rate, $\lambda_k(t)$' % config.environment['long_label']['RM']) ax = plt.subplot(gs[('RM','WAxNA_F12_1_RM_3')][1,0]) ax.set_ylabel('Rel. growth rate, $\lambda_k(t)$\n%s' % config.environment['long_label']['SC']) # Set panel labels ax = plt.subplot(gs[('HU','WAxNA_F12_1_HU_2')][0,0]) ax.text(-.2, 1.75, chr(ord('A')), transform=ax.transAxes, fontsize=9, fontweight='bold', va='center', ha='right') ax = plt.subplot(gs[('HU','WAxNA_F12_1_HU_3')][0,0]) ax.text(0.5, 1.75, 'Selection: %s' % config.selection['long_label']['HU'], transform=ax.transAxes, fontsize=8, va='center', ha='center') ax = plt.subplot(gs[('RM','WAxNA_F12_1_RM_3')][0,0]) ax.text(-.2, 1.75, chr(ord('B')), transform=ax.transAxes, fontsize=9, fontweight='bold', va='center', ha='right') ax = plt.subplot(gs[('RM','WAxNA_F12_1_RM_4')][0,0]) ax.text(0.5, 1.75, 'Selection: %s' % config.selection['long_label']['RM'], transform=ax.transAxes, fontsize=8, va='center', ha='center') # Axes limits for ax in fig.get_axes(): ax.xaxis.label.set_size(6) ax.yaxis.label.set_size(6) ax.tick_params(axis='both', which='major', size=2, labelsize=6) ax.tick_params(axis='both', which='minor', size=0, labelsize=6) for loc in ['top','bottom','left','right']: ax.spines[loc].set_linewidth(0.75) 
plot.save_figure(dir_supp+'figures/supp_figure_pheno_evolution/supp_figure_pheno_evolution') plt.show() ``` **Fig. S9:** Variability in intra-population growth rate and fitness correlations. Fitness correlations of ancestral and evolved populations across environments, estimated by random sampling of individuals at initial (0 days, green) and final time points (32 days, purple), before and after selection in (**A**) hydroxyurea and (**B**) rapamycin. The relative growth rate $\lambda_k(t)$ per individual $k$ is shown, calculated by averaging over ${n_r\,{=}\,32}$ technical replicates per individual. Relative growth rates are normalized with respect to the mean population growth rate $\langle\lambda_k\rangle_{t=0}$ at $t=0$ days (see Figures 3B and 3D). The relative growth rates $\lambda_k(t)$ in the stress environment ($x$-axis) are compared to the control environment ($y$-axis). Using a Gaussian mixture model, we found the posterior probability of the mixture modes of the best-fit mixture (solid lines). The posterior means of the distribution modes are indicated as dashed lines. The fitter individuals carry driver mutations, as determined by targeted sampling and sequencing. Spearman's rank correlation, $\rho$, is shown on the top right of each panel, to assess the association between the growth rate of isolates in the stress and control environments at 0 and 32 days.
github_jupyter
# The Constellation Wizard requires a STK Scenario to be open Simply run the cell below and the constelation wizard will appear ``` from tkinter import Tk from tkinter.ttk import * from tkinter import W from tkinter import E from tkinter import scrolledtext from tkinter import INSERT from tkinter import END from tkinter import IntVar from tkinter import messagebox from DeckAccessReaderGUI import * import numpy as np import pandas as pd import os from os import listdir from os.path import isfile, join from shutil import copyfile from comtypes.client import CreateObject from comtypes.client import GetActiveObject from comtypes.gen import STKObjects # Define window layout window = Tk() window.title('Constellation Wizard') window.geometry('587x510') cwd = os.getcwd() cwdFiles = cwd+'\\Files' window.iconbitmap(cwdFiles+'\\Misc\\'+'ConstellationWizardIcon.ico') # # Configure Style Style().theme_use('vista') # # fonts for all widgets # window.option_add("*Font", "calabri 9") ######################################### Col0 ######################################################## width = 35 padx = 3 pady = 1 column=0 row = 1 # Connect to STK try: root = ConnectToSTK(version=12) startTime = root.CurrentScenario.QueryInterface(STKObjects.IAgScenario).StartTime stopTime = root.CurrentScenario.QueryInterface(STKObjects.IAgScenario).StartTime+3600 except: res = messagebox.askyesno('Constellation Wizard','Failed to connect to a scenario.\nIs a scenario in STK open?') if res == True: try: root = ConnectToSTK(version=12) startTime = root.CurrentScenario.QueryInterface(STKObjects.IAgScenario).StartTime stopTime = root.CurrentScenario.QueryInterface(STKObjects.IAgScenario).StartTime+3600 except: window.quit() window.destroy() else: window.quit() window.destroy() def createConMsgBox(): res = messagebox.askyesno('Constellation Wizard',txt.get().replace(' ','-')+'.tce will be created and overwrite any existing file.\nThis may take a while if there are many satellites in the 
scenario.\nContinue?') if res == True: CreateConstellation(root,txt,txtBox,ssc=00000) btnCreateCon = Button(window,width=width,text='Create Constellation From STK',command=lambda: createConMsgBox()) btnCreateCon.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2) row += 1 # Load MTO btnLoadMTO = Button(window,width=width,text='Load Constellation as MTO',command=lambda: LoadMTO(root,txtBox,MTOName = comboCon.get(),timestep=60,color=comboColor.get().lower(),orbitsOnOrOff=onOffStr(),orbitFrame=frameValue())) btnLoadMTO.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2) row += 1 # Orbit options lblFrame = Label(window,text = 'Show Orbits:') lblFrame.grid(column=column,row=row,padx=padx,pady=pady,sticky=E) # Checkbox def onOffStr(): onOff = showOrbits.get() if onOff == 0: onOff = 'off' elif onOff == 1: onOff = 'on' return onOff showOrbits = IntVar() showOrbits.set(0) checkButton = Checkbutton(window, variable=showOrbits,offvalue=0,onvalue=1) checkButton.grid(column=column+1,row=row,padx=padx,pady=pady,sticky=W) row += 1 row += 1 # Run Deck Access btnDeckAccess = Button(window,width=width,text='Run Deck Access',command=lambda: runDeckAccess(root,txtStart.get(),txtStop.get(),comboCon,comboDA,txtBox,constraintSatName = comboSat.get())) btnDeckAccess.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2) row += 1 # Save Deck Access def saveDA(): newName = txt.get().replace(' ','-') res = messagebox.askyesno('Constellation Wizard',newName+'.tce will be created and overwrite any existing file.\nContinue?') if res == True: copyfile(cwdFiles+'\\Constellations\\deckAccessTLE.tce', cwdFiles+'\\Constellations\\'+newName+'.tce') txtBox.insert(END,'Created: '+txt.get().replace(' ','-')+'.tce\n') btnSave = Button(window,text='Save Deck Access',command=saveDA) btnSave.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 1,sticky=W+E) row += 2 # # Load Subset btnLoadSubset = Button(window,width=width,text='Load Satellites Using Template',command= 
lambda: LoadSatsFromFileUsingTemplate(root,txtStart.get(),txtStop.get(),comboCon,selected,txtBox,comboSat.get(),color=comboColor.get().lower())) btnLoadSubset.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2) row += 2 # Do Analysis def AddToChain(): addObj = comboChainCov.get() chainName = comboChain.get() try: chain = root.GetObjectFromPath('*/Chain/'+chainName) chain2 = chain.QueryInterface(STKObjects.IAgChain) chain2.Objects.Add(addObj) txtBox.insert(END,'Added: '+addObj.split('/')[-1]+'\n') except: txtBox.insert(END,'Failed to Add: '+addObj.split('/')[-1]+'\n') btnAddChain = Button(window,width=width,text='Add To Chain',command=AddToChain) btnAddChain.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2) row += 1 # Do Analysis def computeChain(): chainName = comboChain.get() if root.CurrentScenario.Children.Contains(STKObjects.eChain,chainName): chain = root.GetObjectFromPath('*/Chain/'+chainName) chain2 = chain.QueryInterface(STKObjects.IAgChain) chain2.ClearAccess() chain2.ComputeAccess() txtBox.insert(END,'Computed: '+chainName+'\n') else: txtBox.insert(END,'Failed to Compute: '+chainName+'\n') btnComputeChain = Button(window,text='Compute Chain',command=computeChain) btnComputeChain.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 1,sticky=W+E) def removeAssets(): chainName = comboChain.get() if root.CurrentScenario.Children.Contains(STKObjects.eChain,chainName): chain = root.GetObjectFromPath('*/Chain/'+chainName) chain2 = chain.QueryInterface(STKObjects.IAgChain) chain2.Objects.RemoveAll() txtBox.insert(END,'Removed Objects: '+chainName+'\n') else: txtBox.insert(END,'Failed to Removed Objects: '+chainName+'\n') btnRemoveChain = Button(window,text='Remove Objects',command=removeAssets) btnRemoveChain.grid(column=column+1,row=row,padx=padx,pady=pady,columnspan = 1,sticky=W+E) row += 1 # Do Analysis def AddToCoverage(): addObj = comboChainCov.get() covName = comboCov.get() if 
root.CurrentScenario.Children.Contains(STKObjects.eCoverageDefinition,covName): cov = root.GetObjectFromPath('*/CoverageDefinition/'+covName) cov2 = cov.QueryInterface(STKObjects.IAgCoverageDefinition) if cov2.AssetList.CanAssignAsset(addObj): cov2.AssetList.Add(addObj) txtBox.insert(END,'Added: '+addObj.split('/')[-1]+'\n') else: txtBox.insert(END,'Already Assigned: '+addObj.split('/')[-1]+'\n') else: txtBox.insert(END,'Failed to Add: '+addObj.split('/')[-1]+'\n') btnAddCoverage = Button(window,width=width,text='Add To Coverage',command=AddToCoverage) btnAddCoverage.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2) row += 1 # Do Analysis def computeCov(): covName = comboCov.get() if root.CurrentScenario.Children.Contains(STKObjects.eCoverageDefinition,covName): cov = root.GetObjectFromPath('*/CoverageDefinition/'+covName) cov2 = cov.QueryInterface(STKObjects.IAgCoverageDefinition) cov2.ClearAccesses() cov2.ComputeAccesses() txtBox.insert(END,'Computed: '+covName+'\n') else: txtBox.insert(END,'Failed to Compute: '+covName+'\n') btnComputeCoverage = Button(window,text='Compute Coverage',command=computeCov) btnComputeCoverage.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 1,sticky=W+E) def removeAssestsCov(): covName = comboCov.get() if root.CurrentScenario.Children.Contains(STKObjects.eCoverageDefinition,covName): cov = root.GetObjectFromPath('*/CoverageDefinition/'+covName) cov2 = cov.QueryInterface(STKObjects.IAgCoverageDefinition) cov2.AssetList.RemoveAll() txtBox.insert(END,'Removed Assets: '+covName+'\n') else: txtBox.insert(END,'Failed to Removed Assets: '+covName+'\n') btnRemoveCov = Button(window,text='Remove Assets',command=removeAssestsCov) btnRemoveCov.grid(column=column+1,row=row,padx=padx,pady=pady,columnspan = 1,sticky=W+E) row += 1 row += 3 txtBox = scrolledtext.ScrolledText(window,width=35,height=10) txtBox.insert(INSERT,'Connected: '+root.CurrentScenario.InstanceName+'\n') 
txtBox.grid(column=column,row=row,padx=padx+0,pady=pady,rowspan=4,columnspan = 3,sticky=W+E) rowTxt = row ######################################### Col2 ######################################################## # Labels width2 = 30 column = 2 row = 1 lblCreateCon = Label(window,text = 'Create/Save Constellation:') lblCreateCon.grid(column=column,row=row,padx=padx,pady=pady,sticky=E) row += 1 lblCon = Label(window,text = 'Constellation:') lblCon.grid(column=column,row=row,padx=padx,pady=pady,sticky=E) row += 1 # MTO Options row += 1 lblColor = Label(window,text = 'MTO/Satellite Color:') lblColor.grid(column=column,row=row,padx=padx,pady=pady,sticky=E) row +=1 lblDA = Label(window,text = 'Access From:') lblDA.grid(column=column,row=row,padx=padx,pady=pady,sticky=E) row += 1 lblStart = Label(window,text = 'Start Time:') lblStart.grid(column=column,row=row,padx=padx,pady=pady,sticky=E) row += 1 lblStop = Label(window,text = 'Stop Time:') lblStop.grid(column=column,row=row,padx=padx,pady=pady,sticky=E) row += 1 lblSatTemp = Label(window,text = 'Satellite Template:') lblSatTemp.grid(column=column,row=row,padx=padx,pady=pady,sticky=E) row += 2 lblSatTemp = Label(window,text = 'Chain/Coverage Object:') lblSatTemp.grid(column=column,row=row,padx=padx,pady=pady,sticky=E) row += 1 lblSatTemp = Label(window,text = 'Chain:') lblSatTemp.grid(column=column,row=row,padx=padx,pady=pady,sticky=E) row += 1 lblSatTemp = Label(window,text = 'Coverage:') lblSatTemp.grid(column=column,row=row,padx=padx,pady=pady,sticky=E) row += 2 ######################################### Col3 ######################################################## column = 3 row=1 # Entry box for Create Constellation txt = Entry(window,width=width2+3) txt.delete(0, END) txt.insert(0, 'NewConstellationName') txt.grid(column=column, row=row,padx=padx,pady=pady,columnspan=2,sticky=W) row += 1 # Constellation Options def updateTCEList(): tceList = [f.split('.')[0] for f in listdir(cwdFiles+'\\Constellations') if 
(isfile(join(cwdFiles+'\\Constellations', f))) & (f.split('.')[-1]=='tce' )& (f !='deckAccessTLE.tce')] comboCon['values'] = tceList comboCon = Combobox(window,width=width2,state='readonly',postcommand = updateTCEList) updateTCEList() comboCon.current(0) # set the selected item comboCon.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W) row += 1 # Radio Buttons def frameValue(): frame = selectedFrame.get() if frame == 0: frame = 'Inertial' elif frame == 1: frame = 'Fixed' return frame selectedFrame = IntVar() selectedFrame.set(0) radFrame1 = Radiobutton(window,text='Inertial', value=0, variable=selectedFrame) radFrame2 = Radiobutton(window,text='Fixed', value=1, variable=selectedFrame) radFrame1.grid(column=column-1,row=row,padx=padx,pady=pady) radFrame2.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W) row += 1 # Colors colorsList = ['Green','Cyan','Blue','Magenta','Red','Yellow','White','Black'] comboColor = Combobox(window,width=width2,state='readonly') comboColor['values'] = colorsList comboColor.current(0) # set the selected item comboColor.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W) row +=1 # Deck Access Available Objects def updateAccessList(root): objs = deckAccessAvailableObjs(root) for ii in range(len(objs)): objType = objs[ii].split('/')[-2] if objType == 'Sensor': objs[ii] = '/'.join(objs[ii].split('/')[-4:]) else: objs[ii] = '/'.join(objs[ii].split('/')[-2:]) comboDA['values'] = objs comboDA = Combobox(window,width=width2,state='readonly',postcommand = lambda: updateAccessList(root)) updateAccessList(root) try: comboDA.current(0) # set the selected item except: pass comboDA.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W) row += 1 # Entry box Times startTimeUTCG = root.ConversionUtility.ConvertDate('EpSec','UTCG',str(startTime)) txtStart = Entry(window,width=width2+3) txtStart.delete(0, END) txtStart.insert(0, startTimeUTCG) 
txtStart.grid(column=column,row=row,padx=padx,pady=pady,columnspan=2,sticky=W) startTime = root.ConversionUtility.ConvertDate('UTCG','EpSec',str(txtStart.get())) row += 1 stopTimeUTCG = root.ConversionUtility.ConvertDate('EpSec','UTCG',str(stopTime)) txtStop = Entry(window,width=width2+3) txtStop.delete(0, END) txtStop.insert(0, stopTimeUTCG) txtStop.grid(column=column,row=row,padx=padx,pady=pady,columnspan=2,sticky=W) stopTime = root.ConversionUtility.ConvertDate('UTCG','EpSec',str(txtStop.get())) row += 1 # Satellite Template def updateSatList(root): sats = FilterObjectsByType(root,'Satellite',name = '') for ii in range(len(sats)): sats[ii] = sats[ii].split('/')[-1] sats.insert(0,'') comboSat['values'] = sats comboSat = Combobox(window,width=width2,state='readonly',postcommand = lambda: updateSatList(root)) updateSatList(root) try: comboSat.current(0) # set the selected item except: pass comboSat.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W) row += 1 # Radio Buttons selected = IntVar() selected.set(1) rad1 = Radiobutton(window,text='Deck Access Only', value=1, variable=selected) rad2 = Radiobutton(window,text='Entire Constellation', value=2, variable=selected) rad1.grid(column=column-1,row=row,padx=padx,pady=pady) rad2.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W) row += 1 # Deck Access Available Objects def updateChainCovList(root): objs = chainCovAvailableObjs(root) for ii in range(len(objs)): objSplit = objs[ii].split('/') if objSplit[-4] =='Scenario': objs[ii] = '/'.join(objSplit[-2:]) elif objSplit[-4]=='Sensor': objs[ii] = '/'.join(objSplit[-6:]) else: objs[ii] = '/'.join(objSplit[-4:]) comboChainCov['values'] = objs comboChainCov = Combobox(window,width=width2,state='readonly',postcommand = lambda: updateChainCovList(root)) updateChainCovList(root) try: comboChainCov.current(0) # set the selected item except: pass comboChainCov.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W) row += 
1 # Chain Template def updateChainList(root): chains = FilterObjectsByType(root,'Chain',name = '') for ii in range(len(chains)): chains[ii] = chains[ii].split('/')[-1] # chains.insert(0,'') comboChain['values'] = chains comboChain = Combobox(window,width=width2,state='readonly',postcommand = lambda: updateChainList(root)) updateChainList(root) try: comboChain.current(0) # set the selected item except: pass comboChain.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W) row += 1 # Chain Coverage def updateCovList(root): covs = FilterObjectsByType(root,'CoverageDefinition',name = '') for ii in range(len(covs)): covs[ii] = covs[ii].split('/')[-1] # covs.insert(0,'') comboCov['values'] = covs comboCov = Combobox(window,width=width2,state='readonly',postcommand = updateCovList) updateCovList(root) try: comboCov.current(0) # set the selected item except: pass comboCov.grid(column=column,row=row,padx=padx,pady=pady,columnspan = 2,sticky=W) row += 2 # row += 4 # Unload Satellites btnUnload = Button(window,width=15,text='Unload Satellites',command=lambda: UnloadObjs(root,'Satellite',pattern=txtUnload.get())) btnUnload.grid(column=3,row=rowTxt+0,padx=padx,pady=pady,sticky=W+E) txtUnload = Entry(window,width=15) txtUnload.delete(0, END) txtUnload.insert(0, 'tle-*') txtUnload.grid(column=4,row=rowTxt+0,padx=padx,pady=pady,columnspan = 1,sticky=W) btnUnloadMTO = Button(window,width=15,text='Unload MTOs',command=lambda: UnloadObjs(root,'MTO',pattern=txtUnloadMTO.get())) btnUnloadMTO.grid(column=3,row=rowTxt+1,padx=padx,pady=pady,sticky=W) txtUnloadMTO = Entry(window,width=15) txtUnloadMTO.delete(0, END) txtUnloadMTO.insert(0, '*') txtUnloadMTO.grid(column=4,row=rowTxt+1,padx=padx,pady=pady,columnspan = 1,sticky=W) btnUnloadCon = Button(window,width=15,text='Unload Con.',command=lambda: UnloadObjs(root,'Constellation',pattern=txtUnloadCon.get())) btnUnloadCon.grid(column=3,row=rowTxt+2,padx=padx,pady=pady,sticky=W) txtUnloadCon = Entry(window,width=15) 
txtUnloadCon.delete(0, END) txtUnloadCon.insert(0, '*') txtUnloadCon.grid(column=4,row=rowTxt+2,padx=padx,pady=pady,columnspan = 1,sticky=W) def clear(): txtBox.delete(1.0,END) btnClear = Button(window,width=15,text='Clear TextBox',command=clear) btnClear.grid(column=3,row=rowTxt+3,padx=padx,pady=pady,sticky=W) # Keep window open window.mainloop() ```
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt print("pandas", pd.__version__) print("numpy",np.__version__) ``` # Cookbook This is a repository for *short and sweet* examples and links for useful pandas recipes. We encourage users to add to this documentation. Adding interesting links and/or inline examples to this section is a great *First Pull Request*. Simplified, condensed, new-user friendly, in-line examples have been inserted where possible to augment the Stack-Overflow and GitHub links. Many of the links contain expanded information, above what the in-line examples offer. pandas (pd) and NumPy (np) are the only two abbreviated imported modules. The rest are kept explicitly imported for newer users. ## Idioms <a id='cookbook-idioms'></a> These are some neat pandas `idioms` [if-then/if-then-else on one column, and assignment to another one or more columns:](https://stackoverflow.com/questions/17128302/python-pandas-idiom-for-if-then-else) ``` df = pd.DataFrame( {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]} ) df ``` ### if-then… An if-then on one column ``` df.loc[df.AAA >= 5, "BBB"] = -1 df ``` An if-then with assignment to 2 columns: ``` df.loc[df.AAA >= 5, ["BBB", "CCC"]] = 555 df ``` Add another line with different logic, to do the -else ``` df.loc[df.AAA < 5, ["BBB", "CCC"]] = 2000 df ``` Or use pandas where after you’ve set up a mask ``` df_mask = pd.DataFrame( {"AAA": [True] * 4, "BBB": [False] * 4, "CCC": [True, False] * 2} ) df.where(df_mask, -1000) ``` [if-then-else using NumPy’s where()](https://stackoverflow.com/questions/19913659/pandas-conditional-creation-of-a-series-dataframe-column) ``` df = pd.DataFrame( {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]} ) df df["logic"] = np.where(df["AAA"] > 5, "high", "low") df ``` ### Splitting [Split a frame with a boolean 
criterion](https://stackoverflow.com/questions/14957116/how-to-split-a-dataframe-according-to-a-boolean-criterion) ``` df = pd.DataFrame( {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]} ) df df[df.AAA <= 5] df[df.AAA > 5] ``` ### Building criteria [Select with multi-column criteria](https://stackoverflow.com/questions/15315452/selecting-with-complex-criteria-from-pandas-dataframe) ``` df = pd.DataFrame( {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]} ) df ``` …and (without assignment returns a Series) ``` df.loc[(df["BBB"] < 25) & (df["CCC"] >= -40), "AAA"] ``` …or (without assignment returns a Series) ``` df.loc[(df["BBB"] > 25) | (df["CCC"] >= -40), "AAA"] ``` …or (with assignment modifies the DataFrame.) ``` df.loc[(df["BBB"] > 25) | (df["CCC"] >= 75), "AAA"] = 0.1 df ``` [Select rows with data closest to certain value using argsort](https://stackoverflow.com/questions/17758023/return-rows-in-a-dataframe-closest-to-a-user-defined-number) ``` df = pd.DataFrame( {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]} ) df aValue = 43.0 df.loc[(df.CCC - aValue).abs().argsort()] ``` [Dynamically reduce a list of criteria using a binary operators](https://stackoverflow.com/questions/21058254/pandas-boolean-operation-in-a-python-list/21058331) ``` df = pd.DataFrame( {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]} ) df Crit1 = df.AAA <= 5.5 Crit2 = df.BBB == 10.0 Crit3 = df.CCC > -40.0 ``` One could hard code: ``` AllCrit = Crit1 & Crit2 & Crit3 ``` …Or it can be done with a list of dynamically built criteria ``` import functools CritList = [Crit1, Crit2, Crit3] AllCrit = functools.reduce(lambda x, y: x & y, CritList) df[AllCrit] ``` <a id='cookbook-selection'></a> ## Selection ### Dataframes The indexing docs. 
[Using both row labels and value conditionals](https://stackoverflow.com/questions/14725068/pandas-using-row-labels-in-boolean-indexing) ``` df = pd.DataFrame( {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]} ) df df[(df.AAA <= 6) & (df.index.isin([0, 2, 4]))] ``` [Use loc for label-oriented slicing and iloc positional slicing](https://github.com/pandas-dev/pandas/issues/2904) ``` df = pd.DataFrame( {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]}, index=["foo", "bar", "boo", "kar"], ) ``` There are 2 explicit slicing methods, with a third general case 1. Positional-oriented (Python slicing style : exclusive of end) 1. Label-oriented (Non-Python slicing style : inclusive of end) 1. General (Either slicing style : depends on if the slice contains labels or positions) <dl style='margin: 20px 0;'> <dt>::</dt> <dd> df.iloc[0:3] # Positional df.loc[“bar”:”kar”] # Label # Generic df[0:3] df[“bar”:”kar”] </dd> </dl> Ambiguity arises when an index consists of integers with a non-zero start or non-unit increment. ``` data = {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]} df2 = pd.DataFrame(data=data, index=[1, 2, 3, 4]) # Note index starts at 1. 
df2.iloc[1:3] # Position-oriented df2.loc[1:3] # Label-oriented ``` [Using inverse operator (~) to take the complement of a mask](https://stackoverflow.com/questions/14986510/picking-out-elements-based-on-complement-of-indices-in-python-pandas) ``` df = pd.DataFrame( {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]} ) df df[~((df.AAA <= 6) & (df.index.isin([0, 2, 4])))] ``` ### New columns [Efficiently and dynamically creating new columns using applymap](https://stackoverflow.com/questions/16575868/efficiently-creating-additional-columns-in-a-pandas-dataframe-using-map) ``` df = pd.DataFrame({"AAA": [1, 2, 1, 3], "BBB": [1, 1, 2, 2], "CCC": [2, 1, 3, 1]}) df source_cols = df.columns # Or some subset would work too new_cols = [str(x) + "_cat" for x in source_cols] categories = {1: "Alpha", 2: "Beta", 3: "Charlie"} df[new_cols] = df[source_cols].applymap(categories.get) df ``` [Keep other columns when using min() with groupby](https://stackoverflow.com/questions/23394476/keep-other-columns-when-using-min-with-groupby) ``` df = pd.DataFrame( {"AAA": [1, 1, 1, 2, 2, 2, 3, 3], "BBB": [2, 1, 3, 4, 5, 1, 2, 3]} ) df ``` Method 1 : idxmin() to get the index of the minimums ``` df.loc[df.groupby("AAA")["BBB"].idxmin()] ``` Method 2 : sort then take first of each ``` df.sort_values(by="BBB").groupby("AAA", as_index=False).first() ``` Notice the same results, with the exception of the index. <a id='cookbook-multi-index'></a> ## Multiindexing The multiindexing docs. 
[Creating a MultiIndex from a labeled frame](https://stackoverflow.com/questions/14916358/reshaping-dataframes-in-pandas-based-on-column-labels) ``` df = pd.DataFrame( { "row": [0, 1, 2], "One_X": [1.1, 1.1, 1.1], "One_Y": [1.2, 1.2, 1.2], "Two_X": [1.11, 1.11, 1.11], "Two_Y": [1.22, 1.22, 1.22], } ) df # As Labelled Index df = df.set_index("row") df # With Hierarchical Columns df.columns = pd.MultiIndex.from_tuples([tuple(c.split("_")) for c in df.columns]) df # Now stack & Reset df = df.stack(0).reset_index(1) df # And fix the labels (Notice the label 'level_1' got added automatically) df.columns = ["Sample", "All_X", "All_Y"] df ``` ### Arithmetic [Performing arithmetic with a MultiIndex that needs broadcasting](https://stackoverflow.com/questions/19501510/divide-entire-pandas-multiindex-dataframe-by-dataframe-variable/19502176#19502176) ``` cols = pd.MultiIndex.from_tuples( [(x, y) for x in ["A", "B", "C"] for y in ["O", "I"]] ) df = pd.DataFrame(np.random.randn(2, 6), index=["n", "m"], columns=cols) df df = df.div(df["C"], level=1) df ``` ### Slicing [Slicing a MultiIndex with xs](https://stackoverflow.com/questions/12590131/how-to-slice-multindex-columns-in-pandas-dataframes) ``` coords = [("AA", "one"), ("AA", "six"), ("BB", "one"), ("BB", "two"), ("BB", "six")] index = pd.MultiIndex.from_tuples(coords) df = pd.DataFrame([11, 22, 33, 44, 55], index, ["MyData"]) df ``` To take the cross section of the 1st level and 1st axis the index: ``` # Note : level and axis are optional, and default to zero df.xs("BB", level=0, axis=0) ``` …and now the 2nd level of the 1st axis. 
``` df.xs("six", level=1, axis=0) ``` [Slicing a MultiIndex with xs, method #2](https://stackoverflow.com/questions/14964493/multiindex-based-indexing-in-pandas) ``` import itertools index = list(itertools.product(["Ada", "Quinn", "Violet"], ["Comp", "Math", "Sci"])) headr = list(itertools.product(["Exams", "Labs"], ["I", "II"])) indx = pd.MultiIndex.from_tuples(index, names=["Student", "Course"]) cols = pd.MultiIndex.from_tuples(headr) # Notice these are un-named data = [[70 + x + y + (x * y) % 3 for x in range(4)] for y in range(9)] df = pd.DataFrame(data, indx, cols) df All = slice(None) df.loc["Violet"] df.loc[(All, "Math"), All] df.loc[(slice("Ada", "Quinn"), "Math"), All] df.loc[(All, "Math"), ("Exams")] df.loc[(All, "Math"), (All, "II")] ``` [Setting portions of a MultiIndex with xs](https://stackoverflow.com/questions/19319432/pandas-selecting-a-lower-level-in-a-dataframe-to-do-a-ffill) ### Sorting [Sort by specific column or an ordered list of columns, with a MultiIndex](https://stackoverflow.com/questions/14733871/mutli-index-sorting-in-pandas) ``` df.sort_values(by=("Labs", "II"), ascending=False) ``` [Partial selection, the need for sortedness;](https://github.com/pandas-dev/pandas/issues/2995) ### Levels [Prepending a level to a multiindex](https://stackoverflow.com/questions/14744068/prepend-a-level-to-a-pandas-multiindex) [Flatten Hierarchical columns](https://stackoverflow.com/questions/14507794/python-pandas-how-to-flatten-a-hierarchical-index-in-columns) <a id='cookbook-missing-data'></a> ## Missing data The missing data docs. 
Fill forward a reversed timeseries ``` df = pd.DataFrame( np.random.randn(6, 1), index=pd.date_range("2013-08-01", periods=6, freq="B"), columns=list("A"), ) df.loc[df.index[3], "A"] = np.nan df df.reindex(df.index[::-1]).ffill() ``` [cumsum reset at NaN values](https://stackoverflow.com/questions/18196811/cumsum-reset-at-nan) ### Replace [Using replace with backrefs](https://stackoverflow.com/questions/16818871/extracting-value-and-creating-new-column-out-of-it) <a id='cookbook-grouping'></a> ## Grouping The grouping docs. [Basic grouping with apply](https://stackoverflow.com/questions/15322632/python-pandas-df-groupy-agg-column-reference-in-agg) Unlike agg, apply’s callable is passed a sub-DataFrame which gives you access to all the columns ``` df = pd.DataFrame( { "animal": "cat dog cat fish dog cat cat".split(), "size": list("SSMMMLL"), "weight": [8, 10, 11, 1, 20, 12, 12], "adult": [False] * 5 + [True] * 2, } ) df # List the size of the animals with the highest weight. df.groupby("animal").apply(lambda subf: subf["size"][subf["weight"].idxmax()]) ``` [Using get_group](https://stackoverflow.com/questions/14734533/how-to-access-pandas-groupby-dataframe-by-key) ``` gb = df.groupby(["animal"]) gb.get_group("cat") ``` [Apply to different items in a group](https://stackoverflow.com/questions/15262134/apply-different-functions-to-different-items-in-group-object-python-pandas) ``` def GrowUp(x): avg_weight = sum(x[x["size"] == "S"].weight * 1.5) avg_weight += sum(x[x["size"] == "M"].weight * 1.25) avg_weight += sum(x[x["size"] == "L"].weight) avg_weight /= len(x) return pd.Series(["L", avg_weight, True], index=["size", "weight", "adult"]) expected_df = gb.apply(GrowUp) expected_df ``` [Expanding apply](https://stackoverflow.com/questions/14542145/reductions-down-a-column-in-pandas) ``` S = pd.Series([i / 100.0 for i in range(1, 11)]) def cum_ret(x, y): return x * (1 + y) def red(x): return functools.reduce(cum_ret, x, 1.0) S.expanding().apply(red, raw=True) ``` 
[Replacing some values with mean of the rest of a group](https://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means) ``` df = pd.DataFrame({"A": [1, 1, 2, 2], "B": [1, -1, 1, 2]}) gb = df.groupby("A") def replace(g): mask = g < 0 return g.where(mask, g[~mask].mean()) gb.transform(replace) ``` [Sort groups by aggregated data](https://stackoverflow.com/questions/14941366/pandas-sort-by-group-aggregate-and-column) ``` df = pd.DataFrame( { "code": ["foo", "bar", "baz"] * 2, "data": [0.16, -0.21, 0.33, 0.45, -0.59, 0.62], "flag": [False, True] * 3, } ) code_groups = df.groupby("code") agg_n_sort_order = code_groups[["data"]].transform(sum).sort_values(by="data") sorted_df = df.loc[agg_n_sort_order.index] sorted_df ``` [Create multiple aggregated columns](https://stackoverflow.com/questions/14897100/create-multiple-columns-in-pandas-aggregation-function) ``` rng = pd.date_range(start="2014-10-07", periods=10, freq="2min") ts = pd.Series(data=list(range(10)), index=rng) def MyCust(x): if len(x) > 2: return x[1] * 1.234 return pd.NaT mhc = {"Mean": np.mean, "Max": np.max, "Custom": MyCust} ts.resample("5min").apply(mhc) ts ``` [Create a value counts column and reassign back to the DataFrame](https://stackoverflow.com/questions/17709270/i-want-to-create-a-column-of-value-counts-in-my-pandas-dataframe) ``` df = pd.DataFrame( {"Color": "Red Red Red Blue".split(), "Value": [100, 150, 50, 50]} ) df df["Counts"] = df.groupby(["Color"]).transform(len) df ``` [Shift groups of the values in a column based on the index](https://stackoverflow.com/q/23198053/190597) ``` df = pd.DataFrame( {"line_race": [10, 10, 8, 10, 10, 8], "beyer": [99, 102, 103, 103, 88, 100]}, index=[ "Last Gunfighter", "Last Gunfighter", "Last Gunfighter", "Paynter", "Paynter", "Paynter", ], ) df df["beyer_shifted"] = df.groupby(level=0)["beyer"].shift(1) df ``` [Select row with maximum value from each group](https://stackoverflow.com/q/26701849/190597) ``` df = pd.DataFrame( { "host": 
["other", "other", "that", "this", "this"], "service": ["mail", "web", "mail", "mail", "web"], "no": [1, 2, 1, 2, 1], } ).set_index(["host", "service"]) mask = df.groupby(level=0).agg("idxmax") df_count = df.loc[mask["no"]].reset_index() df_count ``` [Grouping like Python’s itertools.groupby](https://stackoverflow.com/q/29142487/846892) ``` df = pd.DataFrame([0, 1, 0, 1, 1, 1, 0, 1, 1], columns=["A"]) df["A"].groupby((df["A"] != df["A"].shift()).cumsum()).groups df["A"].groupby((df["A"] != df["A"].shift()).cumsum()).cumsum() ``` ### Expanding data [Alignment and to-date](https://stackoverflow.com/questions/15489011/python-time-series-alignment-and-to-date-functions) [Rolling Computation window based on values instead of counts](https://stackoverflow.com/questions/14300768/pandas-rolling-computation-with-window-based-on-values-instead-of-counts) [Rolling Mean by Time Interval](https://stackoverflow.com/questions/15771472/pandas-rolling-mean-by-time-interval) ### Splitting [Splitting a frame](https://stackoverflow.com/questions/13353233/best-way-to-split-a-dataframe-given-an-edge/15449992#15449992) Create a list of dataframes, split using a delineation based on logic included in rows. ``` df = pd.DataFrame( data={ "Case": ["A", "A", "A", "B", "A", "A", "B", "A", "A"], "Data": np.random.randn(9), } ) dfs = list( zip( *df.groupby( (1 * (df["Case"] == "B")) .cumsum() .rolling(window=3, min_periods=1) .median() ) ) )[-1] dfs[0] dfs[1] dfs[2] ``` <a id='cookbook-pivot'></a> ### Pivot The Pivot docs. 
[Partial sums and subtotals](https://stackoverflow.com/questions/15570099/pandas-pivot-tables-row-subtotals/15574875#15574875) ``` df = pd.DataFrame( data={ "Province": ["ON", "QC", "BC", "AL", "AL", "MN", "ON"], "City": [ "Toronto", "Montreal", "Vancouver", "Calgary", "Edmonton", "Winnipeg", "Windsor", ], "Sales": [13, 6, 16, 8, 4, 3, 1], } ) table = pd.pivot_table( df, values=["Sales"], index=["Province"], columns=["City"], aggfunc=np.sum, margins=True, ) table.stack("City") ``` [Frequency table like plyr in R](https://stackoverflow.com/questions/15589354/frequency-tables-in-pandas-like-plyr-in-r) ``` grades = [48, 99, 75, 80, 42, 80, 72, 68, 36, 78] df = pd.DataFrame( { "ID": ["x%d" % r for r in range(10)], "Gender": ["F", "M", "F", "M", "F", "M", "F", "M", "M", "M"], "ExamYear": [ "2007", "2007","2007","2008","2008","2008","2008","2009","2009","2009", ], "Class": [ "algebra","stats","bio","algebra","algebra","stats","stats","algebra","bio","bio", ], "Participated": [ "yes", "yes", "yes", "yes","no", "yes", "yes","yes","yes","yes", ], "Passed": ["yes" if x > 50 else "no" for x in grades], "Employed": [ True,True,True,False,False,False,False,True,True,False, ], "Grade": grades, } ) df.groupby("ExamYear").agg( { "Participated": lambda x: x.value_counts()["yes"], "Passed": lambda x: sum(x == "yes"), "Employed": lambda x: sum(x), "Grade": lambda x: sum(x) / len(x), } ) ``` [Plot pandas DataFrame with year over year data](https://stackoverflow.com/questions/30379789/plot-pandas-data-frame-with-year-over-year-data) To create year and month cross tabulation: ``` df = pd.DataFrame( {"value": np.random.randn(36)}, index=pd.date_range("2011-01-01", freq="M", periods=36), ) pd.pivot_table( df, index=df.index.month, columns=df.index.year, values="value", aggfunc="sum" ) ``` ### Apply [Rolling apply to organize - Turning embedded lists into a MultiIndex frame](https://stackoverflow.com/questions/17349981/converting-pandas-dataframe-with-categorical-values-into-binary-values) 
``` df = pd.DataFrame( data={ "A": [[2, 4, 8, 16], [100, 200], [10, 20, 30]], "B": [["a", "b", "c"], ["jj", "kk"], ["ccc"]], }, index=["I", "II", "III"], ) def SeriesFromSubList(aList): return pd.Series(aList) df_orgz = pd.concat( {ind: row.apply(SeriesFromSubList) for ind, row in df.iterrows()} ) df_orgz ``` [Rolling apply with a DataFrame returning a Series](https://stackoverflow.com/questions/19121854/using-rolling-apply-on-a-dataframe-object) Rolling Apply to multiple columns where function calculates a Series before a Scalar from the Series is returned ``` df = pd.DataFrame( data=np.random.randn(2000, 2) / 10000, index=pd.date_range("2001-01-01", periods=2000), columns=["A", "B"], ) df def gm(df, const): v = ((((df["A"] + df["B"]) + 1).cumprod()) - 1) * const return v.iloc[-1] s = pd.Series( { df.index[i]: gm(df.iloc[i: min(i + 51, len(df) - 1)], 5) for i in range(len(df) - 50) } ) s ``` [Rolling apply with a DataFrame returning a Scalar](https://stackoverflow.com/questions/21040766/python-pandas-rolling-apply-two-column-input-into-function/21045831#21045831) Rolling Apply to multiple columns where function returns a Scalar (Volume Weighted Average Price) ``` rng = pd.date_range(start="2014-01-01", periods=100) df = pd.DataFrame( { "Open": np.random.randn(len(rng)), "Close": np.random.randn(len(rng)), "Volume": np.random.randint(100, 2000, len(rng)), }, index=rng, ) df def vwap(bars): return (bars.Close * bars.Volume).sum() / bars.Volume.sum() window = 5 s = pd.concat( [ (pd.Series(vwap(df.iloc[i: i + window]), index=[df.index[i + window]])) for i in range(len(df) - window) ] ) s.round(2) ``` ## Timeseries [Between times](https://stackoverflow.com/questions/14539992/pandas-drop-rows-outside-of-time-range) [Using indexer between time](https://stackoverflow.com/questions/17559885/pandas-dataframe-mask-based-on-index) [Constructing a datetime range that excludes weekends and includes only certain 
times](https://stackoverflow.com/questions/24010830/pandas-generate-sequential-timestamp-with-jump/24014440#24014440?) [Vectorized Lookup](https://stackoverflow.com/questions/13893227/vectorized-look-up-of-values-in-pandas-dataframe) [Aggregation and plotting time series](https://nipunbatra.github.io/blog/visualisation/2013/05/01/aggregation-timeseries.html) Turn a matrix with hours in columns and days in rows into a continuous row sequence in the form of a time series. [How to rearrange a Python pandas DataFrame?](https://stackoverflow.com/questions/15432659/how-to-rearrange-a-python-pandas-dataframe) [Dealing with duplicates when reindexing a timeseries to a specified frequency](https://stackoverflow.com/questions/22244383/pandas-df-refill-adding-two-columns-of-different-shape) Calculate the first day of the month for each entry in a DatetimeIndex ``` dates = pd.date_range("2000-01-01", periods=5) dates.to_period(freq="M").to_timestamp() ``` <a id='cookbook-resample'></a> ### Resampling The [Resample](38_timeseries.ipynb#timeseries-resampling) docs. 
[Using Grouper instead of TimeGrouper for time grouping of values](https://stackoverflow.com/questions/15297053/how-can-i-divide-single-values-of-a-dataframe-by-monthly-averages) [Time grouping with some missing values](https://stackoverflow.com/questions/33637312/pandas-grouper-by-frequency-with-completeness-requirement) Valid frequency arguments to Grouper [Timeseries](38_timeseries.ipynb#timeseries-offset-aliases) [Grouping using a MultiIndex](https://stackoverflow.com/questions/41483763/pandas-timegrouper-on-multiindex) [Using TimeGrouper and another grouping to create subgroups, then apply a custom function](https://github.com/pandas-dev/pandas/issues/3791) [Resampling with custom periods](https://stackoverflow.com/questions/15408156/resampling-with-custom-periods) [Resample intraday frame without adding new days](https://stackoverflow.com/questions/14898574/resample-intrday-pandas-dataframe-without-add-new-days) [Resample minute data](https://stackoverflow.com/questions/14861023/resampling-minute-data) [Resample with groupby](https://stackoverflow.com/q/18677271/564538) <a id='cookbook-merge'></a> ## Merge The [Concat](25_merging_join_concat.ipynb#merging-concatenation) docs. The [Join](25_merging_join_concat.ipynb#merging-join) docs. 
[Append two dataframes with overlapping index (emulate R rbind)](https://stackoverflow.com/questions/14988480/pandas-version-of-rbind) ``` rng = pd.date_range("2000-01-01", periods=6) df1 = pd.DataFrame(np.random.randn(6, 3), index=rng, columns=["A", "B", "C"]) df2 = df1.copy() ``` Depending on df construction, `ignore_index` may be needed ``` df = df1.append(df2, ignore_index=True) df ``` [Self Join of a DataFrame](https://github.com/pandas-dev/pandas/issues/2996) ``` df = pd.DataFrame( data={ "Area": ["A"] * 5 + ["C"] * 2, "Bins": [110] * 2 + [160] * 3 + [40] * 2, "Test_0": [0, 1, 0, 1, 2, 0, 1], "Data": np.random.randn(7), } ) df df["Test_1"] = df["Test_0"] - 1 pd.merge( df, df, left_on=["Bins", "Area", "Test_0"], right_on=["Bins", "Area", "Test_1"], suffixes=("_L", "_R"), ) ``` [How to set the index and join](https://stackoverflow.com/questions/14341805/pandas-merge-pd-merge-how-to-set-the-index-and-join) [KDB like asof join](https://stackoverflow.com/questions/12322289/kdb-like-asof-join-for-timeseries-data-in-pandas/12336039#12336039) [Join with a criteria based on the values](https://stackoverflow.com/questions/15581829/how-to-perform-an-inner-or-outer-join-of-dataframes-with-pandas-on-non-simplisti) [Using searchsorted to merge based on values inside a range](https://stackoverflow.com/questions/25125626/pandas-merge-with-logic/2512764) <a id='cookbook-plotting'></a> ## Plotting The Plotting docs. 
[Make Matplotlib look like R](https://stackoverflow.com/questions/14349055/making-matplotlib-graphs-look-like-r-by-default) [Setting x-axis major and minor labels](https://stackoverflow.com/questions/12945971/pandas-timeseries-plot-setting-x-axis-major-and-minor-ticks-and-labels) [Plotting multiple charts in an IPython Jupyter notebook](https://stackoverflow.com/questions/16392921/make-more-than-one-chart-in-same-ipython-notebook-cell) [Creating a multi-line plot](https://stackoverflow.com/questions/16568964/make-a-multiline-plot-from-csv-file-in-matplotlib) [Plotting a heatmap](https://stackoverflow.com/questions/17050202/plot-timeseries-of-histograms-in-python) [Annotate a time-series plot](https://stackoverflow.com/questions/11067368/annotate-time-series-plot-in-matplotlib) [Annotate a time-series plot #2](https://stackoverflow.com/questions/17891493/annotating-points-from-a-pandas-dataframe-in-matplotlib-plot) [Generate Embedded plots in excel files using Pandas, Vincent and xlsxwriter](https://pandas-xlsxwriter-charts.readthedocs.io/) [Boxplot for each quartile of a stratifying variable](https://stackoverflow.com/questions/23232989/boxplot-stratified-by-column-in-python-pandas) ``` df = pd.DataFrame( { "stratifying_var": np.random.uniform(0, 100, 20), "price": np.random.normal(100, 5, 20), } ) df["quartiles"] = pd.qcut( df["stratifying_var"], 4, labels=["0-25%", "25-50%", "50-75%", "75-100%"] ) df.boxplot(column="price", by="quartiles") ``` ## Data in/out [Performance comparison of SQL vs HDF5](https://stackoverflow.com/questions/16628329/hdf5-and-sqlite-concurrency-compression-i-o-performance) <a id='cookbook-csv'></a> ### CSV The CSV docs [read_csv in action](https://wesmckinney.com/blog/update-on-upcoming-pandas-v0-10-new-file-parser-other-performance-wins/) [appending to a csv](https://stackoverflow.com/questions/17134942/pandas-dataframe-output-end-of-csv) [Reading a csv 
chunk-by-chunk](https://stackoverflow.com/questions/11622652/large-persistent-dataframe-in-pandas/12193309#12193309) [Reading only certain rows of a csv chunk-by-chunk](https://stackoverflow.com/questions/19674212/pandas-data-frame-select-rows-and-clear-memory) [Reading the first few lines of a frame](https://stackoverflow.com/questions/15008970/way-to-read-first-few-lines-for-pandas-dataframe) Reading a file that is compressed but not by `gzip/bz2` (the native compressed formats which `read_csv` understands). This example shows a `WinZipped` file, but is a general application of opening the file within a context manager and using that handle to read. [See here](https://stackoverflow.com/questions/17789907/pandas-convert-winzipped-csv-file-to-data-frame) [Inferring dtypes from a file](https://stackoverflow.com/questions/15555005/get-inferred-dataframe-types-iteratively-using-chunksize) [Dealing with bad lines](https://github.com/pandas-dev/pandas/issues/2886) [Dealing with bad lines II](http://nipunbatra.github.io/2013/06/reading-unclean-data-csv-using-pandas/) [Reading CSV with Unix timestamps and converting to local timezone](http://nipunbatra.github.io/2013/06/pandas-reading-csv-with-unix-timestamps-and-converting-to-local-timezone/) [Write a multi-row index CSV without writing duplicates](https://stackoverflow.com/questions/17349574/pandas-write-multiindex-rows-with-to-csv) <a id='cookbook-csv-multiple-files'></a> #### Reading multiple files to create a single DataFrame The best way to combine multiple files into a single DataFrame is to read the individual frames one by one, put all of the individual frames into a list, and then combine the frames in the list using `pd.concat()`: ``` for i in range(3): data = pd.DataFrame(np.random.randn(10, 4)) data.to_csv("file_{}.csv".format(i)) files = ["file_0.csv", "file_1.csv", "file_2.csv"] result = pd.concat([pd.read_csv(f) for f in files], ignore_index=True) ``` You can use the same approach to read all files 
matching a pattern. Here is an example using `glob`: ``` import glob import os files = glob.glob("file_*.csv") result = pd.concat([pd.read_csv(f) for f in files], ignore_index=True) ``` Finally, this strategy will work with the other `pd.read_*(...)` functions described in the io docs. <dl style='margin: 20px 0;'> <dt>::</dt> <dd> <dl style='margin: 20px 0;'> <dt>suppress</dt> <dd> </dd> </dl> <dl style='margin: 20px 0;'> <dt>for i in range(3):</dt> <dd> os.remove(“file_{}.csv”.format(i)) </dd> </dl> </dd> </dl> #### Parsing date components in multi-columns Parsing date components in multi-columns is faster with a format ``` i = pd.date_range("20000101", periods=10000) df = pd.DataFrame({"year": i.year, "month": i.month, "day": i.day}) df.head() %timeit pd.to_datetime(df.year * 10000 + df.month * 100 + df.day, format='%Y%m%d') ds = df.apply(lambda x: "%04d%02d%02d" % (x["year"], x["month"], x["day"]), axis=1) ds.head() %timeit pd.to_datetime(ds) ``` #### Skip row between header and data ``` data = """;;;; ;;;; ;;;; ;;;; ;;;; ;;;; ;;;; ;;;; ;;;; ;;;; date;Param1;Param2;Param4;Param5 ;m²;°C;m²;m ;;;; 01.01.1990 00:00;1;1;2;3 01.01.1990 01:00;5;3;4;5 01.01.1990 02:00;9;5;6;7 01.01.1990 03:00;13;7;8;9 01.01.1990 04:00;17;9;10;11 01.01.1990 05:00;21;11;12;13 """ ``` ##### Option 1: pass rows explicitly to skip rows ``` from io import StringIO pd.read_csv( StringIO(data), sep=";", skiprows=[11, 12], index_col=0, parse_dates=True, header=10, ) ``` ##### Option 2: read column names and then data ``` pd.read_csv(StringIO(data), sep=";", header=10, nrows=10).columns columns = pd.read_csv(StringIO(data), sep=";", header=10, nrows=10).columns pd.read_csv( StringIO(data), sep=";", index_col=0, header=12, parse_dates=True, names=columns ) ``` <a id='cookbook-sql'></a> ### SQL The SQL docs [Reading from databases with SQL](https://stackoverflow.com/questions/10065051/python-pandas-and-databases-like-mysql) <a id='cookbook-excel'></a> ### Excel The Excel docs [Reading from a 
filelike handle](https://stackoverflow.com/questions/15588713/sheets-of-excel-workbook-from-a-url-into-a-pandas-dataframe) [Modifying formatting in XlsxWriter output](https://pbpython.com/improve-pandas-excel-output.html) <a id='cookbook-html'></a> ### HTML [Reading HTML tables from a server that cannot handle the default request header](https://stackoverflow.com/a/18939272/564538) <a id='cookbook-hdf'></a> ### HDFStore The HDFStores docs [Simple queries with a Timestamp Index](https://stackoverflow.com/questions/13926089/selecting-columns-from-pandas-hdfstore-table) [Managing heterogeneous data using a linked multiple table hierarchy](https://github.com/pandas-dev/pandas/issues/3032) [Merging on-disk tables with millions of rows](https://stackoverflow.com/questions/14614512/merging-two-tables-with-millions-of-rows-in-python/14617925#14617925) [Avoiding inconsistencies when writing to a store from multiple processes/threads](https://stackoverflow.com/a/29014295/2858145) De-duplicating a large store by chunks, essentially a recursive reduction operation. Shows a function for taking in data from csv file and creating a store by chunks, with date parsing as well. 
[See here](https://stackoverflow.com/questions/16110252/need-to-compare-very-large-files-around-1-5gb-in-python/16110391#16110391) [Creating a store chunk-by-chunk from a csv file](https://stackoverflow.com/questions/20428355/appending-column-to-frame-of-hdf-file-in-pandas/20428786#20428786) [Appending to a store, while creating a unique index](https://stackoverflow.com/questions/16997048/how-does-one-append-large-amounts-of-data-to-a-pandas-hdfstore-and-get-a-natural/16999397#16999397) [Large Data work flows](https://stackoverflow.com/questions/14262433/large-data-work-flows-using-pandas) [Reading in a sequence of files, then providing a global unique index to a store while appending](https://stackoverflow.com/questions/16997048/how-does-one-append-large-amounts-of-data-to-a-pandas-hdfstore-and-get-a-natural) [Groupby on a HDFStore with low group density](https://stackoverflow.com/questions/15798209/pandas-group-by-query-on-large-data-in-hdfstore) [Groupby on a HDFStore with high group density](https://stackoverflow.com/questions/25459982/trouble-with-grouby-on-millions-of-keys-on-a-chunked-file-in-python-pandas/25471765#25471765) [Hierarchical queries on a HDFStore](https://stackoverflow.com/questions/22777284/improve-query-performance-from-a-large-hdfstore-table-with-pandas/22820780#22820780) [Counting with a HDFStore](https://stackoverflow.com/questions/20497897/converting-dict-of-dicts-into-pandas-dataframe-memory-issues) [Troubleshoot HDFStore exceptions](https://stackoverflow.com/questions/15488809/how-to-trouble-shoot-hdfstore-exception-cannot-find-the-correct-atom-type) [Setting min_itemsize with strings](https://stackoverflow.com/questions/15988871/hdfstore-appendstring-dataframe-fails-when-string-column-contents-are-longer) [Using ptrepack to create a completely-sorted-index on a store](https://stackoverflow.com/questions/17893370/ptrepack-sortby-needs-full-index) Storing Attributes to a group node ``` df = pd.DataFrame(np.random.randn(8, 3)) store = 
pd.HDFStore("test.h5") store.put("df", df) # you can store an arbitrary Python object via pickle store.get_storer("df").attrs.my_attribute = {"A": 10} store.get_storer("df").attrs.my_attribute store.close() os.remove("test.h5") ``` You can create or load an HDFStore in-memory by passing the `driver` parameter to PyTables. Changes are only written to disk when the HDFStore is closed. ``` store = pd.HDFStore("test.h5", "w", driver="H5FD_CORE") df = pd.DataFrame(np.random.randn(8, 3)) store["test"] = df # only after closing the store, data is written to disk: store.close() os.remove("test.h5") ``` ### Binary files pandas readily accepts NumPy record arrays, if you need to read in a binary file consisting of an array of C structs. For example, given this C program in a file called `main.c` compiled with `gcc main.c -std=gnu99` on a 64-bit machine, ```c #include <stdio.h> #include <stdint.h> typedef struct _Data { int32_t count; double avg; float scale; } Data; int main(int argc, const char *argv[]) { size_t n = 10; Data d[n]; for (int i = 0; i < n; ++i) { d[i].count = i; d[i].avg = i + 1.0; d[i].scale = (float) i + 2.0f; } FILE *file = fopen("binary.dat", "wb"); fwrite(&d, sizeof(Data), n, file); fclose(file); return 0; } ``` the following Python code will read the binary file `'binary.dat'` into a pandas `DataFrame`, where each element of the struct corresponds to a column in the frame: ```python names = "count", "avg", "scale" # note that the offsets are larger than the size of the type because of # struct padding offsets = 0, 8, 16 formats = "i4", "f8", "f4" dt = np.dtype({"names": names, "offsets": offsets, "formats": formats}, align=True) df = pd.DataFrame(np.fromfile("binary.dat", dt)) ``` >**Note** > >The offsets of the structure elements may be different depending on the architecture of the machine on which the file was created. Using a raw binary file format like this for general data storage is not recommended, as it is not cross platform. 
We recommended either HDF5 or parquet, both of which are supported by pandas’ IO facilities. ## Computation [Numerical integration (sample-based) of a time series](https://nbviewer.ipython.org/5720498) ### Correlation Often it’s useful to obtain the lower (or upper) triangular form of a correlation matrix calculated from `DataFrame.corr()`. This can be achieved by passing a boolean mask to `where` as follows: ``` df = pd.DataFrame(np.random.random(size=(100, 5))) corr_mat = df.corr() mask = np.tril(np.ones_like(corr_mat, dtype=np.bool_), k=-1) corr_mat.where(mask) ``` The `method` argument within `DataFrame.corr` can accept a callable in addition to the named correlation types. Here we compute the [distance correlation](https://en.wikipedia.org/wiki/Distance_correlation) matrix for a `DataFrame` object. ``` def distcorr(x, y): n = len(x) a = np.zeros(shape=(n, n)) b = np.zeros(shape=(n, n)) for i in range(n): for j in range(i + 1, n): a[i, j] = abs(x[i] - x[j]) b[i, j] = abs(y[i] - y[j]) a += a.T b += b.T a_bar = np.vstack([np.nanmean(a, axis=0)] * n) b_bar = np.vstack([np.nanmean(b, axis=0)] * n) A = a - a_bar - a_bar.T + np.full(shape=(n, n), fill_value=a_bar.mean()) B = b - b_bar - b_bar.T + np.full(shape=(n, n), fill_value=b_bar.mean()) cov_ab = np.sqrt(np.nansum(A * B)) / n std_a = np.sqrt(np.sqrt(np.nansum(A ** 2)) / n) std_b = np.sqrt(np.sqrt(np.nansum(B ** 2)) / n) return cov_ab / std_a / std_b df = pd.DataFrame(np.random.normal(size=(100, 3))) df.corr(method=distcorr) ``` ## Timedeltas The [Timedeltas](39_timedeltas.ipynb#timedeltas-timedeltas) docs. 
[Using timedeltas](https://github.com/pandas-dev/pandas/pull/2899) ``` import datetime s = pd.Series(pd.date_range("2012-1-1", periods=3, freq="D")) s - s.max() s.max() - s s - datetime.datetime(2011, 1, 1, 3, 5) s + datetime.timedelta(minutes=5) datetime.datetime(2011, 1, 1, 3, 5) - s datetime.timedelta(minutes=5) + s ``` [Adding and subtracting deltas and dates](https://stackoverflow.com/questions/16385785/add-days-to-dates-in-dataframe) ``` deltas = pd.Series([datetime.timedelta(days=i) for i in range(3)]) df = pd.DataFrame({"A": s, "B": deltas}) df df["New Dates"] = df["A"] + df["B"] df["Delta"] = df["A"] - df["New Dates"] df df.dtypes ``` [Another example](https://stackoverflow.com/questions/15683588/iterating-through-a-pandas-dataframe) Values can be set to NaT using np.nan, similar to datetime ``` y = s - s.shift() y y[1] = np.nan y ``` ## Creating example data To create a dataframe from every combination of some given values, like R’s `expand.grid()` function, we can create a dict where the keys are column names and the values are lists of the data values: ``` def expand_grid(data_dict): rows = itertools.product(*data_dict.values()) return pd.DataFrame.from_records(rows, columns=data_dict.keys()) df = expand_grid( {"height": [60, 70], "weight": [100, 140, 180], "sex": ["Male", "Female"]} ) df ```
github_jupyter
## Using low dimensional embeddings to discover subtypes of breast cancer This notebook is largely based on https://towardsdatascience.com/reduce-dimensions-for-single-cell-4224778a2d67 (credit to Nikolay Oskolkov). https://www.nature.com/articles/s41467-018-07582-3#data-availability ``` import pandas as pd import numpy as np import GEOparse from matplotlib import pyplot as plt GEO_ID = "GSE111229" # from the article ``` #### Exercise 1. Load the dataset into `rna_seq` using GEOparse. ``` # %load solutions/ex4_1.py rna_seq = GEOparse.get_GEO(geo=GEO_ID, destdir="./") dir(rna_seq) rna_seq.download_SRA?? rna_seq.geotype rna_seq.phenotype_data.shape rna_seq.phenotype_data.shape rna_seq.to_soft('test', False) cafs = pd.read_csv('data/CAFs.txt', sep='\t') sorted(cafs.cluster.unique()) expr = cafs ``` ### The expression matrix 716 cells have been sequenced, and the expression levels have been assessed for 558 genes. Arranging the cells as rows and genes as columns we obtain an *expression matrix*. ``` expr.shape expr ``` Before going further, try to reflect for a moment how you would try to illuminate any pattern in this data, given what you already know. #### Plot the expression matrix ``` plt.figure(figsize=(8,8)) plt.imshow(expr.values, cmap='Greens', vmax=4000, vmin=0) plt.title('Expression matrix') plt.ylabel('Cells') plt.xlabel('Genes') plt.colorbar() plt.show() ``` #### Exercise 2. The data is very sparse (most entries are zero), can you quantify how sparse it is? (i.e. how many of the entries are 0) ``` # %load solutions/ex4_2.py np.count_nonzero(expr.values) / np.prod(expr.shape) # only 20% of the entries are non-zero. 
print("\n" + "Dimensions of input file: " + str(expr.shape) + "\n") print("\n" + "Last column corresponds to cluster assignments: " + "\n") print(expr.iloc[0:4, (expr.shape[1]-4):expr.shape[1]]) X = expr.values[:,0:(expr.shape[1]-1)] Y = expr.values[:,expr.shape[1]-1] #cluster X = np.log(X + 1) cafs.dtypes.unique() ``` ### Decomposing the signals Now that we have gained some basic understanding of the data, we see it is fit for machine learning. You have already seen a few techniques for dimensionality reduction. We start with PCA ``` from sklearn.decomposition import PCA #from matplotlib import cm #dir(cm) # available colors ``` #### Exercise 3. Perform PCA on the expression data and visualize the results (with colors to represent the ground truth clusters) ``` # %load solutions/ex4_3.py model = PCA() pca = model.fit_transform(X) plt.scatter(pca[:, 0], pca[:, 1], c = Y, cmap = 'rainbow', s = 1) plt.xlabel("PC1", fontsize = 20); plt.ylabel("PC2", fontsize = 20) plt.plot(model.explained_variance_ratio_[:10]) plt.xticks(range(10));plt.show() ``` PCA is completely unsupervised. Linear discriminant analysis (LDA) is often used for the same purpose as PCA (dimensionality reduction), but is strictly speaking not unsupervised. ``` from sklearn.discriminant_analysis import LinearDiscriminantAnalysis model = LinearDiscriminantAnalysis(n_components = 2, priors = None, shrinkage = 'auto', solver = 'eigen', store_covariance = False, tol = 0.0001) lda = model.fit_transform(X, Y) plt.scatter(lda[:, 0], lda[:, 1], c = Y, cmap = 'viridis', s = 1) plt.xlabel("LDA1", fontsize = 20); plt.ylabel("LDA2", fontsize = 20) feature_importances = pd.DataFrame({'Gene':np.array(expr.columns)[:-1], 'Score':abs(model.coef_[0])}) print(feature_importances.sort_values('Score', ascending = False).head(20)) ``` The way to interpret the data above: we clearly see the data lie in three clusters, suggesting we have found 3 different separable expression-signatures. 
However, we also see one apparent cluster is occupied by two of the true clusters (the colors are imposed by the fact that we know the "ground truth", but unsupervised methods are generally used for data exploration in which we do not know of these things). # Non-linear methods # t-SNE t-SNE is a very popular decomposition technique used in molecular biology, especially for visualization purposes. t-SNE generally does not cope well with high dimensionality, so it is common to first transform the data with PCA and then run this through t-SNE. Here we will do both with and without prereducing the dimensionality. ``` from sklearn.manifold import TSNE model = TSNE(learning_rate = 10, n_components = 2, random_state = 123, perplexity = 30) tsne = model.fit_transform(X) plt.scatter(tsne[:, 0], tsne[:, 1], c = Y, cmap = 'rainbow', s = 2, marker='x') plt.title('tSNE', fontsize = 20) plt.xlabel("tSNE1", fontsize = 20) plt.ylabel("tSNE2", fontsize = 20) ``` #### Exercise 4. Reduce the data first with PCA to 30 principal components, then rerun the tSNE on this transformed data. ``` # %load solutions/ex4_4.py X_reduced = PCA(n_components = 30).fit_transform(X) model = TSNE(learning_rate = 10, n_components = 2, random_state = 123, perplexity = 30) tsne = model.fit_transform(X_reduced) plt.scatter(tsne[:, 0], tsne[:, 1], c = Y, cmap = 'rainbow', s = 2, marker='x') plt.title('tSNE on PCA', fontsize = 20) plt.xlabel("tSNE1", fontsize = 20) plt.ylabel("tSNE2", fontsize = 20) ``` While it can be hard to discern the performance boost of prereduction, we can certainly see that t-SNE performs better than a linear method like PCA. However, non-linearity is no guarantee of success itself. For instance, Isomap does not do well with this data. 
``` from sklearn.manifold import Isomap model = Isomap() isomap = model.fit_transform(X) plt.scatter(isomap[:, 0], isomap[:, 1], c = Y, cmap = 'viridis', s = 1) plt.title('ISOMAP') #plt.colorbar() plt.xlabel("ISO1") plt.ylabel("ISO2") ``` We should not throw Isomap out the window yet, like most algorithm, there is no one-size-fits-all. Isomap is well suited for tasks without clear clusters, but continuous change is present. # UMAP A more recent alternative to t-SNE is [UMAP](https://arxiv.org/abs/1802.03426), which also produces high quality visualizations with good separation, and scales better than t-sne with large datasets. ``` from umap import UMAP print("Performing Uniform Manifold Approximation and Projection (UMAP) ...") #model = UMAP(n_neighbors = 30, min_dist = 0.3, n_components = 2) model = UMAP() umap = model.fit_transform(X) # or X_reduced plt.scatter(umap[:, 0], umap[:, 1], c = Y, cmap = 'viridis', s = 1) plt.title('UMAP') #plt.colorbar() plt.xlabel("UMAP1") plt.ylabel("UMAP2") ``` #### Conclusion In summary, when doing data exploration of gene expression (and other biomedical data), non-linear methods are preferred to linear ones.
github_jupyter
``` import hoomd import hoomd.hpmc import ex_render import math from matplotlib import pyplot import numpy %matplotlib inline ``` # Selecting move sizes HPMC allows you to set the translation and rotation move sizes. Set the move size too small and almost all trial moves are accepted, but it takes many time steps to move the whole system an appreciable amount. Set the move size too large and individual moves will advance the system significantly, but most of the trial moves are rejected. To find the true optimal move size, you need to define the slowest evolving order parameter in the system. Then perform simulations at many move sizes and find the one with where that order parameter has the fastest decorrelation time. ## Acceptance rule of thumb In a wide range of systems, the optimal move size is one where the move acceptance ratio is 20%. This rule applies in moderately dense to dense system configurations. HPMC can auto-tune the move size to meet a given acceptance ratio. To demonstrate, here is the hard square tutorial script: ``` hoomd.context.initialize('--mode=cpu'); system = hoomd.init.create_lattice(unitcell=hoomd.lattice.sq(a=1.2), n=10); mc = hoomd.hpmc.integrate.convex_polygon(d=0.1, a=0.1, seed=1); square_verts = [[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5], [-0.5, 0.5]]; mc.shape_param.set('A', vertices=square_verts); log1 = hoomd.analyze.log(filename="log-output.log", quantities=['hpmc_sweep', 'hpmc_translate_acceptance', 'hpmc_rotate_acceptance', 'hpmc_d', 'hpmc_a', 'hpmc_move_ratio', 'hpmc_overlap_count'], period=10, overwrite=True); ``` Activate the tuner and tell it to tune both the **d** and **a** moves. You can restrict it to only tune one of the move types and provide a range of move sizes the tuner is allowed to choose from. This example sets a maximum translation move size of half the particle width, and a maximum rotation move size that rotates the square all the way to the next symmetric configuration. 
``` tuner = hoomd.hpmc.util.tune(obj=mc, tunables=['d', 'a'], max_val=[0.5, 2*math.pi/4], target=0.2); ``` Update the tuner between short runs. It will examine the acceptance ratio and adjust the move sizes to meet the target acceptance ratio. ``` for i in range(20): hoomd.run(100, quiet=True); tuner.update(); ``` In this example, the acceptance ratios converge after only 10 steps of the tuner. ``` data = numpy.genfromtxt(fname='log-output.log', skip_header=True); pyplot.figure(figsize=(4,2.2), dpi=140); pyplot.plot(data[:,0], data[:,2], label='translate acceptance'); pyplot.plot(data[:,0], data[:,4], label='d'); pyplot.xlabel('time step'); pyplot.ylabel('acceptance / move size'); pyplot.legend(); pyplot.figure(figsize=(4,2.2), dpi=140); pyplot.plot(data[:,0], data[:,3], label='rotate acceptance'); pyplot.plot(data[:,0], data[:,5], label='a'); pyplot.xlabel('time step'); pyplot.ylabel('acceptance / move size'); pyplot.legend(loc='right'); ``` ## Sampling equilibrium states Strictly speaking, changing the move size with a tuner **VIOLATES DETAILED BALANCE**. When you make ensemble averages, do not include the period of the simulation where you executed the tuner. This example shows how to make the equilibrium run as a second stage of the script. ``` d = hoomd.dump.gsd("trajectory-square.gsd", period=1000, group=hoomd.group.all(), overwrite=True); hoomd.run(10000); ``` Examine how the system configuration evolves over time. [ex_render](ex_render.py) is a helper script that builds animated gifs from trajectory files and system snapshots. It is part of the [hoomd-examples](https://github.com/glotzerlab/hoomd-examples) repository and designed only to render these examples. ``` ex_render.display_movie(lambda x: ex_render.render_polygon_frame(x, square_verts), 'trajectory-square.gsd') ```
github_jupyter
# Modeling: Aspect-Based Sentiment Analysis ## BERTweet Oversampling as a solution to the imbalance still wasn't enough to raise the model's performance significantly. This was especially the case because the validation and test sets were very small and still imbalanced (plus, we can't resample these!). Thus, my next step is to gather more data by sampling tweets I had not previously annotated, annotating this new sample and ONLY retaining tweets that have aspects – as we have enough tweets without aspects already! ## 1. Library Importation ``` #Data manipulation and visualization packages import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt #Modeling packages import torch from tqdm.auto import tqdm import pytorch_lightning as pl from torchmetrics import Accuracy, F1, AUROC from torch.utils.data import Dataset, DataLoader from pytorch_lightning.loggers import TensorBoardLogger from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping from transformers import AdamW, get_linear_schedule_with_warmup from transformers import AutoTokenizer, AutoModel #Model evaluation and metrics from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, multilabel_confusion_matrix #Set seed RANDOM_SEED = 123 pl.seed_everything(RANDOM_SEED) ``` ## 2. Loading the data ``` #Load the dataframe with punctuated tweets. 
We will replace the tweets in the dataset below with this df_with_punct = pd.read_csv("../data/processed/sample_encoded_and_cleaned.csv") print(df_with_punct.shape) df_with_punct.head() #Load the corresponding aspect annotated data with the text cleaned with punctuation removal df = pd.read_csv("../data/processed/absa_labelled.csv") print(df.shape) df.head() #Create new dataset with the punctuated tweets and the aspects annotated aspect_df = pd.concat([df_with_punct[['Text']], df[['Aspects','Sentiment']]],axis=1) aspect_df.head() #Save the new dataframe for future reference aspect_df.to_csv("../data/processed/absa_text_with_punct.csv", index=False) ``` ## 3. Reformat the data ### a. Convert lists from string format to list ``` #Convert Aspects column aspect_df.Aspects = aspect_df.Aspects.apply(lambda x: eval(x) if (pd.notnull(x)) else x) #Convert Sentiment column aspect_df.Sentiment = aspect_df.Sentiment.apply(lambda x: eval(x) if (pd.notnull(x)) else x) #Make a copy of the dataframe processed_aspect_df = aspect_df.copy() #Quick preview processed_aspect_df.head() ``` ### b. Fill NaNs with [None] ``` processed_aspect_df = processed_aspect_df.apply(lambda s: s.fillna({i: [None] for i in df.index})) processed_aspect_df.head() ``` ### b. Binarizing the aspects ``` #List aspects determined during the annotation phase #Note: This might not be exhaustive! But it should cover most cases. It is also subjective! 
#Also using synonyms of these words will likely yield different results ASPECTS = ['cost','speed','reliability','coverage', 'customer service', 'trustworthiness'] #Iterate through all the aspects and if the aspect is not in the tweet, record 0 else record 1 for aspect in ASPECTS[::-1]: processed_aspect_df.insert(1,aspect,processed_aspect_df.Aspects.apply(lambda x: 1 if aspect in x else 0)) #Drop the Aspects column processed_aspect_df.drop(columns=['Aspects'] , inplace=True) #Quick preview processed_aspect_df.head() #Save the binarized dataframe processed_aspect_df.to_csv("../data/processed/absa_binarized.csv",index=False) ``` ## 4. Quick EDA ``` with plt.style.context(['notebook','no-latex','grid']): plt.figure(figsize = (12,7), dpi=300) processed_aspect_df[ASPECTS].sum().sort_values()\ .plot(kind = "barh", color = 'cornflowerblue')\ .set(xlabel = "Number of times detected in tweets", ylabel = "Aspect category") plt.show() ``` From the above, we see that the data is quite imbalanced. Trustworthiness is especially important to point out. As there are very few tweets tagged with trustworthiness, we might need to oversample the label. ## 5. Split data into training, validation and test set ### a. Split to training and test set ``` #Split the data 80:20 (training and test) train_df, test_df = train_test_split(processed_aspect_df,test_size=0.20, stratify = processed_aspect_df[['trustworthiness','customer service']]) print(f"Train size: {train_df.shape}", f"Test size: {test_df.shape}") ``` ### b. Split training set to training and validation set ``` train_df, val_df = train_test_split(train_df,test_size=0.25, stratify = train_df[['trustworthiness','customer service']]) print(f"Train size: {train_df.shape}", f"Validation size: {val_df.shape}") ``` ### c. 
Check to see that all the sets have all the aspect categories ``` df_sets = [('Training',train_df),('Validation', val_df),('Test',test_df)] plt.figure(figsize = (12,6), dpi=300) for idx,df in enumerate(df_sets): plt.subplot(1,3,idx+1) plt.tight_layout() with plt.style.context(['notebook','no-latex','grid']): plt.title(df[0]) df[1][ASPECTS].sum().plot(kind = "bar", color = 'cornflowerblue') plt.xticks(rotation=90) if idx == 0: plt.ylabel('Number of tags in the tweets', fontsize=16) plt.show() ``` ## 6. Modeling - Aspect Extraction ### a. Load the tokenizer and the BERT model ``` from transformers import BertTokenizerFast as BertTokenizer, BertModel, AdamW, get_linear_schedule_with_warmup BERT_MODEL_NAME = 'bert-base-cased' #Load BerTweet tokenizer TOKENIZER = BertTokenizer.from_pretrained(BERT_MODEL_NAME) #Load the BERTweet model BERT_MODEL = BertModel.from_pretrained(BERT_MODEL_NAME, from_tf = True, return_dict=True) ``` ### b. Prepare the training, validation, and test sets ``` TRAIN_BATCH_SIZE = 16 TEST_BATCH_SIZE = 8 class Generate_PyTorch_Dataset(torch.utils.data.Dataset): def __init__(self, dataframe, tokenizer): self.dataframe = dataframe self.tokenizer = tokenizer self.max_len = tokenizer.model_max_length def __len__(self): return len(self.dataframe) def __getitem__(self, idx): #Get each row of the dataframe data_row = self.dataframe.iloc[idx] #Get the tweet text = str(data_row.Text) #Get the aspect labels labels = data_row[ASPECTS] #Encode the tweet encoded_text = self.tokenizer.encode_plus( text, add_special_tokens = True, truncation = True, return_attention_mask = True, return_token_type_ids = False, return_length = True, max_length = self.max_len, return_tensors = 'pt', padding = "max_length" ) return { 'input_ids': encoded_text['input_ids'].flatten(), 'attention_mask': encoded_text['attention_mask'].flatten(), 'labels': torch.tensor(labels, dtype=torch.float) } class PyTorchDataModule(pl.LightningDataModule): def __init__(self, train_df, val_df, 
test_df, tokenizer, train_batch_size=16, test_batch_size=8): super().__init__() self.train_df = train_df self.val_df = train_df self.test_df = test_df self.tokenizer = tokenizer self.train_batch_size = train_batch_size self.test_batch_size = test_batch_size self.max_len = self.tokenizer.model_max_length def setup(self): """ Setup the training, validation and test sets by converting them to Pytorch datasets """ self.train_dataset = Generate_PyTorch_Dataset(self.train_df,self.tokenizer) self.val_dataset = Generate_PyTorch_Dataset(self.val_df,self.tokenizer) self.test_dataset = Generate_PyTorch_Dataset(self.test_df,self.tokenizer) def train_dataloader(self): """ Training set dataloader """ return DataLoader(self.train_dataset, batch_size = self.train_batch_size, shuffle = True, num_workers = 2) def val_dataloader(self): """ Validation set dataloader """ return DataLoader(self.val_dataset, batch_size = self.test_batch_size, num_workers=2) def test_dataloader(self): """ Test set dataloader """ return DataLoader(self.test_dataset, batch_size = self.test_batch_size, num_workers=2) #Instantiate and set up the data_module data_module = PyTorchDataModule(train_df,val_df,test_df,TOKENIZER, TRAIN_BATCH_SIZE, TEST_BATCH_SIZE) data_module.setup() ``` ### c. 
Define the model ``` class ISP_TweetAspectClassifier(pl.LightningModule): #Set the aspect classifier def __init__(self, n_classes=6, n_training_steps=None, n_warmup_steps=None, lr=2e-5): super().__init__() self.bert = BERT_MODEL self.classifier = torch.nn.Linear(self.bert.config.hidden_size, n_classes) self.n_training_steps = n_training_steps self.n_warmup_steps = n_warmup_steps self.lr = lr self.criterion = torch.nn.BCELoss() def forward(self, input_ids, attention_mask, labels = None): output = self.bert(input_ids, attention_mask=attention_mask) output = self.classifier(output.pooler_output) output = torch.sigmoid(output) loss = 0 if labels is not None: loss = self.criterion(output, labels) return loss, output def training_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] loss, outputs = self(input_ids, attention_mask, labels) self.log("train_loss", loss, prog_bar=True, logger=True) return {"loss": loss, "predictions": outputs.detach(), "labels": labels} def validation_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] loss, outputs = self(input_ids, attention_mask,labels) self.log("val_loss", loss, prog_bar=True, logger=True) return loss def test_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] loss, outputs = self(input_ids, attention_mask, labels) self.log("test_loss", loss, prog_bar=True, logger=True) return loss def training_epoch_end(self, outputs): #List to store the true labels and the model's predictions labels = [] predictions = [] #Iterate through all the outputs and get the true vs. 
predicted label for output in outputs: for label in output["labels"].detach().cpu(): labels.append(label) for pred in output["predictions"].detach().cpu(): predictions.append(pred) #Stack the tensors labels = torch.stack(labels).int() predictions = torch.stack(predictions) #Record the AUROC for each aspect after each training epoch for idx, name in enumerate(ASPECTS): metric = AUROC() class_roc_auc = metric(predictions[:,idx], labels[:,idx]) self.logger.experiment.add_scalar(f"{name}_roc_auc/Train", class_roc_auc, self.current_epoch) def configure_optimizers(self): optimizer = AdamW(self.parameters(), lr = self.lr) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=self.n_warmup_steps, num_training_steps=self.n_training_steps) return {'optimizer': optimizer, 'lr_scheduler':{'scheduler':scheduler,'interval':'step'}} ``` class ISP_TweetAspectClassifier(pl.LightningModule): #Set the aspect classifier def __init__(self, n_classes=6, n_training_steps=None, n_warmup_steps=None, lr=2e-5): super().__init__() self.bertweet = BERTWEET_MODEL self.classifier = torch.nn.Linear(self.bertweet.config.hidden_size, n_classes) self.n_training_steps = n_training_steps self.n_warmup_steps = n_warmup_steps self.lr = lr self.criterion = torch.nn.BCEWithLogitsLoss() def forward(self, input_ids, attention_mask): output = self.bertweet(input_ids, attention_mask=attention_mask) output = self.classifier(output.pooler_output) output = torch.sigmoid(output) return output def training_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] outputs = self(input_ids, attention_mask) loss = self.criterion(outputs, labels) self.log("train_loss", loss, prog_bar=True, logger=True) return {"loss": loss, "predictions": outputs.detach(), "labels": labels} def validation_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] outputs = 
self(input_ids, attention_mask) loss = self.criterion(outputs, labels) self.log("val_loss", loss, prog_bar=True, logger=True) return loss def test_step(self, batch, batch_idx): input_ids = batch["input_ids"] attention_mask = batch["attention_mask"] labels = batch["labels"] outputs = self(input_ids, attention_mask) loss = self.criterion(outputs, labels) self.log("test_loss", loss, prog_bar=True, logger=True) return loss def training_epoch_end(self, outputs): #List to store the true labels and the model's predictions labels = [] predictions = [] #Iterate through all the outputs and get the true vs. predicted label for output in outputs: for label in output["labels"].detach().cpu(): labels.append(label) for pred in output["predictions"].detach().cpu(): predictions.append(pred) #Stack the tensors labels = torch.stack(labels).int() predictions = torch.stack(predictions) #Record the AUROC for each aspect after each training epoch for idx, name in enumerate(ASPECTS): metric = AUROC() class_roc_auc = metric(predictions[:,idx], labels[:,idx]) self.logger.experiment.add_scalar(f"{name}_roc_auc/Train", class_roc_auc, self.current_epoch) def configure_optimizers(self): optimizer = AdamW(self.parameters(), lr = self.lr) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=self.n_warmup_steps, num_training_steps=self.n_training_steps) return {'optimizer': optimizer, 'lr_scheduler':{'scheduler':scheduler,'interval':'step'}} ``` #Define variables N_EPOCHS = 20 STEPS_PER_EPOCH =len(train_df)//TRAIN_BATCH_SIZE TOTAL_TRAIN_STEPS = STEPS_PER_EPOCH * N_EPOCHS N_WARMUP_STEPS = TOTAL_TRAIN_STEPS // 4 #Instantiate the classifier model model = ISP_TweetAspectClassifier(n_training_steps = TOTAL_TRAIN_STEPS, n_warmup_steps = N_WARMUP_STEPS) ``` ### d. 
Training ``` #Setup callback to perform saves during training checkpoint_callback = ModelCheckpoint( dirpath = "../models/absa-aspect-extraction/bert", filename = "ae-{epoch:02d}-{val_loss:.2f}", save_top_k = 3, #Save the top 3 models verbose = True, monitor = "val_loss", mode = "min" #Minimize val loss ) #Log progress in Tensorboard logger = TensorBoardLogger("../models/absa-aspect-extraction/bert/lightning_logs", name = "isp-tweets") trainer = pl.Trainer( #gpus = 1, logger = logger, log_every_n_steps = 15, callbacks = [checkpoint_callback], max_epochs = N_EPOCHS, progress_bar_refresh_rate = 30 ) trainer.fit(model, data_module) ``` ### e. Model Evaluation ``` #Evaluate the model's performance on the test dataset trainer.test(model,data_module) # Visualize the logs using tensorboard. %load_ext tensorboard %tensorboard --logdir ../models/absa-aspect-extraction/bert/lightning_logs/ #%reload_ext tensorboard ``` #### (i) Evaluate model performance on a generated example ``` trainer.checkpoint_callback.best_model_path #Load the best model based on validation loss trained_model = ISP_TweetAspectClassifier.load_from_checkpoint( trainer.checkpoint_callback.best_model_path, n_classes=len(ASPECTS) ) trained_model #Put model into evaluation mode trained_model.eval() trained_model.freeze() test_comment = "The internet is so slow and it's so expensive" encoding = TOKENIZER.encode_plus( test_comment, add_special_tokens=True, max_length=TOKENIZER.model_max_length, return_token_type_ids=False, padding="max_length", return_attention_mask=True, return_tensors='pt', ) _, test_prediction = trained_model(encoding["input_ids"], encoding["attention_mask"]) test_prediction = test_prediction.flatten().numpy() for label, prediction in zip(ASPECTS, test_prediction): print(f"{label}: {prediction}") ``` Doesn't seem to do a good job distinguishing the different aspects for this case. 
#### (ii) Evaluate model performance on the validation set ``` device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') trained_model = trained_model.to(device) #Prepare the validation set val_dataset = Generate_PyTorch_Dataset( val_df, TOKENIZER, ) #Lists to store the model predictions and the true labels model_preds = [] true_labels = [] for item in tqdm(val_dataset): _, pred = trained_model( item["input_ids"].unsqueeze(dim=0).to(device), item["attention_mask"].unsqueeze(dim=0).to(device) ) model_preds.append(pred.flatten()) true_labels.append(item["labels"].int()) model_preds = torch.stack(model_preds).detach().cpu() true_labels = torch.stack(true_labels).detach().cpu() #Compute the accuracy on the validation set acc_metric = Accuracy() acc_metric(model_preds, true_labels) ``` We get a fairly high accuracy on the validation set. However, we note that the dataset is quite imbalanced. So it would be important to also check the model's performance on the different aspects (remember: trustworthiness had very few samples) ``` auroc_metric = AUROC(pos_label=1) print("AUROC per tag") for i, name in enumerate(ASPECTS): tag_auroc = auroc_metric(model_preds[:, i], true_labels[:, i]) print(f"{name}: {tag_auroc}") true_labels model_preds ``` ### Classification Report ``` y_pred = model_preds.numpy() y_true = true_labels.numpy() y_pred = np.where(y_pred > 0.2, 1, 0) print(classification_report( y_true, y_pred, target_names=ASPECTS, zero_division=0 )) ```
github_jupyter
## VISUALIZING YOUR FAVOURITE NBA PLAYER 3 POINTERS GRAPH Tools we are going to use: - The NBA API to get the data from any NBA player - CARTOframes to upload the data seamlessly to CARTO - The CARTO Python SDK to analyze and create a 3-pointers map - carto-print to generate a high resolution ready-to-print image #### Let's start by importing the required modules ``` import os import sys import time from carto.auth import APIKeyAuthClient from carto.maps import NamedMapManager from carto.print import Printer from nba_api.stats.static import players from nba_api.stats.static import teams from nba_api.stats.endpoints import shotchartdetail import pandas as pd from cartoframes.auth import Credentials, set_default_credentials from cartoframes import to_carto from cartoframes.data.clients import SQLClient import geopandas as gpd ``` #### Time to set the CARTO credentials to use ``` CARTO_BASE_URL = os.environ['CARTO_API_URL'] CARTO_BASE_URL = 'https://aromeu.carto.com/' CARTO_API_KEY = os.environ['CARTO_API_KEY'] CARTO_API_KEY = '424dec8b179567aace6ef7b229c9afa1d78d68e7' CARTO_USER_NAME = 'aromeu' ``` #### Set the player name and the teams he has played with ``` PLAYER_NAME = 'Stephen Curry' TEAMS_NAME = ['Golden State Warriors'] PLAYER_NAME = 'Russell Westbrook' TEAMS_NAME = ['Oklahoma City Thunder'] PLAYER_NAME = 'Damian Lillard' TEAMS_NAME = ['Portland Trail Blazers'] ``` #### Yes, just 10 lines to get all their shoting data ``` data = [] headers = [] player = players.find_players_by_full_name(PLAYER_NAME) player_id = player[0]['id'] for team_name in TEAMS_NAME: team = teams.find_teams_by_full_name(team_name) team_id = team[0]['id'] shots = shotchartdetail.ShotChartDetail(player_id=player_id, team_id=team_id) headers = shots.shot_chart_detail.data['headers'] data.extend(shots.shot_chart_detail.data['data']) ``` #### Let's go the data scientist path ``` #df = pd.DataFrame(data, columns=headers) PLAYER_NAME = 'stephen_curry' df = pd.read_csv(f'{PLAYER_NAME}.csv') 
df.head() ``` #### And send the data to your CARTO account ``` dataset_name = '_'.join(PLAYER_NAME.split(' ')).lower() creds = Credentials(base_url=CARTO_BASE_URL, api_key=CARTO_API_KEY) set_default_credentials(creds) # 2019 shots # YEAR = '2019' # bool_series = df["GAME_DATE"].str.startswith(YEAR, na = False) # displaying filtered dataframe # df = df[bool_series] gdf = gpd.GeoDataFrame( df, geometry=gpd.points_from_xy(df.LOC_X, df.LOC_Y)) to_carto(gdf, dataset_name, if_exists='replace') ``` #### Wait, shots locations are in pixels coordinates. Let's do a hacky trick and let's suppose we are using coordinates in meters ``` sql_client = SQLClient() sql_client.execute("UPDATE {} SET the_geom = st_transform(st_setsrid(st_geometryfromtext('POINT(' || ST_X(the_geom) || ' ' || ST_Y(the_geom) || ')'), 3857), 4326)".format(dataset_name)) ``` #### Let's now compose a 7 layers map. If you wonder how I get to imagine this, I used BUILDER + some PostGIS wizardry + a lot (I mean a lot) of trial/error for the styling ``` def create_named_map(auth_client, dataset_name, map_name, factor): template = { "version": "0.0.1", "name": map_name, "auth": { "method": "open" }, "placeholders": {}, "view": {}, "layergroup": { "version": "1.0.1", "layers": [ { "type": "plain", "options": { "color": "#2d2d2d" } }, { "type": "cartodb", "options": { "cartocss_version": "2.1.1", "cartocss": '''#layer { polygon-fill: #2a2a2a; polygon-opacity: 0.9; } #layer::outline { line-width: 2 * %d; line-color: #4edce6; line-opacity: 1; }''' % (factor), "sql": '''SELECT 1 AS cartodb_id, the_geom, the_geom_webmercator FROM aromeu.basketball_court UNION SELECT 1 AS cartodb_id, the_geom, the_geom_webmercator FROM aromeu.basketball_court_markers''' } }, { "type": "cartodb", "options": { "cartocss_version": "2.1.1", "cartocss": '''#layer { line-width: 30 * %d; line-comp-op: screen; line-opacity: 0.08; [shot_distance >= 31] { line-color: #fff500; line-width: 0.3 * %d; } [shot_distance >= 22][shot_distance < 24] { 
line-color: #0d3781; line-opacity: 0.1; } [shot_distance >= 24][shot_distance < 26] { line-color: #681a87; line-opacity: 0.1; } [shot_distance >= 26][shot_distance < 28] { line-color: #8a1377; } [shot_distance >= 28][shot_distance < 31] { line-color: #ee29ac; } image-filters: agg-stack-blur(45 * %d, 45 * %d); }''' % (factor, factor, factor, factor), "sql": '''WITH a AS ( SELECT *, st_transform(the_geom, 3857) as the_geom_webmercator, ST_Length(the_geom::geography) / 1000 AS length FROM ( SELECT ST_MakeLine( the_geom, ST_SetSRID( ST_MakePoint( -1.53456990177195e-22, -3.17697838071347e-15 ), 4326 ) ) AS the_geom, cartodb_id, grid_type, game_id, game_event_id, player_id, player_name, team_id, team_name, period, minutes_remaining, seconds_remaining, event_type, action_type, shot_type, shot_zone_basic, shot_zone_area, shot_zone_range, shot_distance, loc_x, loc_y, shot_attempted_flag, shot_made_flag, game_date, htm, vtm FROM (SELECT * FROM {dataset} WHERE shot_distance >= 22 and shot_distance < 30 and shot_made_flag != 0) _line_analysis ) _cdb_analysis_line_to_single_point ) SELECT * FROM a'''.format(dataset=dataset_name) } }, { "type": "cartodb", "options": { "cartocss_version": "2.1.1", "cartocss": '''#layer { line-width: 1 * %d; line-comp-op: screen; line-opacity: 0.7; [shot_distance >= 31] { line-color: #fff500; } [shot_distance >= 22][shot_distance < 24] { line-color: #0d3781; } [shot_distance >= 24][shot_distance < 26] { line-color: #681a87; } [shot_distance >= 26][shot_distance < 28] { line-color: #8a1377; } [shot_distance >= 28][shot_distance < 31] { line-color: #ee29ac; } }''' % (factor), "sql": '''WITH a AS ( SELECT *, st_transform(the_geom, 3857) as the_geom_webmercator, ST_Length(the_geom::geography) / 1000 AS length FROM ( SELECT ST_MakeLine( the_geom, ST_SetSRID( ST_MakePoint( -1.53456990177195e-22, -3.17697838071347e-15 ), 4326 ) ) AS the_geom, cartodb_id, grid_type, game_id, game_event_id, player_id, player_name, team_id, team_name, period, 
minutes_remaining, seconds_remaining, event_type, action_type, shot_type, shot_zone_basic, shot_zone_area, shot_zone_range, shot_distance, loc_x, loc_y, shot_attempted_flag, shot_made_flag, game_date, htm, vtm FROM (SELECT * FROM {dataset} WHERE shot_distance >= 22 and shot_distance < 30 and shot_made_flag != 0) _line_analysis ) _cdb_analysis_line_to_single_point ) SELECT * FROM a'''.format(dataset=dataset_name) } }, { "type": "cartodb", "options": { "cartocss_version": "2.1.1", "cartocss": '''#layer { line-width: 6 * %d; line-comp-op: screen; line-opacity: 0.2; line-color: #fff500; image-filters: agg-stack-blur(18 * %d, 18 * %d); }''' % (factor, factor, factor), "sql": '''WITH a AS ( SELECT *, st_transform(the_geom, 3857) as the_geom_webmercator, ST_Length(the_geom::geography) / 1000 AS length FROM ( SELECT ST_MakeLine( the_geom, ST_SetSRID( ST_MakePoint( -1.53456990177195e-22, -3.17697838071347e-15 ), 4326 ) ) AS the_geom, cartodb_id, grid_type, game_id, game_event_id, player_id, player_name, team_id, team_name, period, minutes_remaining, seconds_remaining, event_type, action_type, shot_type, shot_zone_basic, shot_zone_area, shot_zone_range, shot_distance, loc_x, loc_y, shot_attempted_flag, shot_made_flag, game_date, htm, vtm FROM (SELECT * FROM {dataset} WHERE shot_distance >= 30 and shot_made_flag != 0) _line_analysis ) _cdb_analysis_line_to_single_point ), points AS ( SELECT cartodb_id , loc_x, loc_y, shot_distance, ST_StartPoint(ST_LineMerge(the_geom_webmercator)) AS p1 , ST_EndPoint(ST_LineMerge(the_geom_webmercator)) AS p2 FROM a AS q2), mid AS (SELECT *, ST_SetSRID(ST_MakePoint((ST_X(p2) - ST_X(p1))/2 + ST_X(p1), (ST_Y(p2) - ST_Y(p1))/3 + ST_Y(p1)), 3857) AS midpoint, PI()/2 - ST_Azimuth(p1, p2) AS angle, ST_Distance(p1, p2)/6 AS radius FROM points), third AS (SELECT *, ST_Translate(midpoint, sign(loc_x) *0.005 *sin(angle)*radius, 0 *cos(angle)*radius) AS p3 FROM mid) SELECT *, ST_SetSRID(ST_CurveToLine('CIRCULARSTRING( ' || ST_X(p1) || ' ' || ST_Y(p1) || 
',' || ST_X(p3) || ' ' || ST_Y(p3) || ',' || ST_X(p2) || ' ' || ST_Y(p2) || ')'), 3857) AS the_geom_webmercator FROM third'''.format(dataset=dataset_name) } }, { "type": "cartodb", "options": { "cartocss_version": "2.1.1", "cartocss": '''#layer { line-width: 1.5 * %d; line-comp-op: lighten; line-opacity: 0.7; line-color: #fff500; }''' % (factor), "sql": '''WITH a AS ( SELECT *, st_transform(the_geom, 3857) as the_geom_webmercator, ST_Length(the_geom::geography) / 1000 AS length FROM ( SELECT ST_MakeLine( the_geom, ST_SetSRID( ST_MakePoint( -1.53456990177195e-22, -3.17697838071347e-15 ), 4326 ) ) AS the_geom, cartodb_id, grid_type, game_id, game_event_id, player_id, player_name, team_id, team_name, period, minutes_remaining, seconds_remaining, event_type, action_type, shot_type, shot_zone_basic, shot_zone_area, shot_zone_range, shot_distance, loc_x, loc_y, shot_attempted_flag, shot_made_flag, game_date, htm, vtm FROM (SELECT * FROM {dataset} WHERE shot_distance >= 30 and shot_made_flag != 0) _line_analysis ) _cdb_analysis_line_to_single_point ), points AS ( SELECT cartodb_id , loc_x, loc_y, shot_distance, ST_StartPoint(ST_LineMerge(the_geom_webmercator)) AS p1 , ST_EndPoint(ST_LineMerge(the_geom_webmercator)) AS p2 FROM a AS q2), mid AS (SELECT *, ST_SetSRID(ST_MakePoint((ST_X(p2) - ST_X(p1))/2 + ST_X(p1), (ST_Y(p2) - ST_Y(p1))/3 + ST_Y(p1)), 3857) AS midpoint, PI()/2 - ST_Azimuth(p1, p2) AS angle, ST_Distance(p1, p2)/6 AS radius FROM points), third AS (SELECT *, ST_Translate(midpoint, sign(loc_x) *0.005 *sin(angle)*radius, 0 *cos(angle)*radius) AS p3 FROM mid) SELECT *, ST_SetSRID(ST_CurveToLine('CIRCULARSTRING( ' || ST_X(p1) || ' ' || ST_Y(p1) || ',' || ST_X(p3) || ' ' || ST_Y(p3) || ',' || ST_X(p2) || ' ' || ST_Y(p2) || ')'), 3857) AS the_geom_webmercator FROM third'''.format(dataset=dataset_name) } }, { "type": "cartodb", "options": { "cartocss_version": "2.1.1", "cartocss": '''#layer['mapnik::geometry_type'=1] { marker-fill: #fff; marker-width: 12 * %d; 
marker-line-color: #fff; marker-line-width: 0; marker-line-opacity: 1; marker-opacity: 0.6; marker-type: ellipse; marker-placement: point; marker-allow-overlap: true; marker-comp-op: lighten; marker-clip: false; marker-multi-policy: largest; image-filters: agg-stack-blur(18 * %d, 18 * %d); }''' % (factor, factor, factor), "sql": '''with a as (select action_type,game_event_id,game_id,minutes_remaining,period,seconds_remaining,shot_distance,shot_made_flag,shot_type,shot_zone_area,shot_zone_basic,shot_zone_range,team_id,team_name,game_date, the_geom_webmercator, the_geom from {dataset}) SELECT 1 as cartodb_id, * FROM a WHERE (shot_distance >= 22 and (shot_zone_area like '%(R)' or shot_zone_area like '%(L)') and shot_zone_basic != 'Mid-Range') or (shot_distance >= 24 and shot_zone_basic != 'Mid-Range') and shot_type = '3PT Field Goal' and shot_made_flag != 0'''.format(dataset=dataset_name) } }, { "type": "cartodb", "options": { "cartocss_version": "2.1.1", "cartocss": '''#layer['mapnik::geometry_type'=1] { marker-fill: #fff; marker-width: 9 * %d; marker-line-color: #fff; marker-line-width: 3 * %d; marker-line-opacity: 1; marker-opacity: 0.3; marker-type: ellipse; marker-placement: point; marker-allow-overlap: true; marker-comp-op: lighten; marker-clip: false; marker-multi-policy: largest; }''' % (factor, factor), "sql": '''with a as (select action_type,game_event_id,game_id,minutes_remaining,period,seconds_remaining,shot_distance,shot_made_flag,shot_type,shot_zone_area,shot_zone_basic,shot_zone_range,team_id,team_name,game_date, the_geom_webmercator, the_geom from {dataset}) SELECT 1 as cartodb_id, * FROM a WHERE (shot_distance >= 22 and (shot_zone_area like '%(R)' or shot_zone_area like '%(L)') and shot_zone_basic != 'Mid-Range') or (shot_distance >= 24 and shot_zone_basic != 'Mid-Range') and shot_type = '3PT Field Goal' and shot_made_flag != 0'''.format(dataset=dataset_name) } } ] } } named_map_manager = NamedMapManager(auth_client) try: named_map = 
named_map_manager.get(map_name) if named_map is not None: named_map.client = auth_client named_map.delete() except Exception as e: #ignore print(e) return named_map_manager.create(template=template) ``` #### This is how we authenticate the CARTO Python SDK ``` auth_client = APIKeyAuthClient(CARTO_BASE_URL, CARTO_API_KEY) ``` #### This is one of the things I love the most from CARTO: prototype with BUILDER + then template your map and finally use APIs to produce maps programmatically For this specific case, we have parameterized the line and markers widths, so with a single template, we can produce maps that we can use to share a screenshot (with 72DPI) or to export for high resolution printing (with 300DPI). Let's go for the poster printing ``` DPI = 72 FACTOR = DPI / 72.0 map_name = 'tpl_' + dataset_name + str(int(round(time.time() * 1000))) create_named_map(auth_client, dataset_name, map_name, FACTOR) ``` #### Aaaand we're mostly done. Let's export a huge-high-resolution image ``` map = { 'username': CARTO_USER_NAME, 'map_id': map_name, 'width': 120, 'height': 80, 'dpi': DPI, 'zoom': 18, 'bounds': {"ne":[-0.000977916642979147,-0.004578593652695418],"sw":[0.004981951781790824,0.004288789350539447]}, 'api_key': CARTO_API_KEY } p = Printer(map['username'], map['map_id'], map['api_key'], map['width'], map['height'], map['zoom'], map['bounds'], map['dpi'], 'RGBA') image_path = p.export('.') image_path ``` #### How it looks like?? 
![](aromeu_tpl_stephen_curry1588194572838_20200429230933.png) #### Clean some stuff and close the door when you leave, please ``` named_map_manager = NamedMapManager(auth_client) try: named_map = named_map_manager.get(map_name) if named_map is not None: named_map.client = auth_client named_map.delete() except Exception as e: #ignore print(e) --WHERE (shot_distance >= 22 and (shot_zone_area like '%(R)' or shot_zone_area like '%(L)') and shot_zone_basic != 'Mid-Range') or (shot_distance >= 24 and shot_zone_basic != 'Mid-Range') and shot_type = '3PT Field Goal' and shot_made_flag != 0 ```
github_jupyter
## Prediction sine wave function using Gaussian Process

An example for Gaussian process algorithm to predict sine wave function. This example is from ["Gaussian Processes regression: basic introductory example"](http://scikit-learn.org/stable/auto_examples/gaussian_process/plot_gp_regression.html).

```
import numpy as np
# NOTE(review): sklearn.gaussian_process.GaussianProcess was deprecated in
# scikit-learn 0.18 and removed in 0.20. Running this notebook requires an
# old scikit-learn, or porting to GaussianProcessRegressor (kernel-based API).
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
%matplotlib inline

# Fix the RNG seed so the noisy example below is reproducible.
np.random.seed(1)

# The function to predict
def f(x):
    return x*np.sin(x)

# --------------------------
# First the noiseless case
# --------------------------
# Observations
X = np.atleast_2d([0., 1., 2., 3., 5., 6., 7., 8., 9.5]).T
y = f(X).ravel()
#X = np.atleast_2d(np.linspace(0, 100, 200)).T

# Mesh the input space for evaluations of the real function, the prediction and its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                     random_start=100)

# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)

# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)

# Plot the function, the prediction and the 95% confidence interval based on the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                        (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')

# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T

# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise

# Mesh the input space for evaluations of the real function, the prediction and its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Instantiate a Gaussian Process model
# (the nugget term injects the per-point noise variance into the fit)
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
                     thetaL=1e-3, thetaU=1,
                     nugget=(dy / y) ** 2,
                     random_start=100)

# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)

# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)

# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                        (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')

pl.show()
```
github_jupyter
Clustering Mash distances to obtain clonal groups for all Salmonella

```
library('FactoMineR')
library('factoextra')
library('readxl')
library('dplyr')

mash_dist_file = '../data/interim/mash_distance_matrix.csv'
meta_excel_file = '../data/raw/GenotypicAMR_Master.xlsx'

# Pairwise Mash distance matrix: rows/columns are genome run accessions.
distances <- read.csv(mash_dist_file, header=TRUE, row.names=1, stringsAsFactors=FALSE)

# Silence warnings while reading the metadata sheet, then restore them.
options(warn=-1)
metadf = read_excel(meta_excel_file, na='-')
options(warn=0)

# Serovar labels keyed by run accession; missing entries get a placeholder.
serodf <- as.data.frame(metadf['serovar'])
serodf$serovar <- tolower(serodf$serovar)
srrs <- sapply(metadf['run'], as.character)
rownames(serodf) <- srrs
serodf[is.na(serodf$serovar),] = 'No serovar'

# Reduce the distance matrix to 20 principal components before clustering.
pcs <- PCA(distances, scale.unit=FALSE, ncp = 20, graph = FALSE)

# Sweep the number of clusters k; for each k, count genomes of a serovar that
# land outside that serovar's majority cluster ("misclassified").
fp = list()
cfit = list()
for(k in 5:15) {
    cat(k)
    cat("\n")
    clu <- HCPC(pcs, graph = FALSE, nb.clust=k)
    clustdf <- clu$data.clust['clust']
    clustdf = merge(clustdf, serodf, by=0, all.x=TRUE, all.y=FALSE)
    cluster_counts = clustdf %>% group_by(serovar) %>% summarise(n_clusters = n_distinct(clust))
    # Find serovars split across clusters
    splitserovars = cluster_counts$serovar[cluster_counts['n_clusters'] >= 2]
    # For serovars split across clusters, count number in each cluster
    splitcounts = clustdf %>% count(serovar, clust) %>% filter(serovar %in% splitserovars) %>% arrange(desc(serovar))
    # Count all genomes in non-majority clusters
    misclassified = splitcounts %>% group_by(serovar) %>% filter(n != max(n)) %>% filter(serovar != 'No serovar')
    fp[[k]] = sum(misclassified['n'])
    cfit[[k]] = clu
}

# NOTE(review): this second block repeats the loop body for k = 9 only, and
# re-initialises fp/cfit — so the sweep results above are discarded before
# print(fp) below. Confirm whether that is intentional.
pcs <- PCA(distances, scale.unit=FALSE, ncp = 20, graph = FALSE)
fp = list()
cfit = list()
k = 9
cat(k)
cat("\n")
clu <- HCPC(pcs, graph = FALSE, nb.clust=k)
clustdf <- clu$data.clust['clust']
clustdf = merge(clustdf, serodf, by=0, all.x=TRUE, all.y=FALSE)
cluster_counts = clustdf %>% group_by(serovar) %>% summarise(n_clusters = n_distinct(clust))
# Find serovars split across clusters
splitserovars = cluster_counts$serovar[cluster_counts['n_clusters'] >= 2]
# For serovars split across clusters, count number in each cluster
splitcounts = clustdf %>% count(serovar, clust) %>% filter(serovar %in% splitserovars) %>% arrange(desc(serovar))
# Count all genomes in non-majority clusters
misclassified = splitcounts %>% group_by(serovar) %>% filter(n != max(n)) %>% filter(serovar != 'No serovar')
fp[[k]] = sum(misclassified['n'])
cfit[[k]] = clu

# fp = number of genomes that are in wrong clusters, i.e. the correct cluster is the cluster with the most serovar genomes
print(fp)

clustdf <- cfit[[9]]$data.clust['clust']
table(clustdf)

# Scree plot of explained variance per principal component.
fviz_eig(pcs)

plot(cfit[[9]], choice = "3D.map")

# Per-cluster serovar composition (only combinations with > 2 genomes).
clustdf2 = merge(clustdf, serodf, by=0, all.x=TRUE, all.y=FALSE)
cluster_counts = clustdf2 %>% group_by(clust) %>% count(serovar) %>% filter(n>2)
cluster_counts
```
github_jupyter
# Convolutional Neural Networks --- In this notebook, we train a **CNN** to classify images from the CIFAR-10 database. The images in this database are small color images that fall into one of ten classes; some example images are pictured below. <img src='notebook_ims/cifar_data.png' width=70% height=70% /> ### Test for [CUDA](http://pytorch.org/docs/stable/cuda.html) Since these are larger (32x32x3) images, it may prove useful to speed up your training time by using a GPU. CUDA is a parallel computing platform and CUDA Tensors are the same as typical Tensors, only they utilize GPU's for computation. ``` import torch import numpy as np # check if CUDA is available train_on_gpu = torch.cuda.is_available() if not train_on_gpu: print('CUDA is not available. Training on CPU ...') else: print('CUDA is available! Training on GPU ...') ``` --- ## Load and Augment the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html) Downloading may take a minute. We load in the training and test data, split the training data into a training and validation set, then create DataLoaders for each of these sets of data. #### Augmentation In this cell, we perform some simple [data augmentation](https://medium.com/nanonets/how-to-use-deep-learning-when-you-have-limited-data-part-2-data-augmentation-c26971dc8ced) by randomly flipping and rotating the given image data. We do this by defining a torchvision `transform`, and you can learn about all the transforms that are used to pre-process and augment data, [here](https://pytorch.org/docs/stable/torchvision/transforms.html). #### TODO: Look at the [transformation documentation](https://pytorch.org/docs/stable/torchvision/transforms.html); add more augmentation transforms, and see how your model performs. This type of data augmentation should add some positional variety to these images, so that when we train a model on this data, it will be robust in the face of geometric changes (i.e. 
it will recognize a ship, no matter which direction it is facing). It's recommended that you choose one or two transforms. ``` from torchvision import datasets import torchvision.transforms as transforms from torch.utils.data.sampler import SubsetRandomSampler # number of subprocesses to use for data loading num_workers = 0 # how many samples per batch to load batch_size = 20 # percentage of training set to use as validation valid_size = 0.2 # convert data to a normalized torch.FloatTensor transform = transforms.Compose([ transforms.RandomHorizontalFlip(), # randomly flip and rotate transforms.RandomRotation(10), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) # choose the training and test datasets train_data = datasets.CIFAR10('data', train=True, download=True, transform=transform) test_data = datasets.CIFAR10('data', train=False, download=True, transform=transform) # obtain training indices that will be used for validation num_train = len(train_data) indices = list(range(num_train)) np.random.shuffle(indices) split = int(np.floor(valid_size * num_train)) train_idx, valid_idx = indices[split:], indices[:split] # define samplers for obtaining training and validation batches train_sampler = SubsetRandomSampler(train_idx) valid_sampler = SubsetRandomSampler(valid_idx) # prepare data loaders (combine dataset and sampler) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers) valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers) # specify the image classes classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] ``` ### Visualize a Batch of Training Data ``` import matplotlib.pyplot as plt %matplotlib inline # helper function to un-normalize and display 
an image def imshow(img): img = img / 2 + 0.5 # unnormalize plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image # obtain one batch of training images dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() # convert images to numpy for display # plot the images in the batch, along with the corresponding labels fig = plt.figure(figsize=(25, 4)) # display 20 images for idx in np.arange(20): ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[]) imshow(images[idx]) ax.set_title(classes[labels[idx]]) ``` ### View an Image in More Detail Here, we look at the normalized red, green, and blue (RGB) color channels as three separate, grayscale intensity images. ``` rgb_img = np.squeeze(images[3]) channels = ['red channel', 'green channel', 'blue channel'] fig = plt.figure(figsize = (36, 36)) for idx in np.arange(rgb_img.shape[0]): ax = fig.add_subplot(1, 3, idx + 1) img = rgb_img[idx] ax.imshow(img, cmap='gray') ax.set_title(channels[idx]) width, height = img.shape thresh = img.max()/2.5 for x in range(width): for y in range(height): val = round(img[x][y],2) if img[x][y] !=0 else 0 ax.annotate(str(val), xy=(y,x), horizontalalignment='center', verticalalignment='center', size=8, color='white' if img[x][y]<thresh else 'black') ``` --- ## Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) This time, you'll define a CNN architecture. Instead of an MLP, which used linear, fully-connected layers, you'll use the following: * [Convolutional layers](https://pytorch.org/docs/stable/nn.html#conv2d), which can be thought of as stack of filtered images. * [Maxpooling layers](https://pytorch.org/docs/stable/nn.html#maxpool2d), which reduce the x-y size of an input, keeping only the most _active_ pixels from the previous layer. * The usual Linear + Dropout layers to avoid overfitting and produce a 10-dim output. 
A network with 2 convolutional layers is shown in the image below and in the code, and you've been given starter code with one convolutional and one maxpooling layer.

<img src='notebook_ims/2_layer_conv.png' height=50% width=50% />

#### TODO: Define a model with multiple convolutional layers, and define the feedforward network behavior.

The more convolutional layers you include, the more complex patterns in color and shape a model can detect. It's suggested that your final model include 2 or 3 convolutional layers as well as linear layers + dropout in between to avoid overfitting.

It's good practice to look at existing research and implementations of related models as a starting point for defining your own models. You may find it useful to look at [this PyTorch classification example](https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py) or [this, more complex Keras example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py) to help decide on a final structure.

#### Output volume for a convolutional layer

To compute the output size of a given convolutional layer we can perform the following calculation (taken from [Stanford's cs231n course](http://cs231n.github.io/convolutional-networks/#layers)):
> We can compute the spatial size of the output volume as a function of the input volume size (W), the kernel/filter size (F), the stride with which they are applied (S), and the amount of zero padding used (P) on the border. The correct formula for calculating how many neurons define the output_W is given by `(W−F+2P)/S+1`.

For example for a 7x7 input and a 3x3 filter with stride 1 and pad 0 we would get a 5x5 output. With stride 2 we would get a 3x3 output.
```
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    """CNN for CIFAR-10: three conv/pool stages (3->16->32->64 channels, each
    halving the 32x32 spatial size) followed by two fully-connected layers
    with dropout, producing 10 class scores."""

    def __init__(self):
        super(Net, self).__init__()
        # convolutional layer (sees 32x32x3 image tensor)
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        # convolutional layer (sees 16x16x16 tensor)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        # convolutional layer (sees 8x8x32 tensor)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        # linear layer (64 * 4 * 4 -> 500)
        self.fc1 = nn.Linear(64 * 4 * 4, 500)
        # linear layer (500 -> 10)
        self.fc2 = nn.Linear(500, 10)
        # dropout layer (p=0.25)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        """Return raw class scores (logits) for a batch of 32x32x3 images."""
        # add sequence of convolutional and max pooling layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        # flatten image input
        x = x.view(-1, 64 * 4 * 4)
        # add dropout layer
        x = self.dropout(x)
        # add 1st hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        # add dropout layer
        x = self.dropout(x)
        # add 2nd hidden layer, with relu activation function
        x = self.fc2(x)
        return x

# create a complete CNN
model = Net()
print(model)

# move tensors to GPU if CUDA is available
if train_on_gpu:
    model.cuda()
```

### Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html)

Decide on a loss and optimization function that is best suited for this classification task. The linked code examples from above, may be a good starting point; [this PyTorch classification example](https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py) or [this, more complex Keras example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py). Pay close attention to the value for **learning rate** as this value determines how your model converges to a small error.
#### TODO: Define the loss and optimizer and see how these choices change the loss over time. ``` import torch.optim as optim # specify loss function (categorical cross-entropy) criterion = nn.CrossEntropyLoss() # specify optimizer optimizer = optim.SGD(model.parameters(), lr=0.01) ``` --- ## Train the Network Remember to look at how the training and validation loss decreases over time; if the validation loss ever increases it indicates possible overfitting. ``` # number of epochs to train the model n_epochs = 30 valid_loss_min = np.Inf # track change in validation loss for epoch in range(1, n_epochs+1): # keep track of training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(train_loader): # move tensors to GPU if CUDA is available if train_on_gpu: data, target = data.cuda(), target.cuda() # clear the gradients of all optimized variables optimizer.zero_grad() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update training loss train_loss += loss.item()*data.size(0) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(valid_loader): # move tensors to GPU if CUDA is available if train_on_gpu: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # update average validation loss valid_loss += loss.item()*data.size(0) # calculate average losses train_loss = train_loss/len(train_loader.sampler) valid_loss = valid_loss/len(valid_loader.sampler) # print 
training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss)) # save model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), 'model_augmented.pt') valid_loss_min = valid_loss ``` ### Load the Model with the Lowest Validation Loss ``` model.load_state_dict(torch.load('model_augmented.pt')) ``` --- ## Test the Trained Network Test your trained model on previously unseen data! A "good" result will be a CNN that gets around 70% (or more, try your best!) accuracy on these test images. ``` # track test loss test_loss = 0.0 class_correct = list(0. for i in range(10)) class_total = list(0. for i in range(10)) model.eval() # iterate over test data for batch_idx, (data, target) in enumerate(test_loader): # move tensors to GPU if CUDA is available if train_on_gpu: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # update test loss test_loss += loss.item()*data.size(0) # convert output probabilities to predicted class _, pred = torch.max(output, 1) # compare predictions to true label correct_tensor = pred.eq(target.data.view_as(pred)) correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy()) # calculate test accuracy for each object class for i in range(batch_size): label = target.data[i] class_correct[label] += correct[i].item() class_total[label] += 1 # average test loss test_loss = test_loss/len(test_loader.dataset) print('Test Loss: {:.6f}\n'.format(test_loss)) for i in range(10): if class_total[i] > 0: print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % ( classes[i], 100 * class_correct[i] / class_total[i], np.sum(class_correct[i]), 
np.sum(class_total[i]))) else: print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i])) print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % ( 100. * np.sum(class_correct) / np.sum(class_total), np.sum(class_correct), np.sum(class_total))) ``` ### Visualize Sample Test Results ``` # obtain one batch of test images dataiter = iter(test_loader) images, labels = dataiter.next() images.numpy() # move model inputs to cuda, if GPU available if train_on_gpu: images = images.cuda() # get sample outputs output = model(images) # convert output probabilities to predicted class _, preds_tensor = torch.max(output, 1) preds = np.squeeze(preds_tensor.numpy()) if not train_on_gpu else np.squeeze(preds_tensor.cpu().numpy()) # plot the images in the batch, along with predicted and true labels fig = plt.figure(figsize=(25, 4)) for idx in np.arange(20): ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[]) imshow(images[idx]) ax.set_title("{} ({})".format(classes[preds[idx]], classes[labels[idx]]), color=("green" if preds[idx]==labels[idx].item() else "red")) ```
github_jupyter
# Deep Convolutional Generative Adversarial Networks :label:`sec_dcgan` In :numref:`sec_basic_gan`, we introduced the basic ideas behind how GANs work. We showed that they can draw samples from some simple, easy-to-sample distribution, like a uniform or normal distribution, and transform them into samples that appear to match the distribution of some dataset. And while our example of matching a 2D Gaussian distribution got the point across, it is not especially exciting. In this section, we will demonstrate how you can use GANs to generate photorealistic images. We will be basing our models on the deep convolutional GANs (DCGAN) introduced in :cite:`Radford.Metz.Chintala.2015`. We will borrow the convolutional architecture that have proven so successful for discriminative computer vision problems and show how via GANs, they can be leveraged to generate photorealistic images. ``` from d2l import torch as d2l import torch import torchvision from torch import nn import warnings ``` ## The Pokemon Dataset The dataset we will use is a collection of Pokemon sprites obtained from [pokemondb](https://pokemondb.net/sprites). First download, extract and load this dataset. ``` #@save d2l.DATA_HUB['pokemon'] = (d2l.DATA_URL + 'pokemon.zip', 'c065c0e2593b8b161a2d7873e42418bf6a21106c') data_dir = d2l.download_extract('pokemon') pokemon = torchvision.datasets.ImageFolder(data_dir) ``` We resize each image into $64\times 64$. The `ToTensor` transformation will project the pixel value into $[0, 1]$, while our generator will use the tanh function to obtain outputs in $[-1, 1]$. Therefore we normalize the data with $0.5$ mean and $0.5$ standard deviation to match the value range. 
```
batch_size = 256
transformer = torchvision.transforms.Compose([
    torchvision.transforms.Resize((64, 64)),
    torchvision.transforms.ToTensor(),
    # map pixel values from [0, 1] to [-1, 1] to match the generator's tanh output
    torchvision.transforms.Normalize(0.5, 0.5)
])
pokemon.transform = transformer
data_iter = torch.utils.data.DataLoader(
    pokemon, batch_size=batch_size,
    shuffle=True, num_workers=d2l.get_dataloader_workers())
```

Let us visualize the first 20 images.

```
warnings.filterwarnings('ignore')
d2l.set_figsize((4, 4))
for X, y in data_iter:
    # undo the Normalize(0.5, 0.5) transform so the images display correctly
    imgs = X[0:20,:,:,:].permute(0, 2, 3, 1)/2+0.5
    d2l.show_images(imgs, num_rows=4, num_cols=5)
    break
```

## The Generator

The generator needs to map the noise variable $\mathbf z\in\mathbb R^d$, a length-$d$ vector, to an RGB image with width and height to be $64\times 64$ . In :numref:`sec_fcn` we introduced the fully convolutional network that uses transposed convolution layer (refer to :numref:`sec_transposed_conv`) to enlarge input size. The basic block of the generator contains a transposed convolution layer followed by the batch normalization and ReLU activation.

```
class G_block(nn.Module):
    """Generator building block: transposed conv -> batch norm -> ReLU."""

    def __init__(self, out_channels, in_channels=3, kernel_size=4, strides=2,
                 padding=1, **kwargs):
        super(G_block, self).__init__(**kwargs)
        # bias=False because the following BatchNorm2d has its own shift term
        self.conv2d_trans = nn.ConvTranspose2d(in_channels, out_channels,
                                               kernel_size, strides, padding,
                                               bias=False)
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, X):
        return self.activation(self.batch_norm(self.conv2d_trans(X)))
```

By default, the transposed convolution layer uses a $k_h = k_w = 4$ kernel, a $s_h = s_w = 2$ strides, and a $p_h = p_w = 1$ padding. With an input shape of $n_h^{'} \times n_w^{'} = 16 \times 16$, the generator block will double the input's width and height.
$$ \begin{aligned} n_h^{'} \times n_w^{'} &= [(n_h k_h - (n_h-1)(k_h-s_h)- 2p_h] \times [(n_w k_w - (n_w-1)(k_w-s_w)- 2p_w]\\ &= [(k_h + s_h (n_h-1)- 2p_h] \times [(k_w + s_w (n_w-1)- 2p_w]\\ &= [(4 + 2 \times (16-1)- 2 \times 1] \times [(4 + 2 \times (16-1)- 2 \times 1]\\ &= 32 \times 32 .\\ \end{aligned} $$ ``` x = torch.zeros((2, 3, 16, 16)) g_blk = G_block(20) g_blk(x).shape ``` If changing the transposed convolution layer to a $4\times 4$ kernel, $1\times 1$ strides and zero padding. With a input size of $1 \times 1$, the output will have its width and height increased by 3 respectively. ``` x = torch.zeros((2, 3, 1, 1)) g_blk = G_block(20, strides=1, padding=0) g_blk(x).shape ``` The generator consists of four basic blocks that increase input's both width and height from 1 to 32. At the same time, it first projects the latent variable into $64\times 8$ channels, and then halve the channels each time. At last, a transposed convolution layer is used to generate the output. It further doubles the width and height to match the desired $64\times 64$ shape, and reduces the channel size to $3$. The tanh activation function is applied to project output values into the $(-1, 1)$ range. ``` n_G = 64 net_G = nn.Sequential( G_block(in_channels=100, out_channels=n_G*8, strides=1, padding=0), # Output: (64 * 8, 4, 4) G_block(in_channels=n_G*8, out_channels=n_G*4), # Output: (64 * 4, 8, 8) G_block(in_channels=n_G*4, out_channels=n_G*2), # Output: (64 * 2, 16, 16) G_block(in_channels=n_G*2, out_channels=n_G), # Output: (64, 32, 32) nn.ConvTranspose2d(in_channels=n_G, out_channels=3, kernel_size=4, stride=2, padding=1, bias=False), nn.Tanh()) # Output: (3, 64, 64) ``` Generate a 100 dimensional latent variable to verify the generator's output shape. ``` x = torch.zeros((1, 100, 1, 1)) net_G(x).shape ``` ## Discriminator The discriminator is a normal convolutional network network except that it uses a leaky ReLU as its activation function. 
Given $\alpha \in[0, 1]$, its definition is

$$\textrm{leaky ReLU}(x) = \begin{cases}x & \text{if}\ x > 0\\ \alpha x &\text{otherwise}\end{cases}.$$

As can be seen, it is normal ReLU if $\alpha=0$, and an identity function if $\alpha=1$. For $\alpha \in (0, 1)$, leaky ReLU is a nonlinear function that gives a non-zero output for a negative input. It aims to fix the "dying ReLU" problem that a neuron might always output a negative value and therefore cannot make any progress since the gradient of ReLU is 0.

```
alphas = [0, .2, .4, .6, .8, 1]
x = torch.arange(-2, 1, 0.1)
# plot the leaky-ReLU response for each slope alpha on the same axes
Y = [nn.LeakyReLU(alpha)(x).detach().numpy() for alpha in alphas]
d2l.plot(x.detach().numpy(), Y, 'x', 'y', alphas)
```

The basic block of the discriminator is a convolution layer followed by a batch normalization layer and a leaky ReLU activation. The hyperparameters of the convolution layer are similar to the transpose convolution layer in the generator block.

```
class D_block(nn.Module):
    """Discriminator building block: strided conv -> batch norm -> leaky ReLU."""

    def __init__(self, out_channels, in_channels=3, kernel_size=4, strides=2,
                 padding=1, alpha=0.2, **kwargs):
        super(D_block, self).__init__(**kwargs)
        # bias=False because the following BatchNorm2d has its own shift term
        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size,
                                strides, padding, bias=False)
        self.batch_norm = nn.BatchNorm2d(out_channels)
        # alpha is the negative-slope coefficient; inplace=True saves memory
        self.activation = nn.LeakyReLU(alpha, inplace=True)

    def forward(self, X):
        return self.activation(self.batch_norm(self.conv2d(X)))
```

A basic block with default settings will halve the width and height of the inputs, as we demonstrated in :numref:`sec_padding`.
For example, given an input shape $n_h = n_w = 16$, with a kernel shape $k_h = k_w = 4$, a stride shape $s_h = s_w = 2$, and a padding shape $p_h = p_w = 1$, the output shape will be:
```
def train(net_D, net_G, data_iter, num_epochs, lr, latent_dim,
          device=d2l.try_gpu()):
    """Adversarially train discriminator `net_D` and generator `net_G`.

    Plots the per-epoch losses and a grid of generated samples via the d2l
    Animator, and prints the final losses and throughput.
    """
    loss = nn.BCEWithLogitsLoss(reduction='sum')
    # initialize all weights from N(0, 0.02), per the DCGAN recipe
    for w in net_D.parameters():
        nn.init.normal_(w, 0, 0.02)
    for w in net_G.parameters():
        nn.init.normal_(w, 0, 0.02)
    net_D, net_G = net_D.to(device), net_G.to(device)
    # beta1=0.5 (instead of the usual 0.9) reacts faster to the rapidly
    # changing gradients of adversarial training
    trainer_hp = {'lr': lr, 'betas': [0.5,0.999]}
    trainer_D = torch.optim.Adam(net_D.parameters(), **trainer_hp)
    trainer_G = torch.optim.Adam(net_G.parameters(), **trainer_hp)
    animator = d2l.Animator(xlabel='epoch', ylabel='loss',
                            xlim=[1, num_epochs], nrows=2, figsize=(5, 5),
                            legend=['discriminator', 'generator'])
    animator.fig.subplots_adjust(hspace=0.3)
    for epoch in range(1, num_epochs + 1):
        # Train one epoch
        timer = d2l.Timer()
        metric = d2l.Accumulator(3)  # loss_D, loss_G, num_examples
        for X, _ in data_iter:
            batch_size = X.shape[0]
            Z = torch.normal(0, 1, size=(batch_size, latent_dim, 1, 1))
            X, Z = X.to(device), Z.to(device)
            metric.add(d2l.update_D(X, Z, net_D, net_G, loss, trainer_D),
                       d2l.update_G(Z, net_D, net_G, loss, trainer_G),
                       batch_size)
        # Show generated examples (21 samples arranged as a 3x7 grid)
        Z = torch.normal(0, 1, size=(21, latent_dim, 1, 1), device=device)
        # Map the generator's tanh output from [-1, 1] back to [0, 1] for display
        fake_x = net_G(Z).permute(0, 2, 3, 1) / 2 + 0.5
        imgs = torch.cat(
            [torch.cat([
                fake_x[i * 7 + j].cpu().detach() for j in range(7)], dim=1)
             for i in range(len(fake_x)//7)], dim=0)
        animator.axes[1].cla()
        animator.axes[1].imshow(imgs)
        # Show the losses (averaged per example)
        loss_D, loss_G = metric[0] / metric[2], metric[1] / metric[2]
        animator.add(epoch, (loss_D, loss_G))
    print(f'loss_D {loss_D:.3f}, loss_G {loss_G:.3f}, '
          f'{metric[2] / timer.stop():.1f} examples/sec on {str(device)}')
```

We train the model with a small number of epochs just for demonstration. For better performance, the variable `num_epochs` can be set to a larger number.
``` latent_dim, lr, num_epochs = 100, 0.005, 20 train(net_D, net_G, data_iter, num_epochs, lr, latent_dim) ``` ## Summary * DCGAN architecture has four convolutional layers for the Discriminator and four "fractionally-strided" convolutional layers for the Generator. * The Discriminator is a 4-layer strided convolutions with batch normalization (except its input layer) and leaky ReLU activations. * Leaky ReLU is a nonlinear function that give a non-zero output for a negative input. It aims to fix the “dying ReLU” problem and helps the gradients flow easier through the architecture. ## Exercises 1. What will happen if we use standard ReLU activation rather than leaky ReLU? 1. Apply DCGAN on Fashion-MNIST and see which category works well and which does not. [Discussions](https://discuss.d2l.ai/t/1083)
github_jupyter
### Convolutional autoencoder Since our inputs are images, it makes sense to use convolutional neural networks (convnets) as encoders and decoders. In practical settings, autoencoders applied to images are always convolutional autoencoders --they simply perform much better. Let's implement one. The encoder will consist in a stack of Conv2D and MaxPooling2D layers (max pooling being used for spatial down-sampling), while the decoder will consist in a stack of Conv2D and UpSampling2D layers. ``` from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D from keras.models import Model from keras import backend as K import numpy as np from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D from keras.models import Model from keras import backend as K input_img = Input(shape=(32, 32, 3)) # adapt this if using `channels_first` image data format x1 = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img) x2 = MaxPooling2D((2, 2), padding='same')(x1) x3 = Conv2D(8, (6, 6), activation='relu', padding='same')(x2) x4 = MaxPooling2D((2, 2), padding='same')(x3) x5 = Conv2D(8, (9, 9), activation='relu', padding='same')(x4) encoded = MaxPooling2D((2, 2), padding='same')(x5) # at this point the representation is (4, 4, 8) i.e. 128-dimensional x6 = Conv2D(8, (9, 9), activation='relu', padding='same')(encoded) x7 = UpSampling2D((2, 2))(x6) x8 = Conv2D(8, (6, 6), activation='relu', padding='same')(x7) x9 = UpSampling2D((2, 2))(x8) x10 = Conv2D(16, (3, 3), activation='relu', padding='same')(x9) x11 = UpSampling2D((2, 2))(x10) decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x11) autoencoder = Model(input_img, decoded) autoencoder.compile(optimizer='adagrad', loss='binary_crossentropy') from keras.datasets import cifar10 import numpy as np (x_train, _), (x_test, _) = cifar10.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. 
x_train = np.reshape(x_train, (len(x_train), 32, 32, 3)) # adapt this if using `channels_first` image data format x_test = np.reshape(x_test, (len(x_test), 32, 32, 3)) # adapt this if using `channels_first` image data format autoencoder.fit(x_train, x_train, epochs=50, batch_size=128, shuffle=True, validation_data=(x_test, x_test)) from keras.models import load_model #autoencoder.save('cifar10_autoencoders.h5') # creates a HDF5 file 'my_model.h5' #del model # deletes the existing model. # returns a compiled model # identical to the previous one autoencoder = load_model('cifar10_autoencoders.h5') import matplotlib.pyplot as plt decoded_imgs = autoencoder.predict(x_test) n = 10 plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(32, 32, 3)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + n + 1) plt.imshow(decoded_imgs[i].reshape(32, 32, 3)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() ``` ### Plotting the weights from the first layer ``` import matplotlib.pyplot as plt n = 8 for i in range(n): fig = plt.figure(figsize=(1,1)) conv_1 = np.asarray(autoencoder.layers[1].get_weights())[0][:,:,0,i] ax = fig.add_subplot(111) plt.imshow(conv_1.transpose(), cmap = 'gray') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() autoencoder.layers[3].get_weights() from keras import backend as K # K.learning_phase() is a flag that indicates if the network is in training or # predict phase. It allow layer (e.g. 
Dropout) to only be applied during training inputs = [K.learning_phase()] + autoencoder.inputs _layer1_f = K.function(inputs, [x2]) def convout1_f(X): # The [0] is to disable the training phase flag return _layer1_f([0] + [X]) #_lay_f = K.function(inputs, [x1]) #def convout1_f(X): # The [0] is to disable the training phase flag # return _layer1_f([0] + [X]) _layer2_f = K.function(inputs, [x4]) def convout2_f(X): # The [0] is to disable the training phase flag return _layer2_f([0] + [X]) _layer3_f = K.function(inputs, [encoded]) def convout3_f(X): # The [0] is to disable the training phase flag return _layer3_f([0] + [X]) _up_layer1_f = K.function(inputs, [x6]) def convout4_f(X): # The [0] is to disable the training phase flag return _up_layer1_f([0] + [X]) _up_layer2_f = K.function(inputs, [x8]) def convout5_f(X): # The [0] is to disable the training phase flag return _up_layer2_f([0] + [X]) _up_layer3_f = K.function(inputs, [x10]) def convout6_f(X): # The [0] is to disable the training phase flag return _up_layer3_f([0] + [X]) _up_layer4_f = K.function(inputs, [decoded]) def convout7_f(X): # The [0] is to disable the training phase flag return _up_layer4_f([0] + [X]) x2 i = 1 x = x_test[i:i+1] ``` ### Visualizing the first convnet/output layer_1 with sample first test image ``` np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0).shape #Plotting conv_1 for i in range(4): #i = 3 x = x_test[i:i+1] check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0) temp = x[0,:,:,:] fig, axes = plt.subplots(1, 1, figsize=(3, 3)) plt.imshow(temp) plt.show() k = 0 while k < check.shape[2]: #plt.figure() #plt.subplot(231 + i) fig, axes = plt.subplots(4, 4, figsize=(5, 5)) for i in range(4): for j in range(4): axes[i,j].imshow(check[:,:,k], cmap = 'gray') k += 1 #axes[0, 0].imshow(R, cmap='jet') #plt.imshow(check[:,:,i]) plt.show() check.shape ``` ### Visualizing the second convnet/output layer_2 with sample test image ``` i = 3 x = x_test[i:i+1] check = 
np.squeeze(np.squeeze(np.array(convout2_f(x)),0),0) check.shape #Plotting conv_2 for i in range(4): #i = 3 x = x_test[i:i+1] check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0) temp = x[0,:,:,:] fig, axes = plt.subplots(1, 1, figsize=(3, 3)) plt.imshow(temp) plt.show() k = 0 while k < check.shape[2]: #plt.figure() #plt.subplot(231 + i) fig, axes = plt.subplots(2, 4, figsize=(5, 5)) for i in range(2): for j in range(4): axes[i,j].imshow(check[:,:,k]) k += 1 #axes[0, 0].imshow(R, cmap='jet') #plt.imshow(check[:,:,i]) plt.show() ``` ### Plotting the third convnet/output layer_3 with sample test image ``` i = 3 x = x_test[i:i+1] check = np.squeeze(np.squeeze(np.array(convout3_f(x)),0),0) check.shape #Plotting conv_3 for i in range(4): #i = 3 x = x_test[i:i+1] check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0) temp = x[0,:,:,:] fig, axes = plt.subplots(1, 1, figsize=(3, 3)) plt.imshow(temp) plt.show() k = 0 while k < check.shape[2]: #plt.figure() #plt.subplot(231 + i) fig, axes = plt.subplots(2, 4, figsize=(5, 5)) for i in range(2): for j in range(4): axes[i,j].imshow(check[:,:,k]) k += 1 #axes[0, 0].imshow(R, cmap='jet') #plt.imshow(check[:,:,i]) plt.show() ``` ### Visualizing the fourth convnet/decoded/output layer_4 with sample test image ``` i = 3 x = x_test[i:i+1] check = np.squeeze(np.squeeze(np.array(convout4_f(x)),0),0) check.shape #Plotting conv_4 for i in range(4): #i = 3 x = x_test[i:i+1] check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0) temp = x[0,:,:,:] fig, axes = plt.subplots(1, 1, figsize=(3, 3)) plt.imshow(temp) plt.show() k = 0 while k < check.shape[2]: #plt.figure() #plt.subplot(231 + i) fig, axes = plt.subplots(2, 4, figsize=(5, 5)) for i in range(2): for j in range(4): axes[i,j].imshow(check[:,:,k]) k += 1 #axes[0, 0].imshow(R, cmap='jet') #plt.imshow(check[:,:,i]) plt.show() ``` ### Visualizing the fifth convnet/decoded/output layer_5 with sample test image ``` i = 3 x = x_test[i:i+1] check = 
np.squeeze(np.squeeze(np.array(convout5_f(x)),0),0) check.shape #Plotting conv_5 for i in range(4): #i = 3 x = x_test[i:i+1] check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0) temp = x[0,:,:,:] fig, axes = plt.subplots(1, 1, figsize=(3, 3)) plt.imshow(temp) plt.show() k = 0 while k < check.shape[2]: #plt.figure() #plt.subplot(231 + i) fig, axes = plt.subplots(2, 4, figsize=(5, 5)) for i in range(2): for j in range(4): axes[i,j].imshow(check[:,:,k]) k += 1 #axes[0, 0].imshow(R, cmap='jet') #plt.imshow(check[:,:,i]) plt.show() ``` ### Visualizing the sixth convnet/decoded/output layer_6 with sample test image ``` i = 3 x = x_test[i:i+1] check = np.squeeze(np.squeeze(np.array(convout6_f(x)),0),0) check.shape #Plotting conv_6 for i in range(4): #i = 3 x = x_test[i:i+1] check = np.squeeze(np.squeeze(np.array(convout1_f(x)),0),0) temp = x[0,:,:,:] fig, axes = plt.subplots(1, 1, figsize=(3, 3)) plt.imshow(temp) plt.show() k = 0 while k < check.shape[2]: #plt.figure() #plt.subplot(231 + i) fig, axes = plt.subplots(4, 4, figsize=(5, 5)) for i in range(4): for j in range(4): axes[i,j].imshow(check[:,:,k]) k += 1 #axes[0, 0].imshow(R, cmap='jet') #plt.imshow(check[:,:,i]) plt.show() ``` ### Visualizing the final decoded/output layer with sample test image ``` i = 1 x = x_test[i:i+1] check = np.squeeze(np.squeeze(np.array(convout7_f(x)),0),0) check.shape #Plot final decoded layer decoded_imgs = autoencoder.predict(x_test) n = 4 plt.figure(figsize=(20, 4)) for i in range(n): # display original ax = plt.subplot(2, n, i + 1) plt.imshow(x_test[i].reshape(32, 32, 3)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) # display reconstruction ax = plt.subplot(2, n, i + n + 1) plt.imshow(decoded_imgs[i].reshape(32, 32, 3)) plt.gray() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) plt.show() ```
github_jupyter
``` import sys import os import glob import subprocess as sp import multiprocessing as mp import pandas as pd import numpy as np from basic_tools import * debug=False def run_ldsc(pheno_code,ld,output,mode='original',samp_prev=np.nan,pop_prev=np.nan): if os.path.exists(ldsc_path.format(pheno_code)+'.log'): print("Congratulations!. ldsc result of",pheno_code,"exists. passed.") return if mode=='original': script=['ldsc.py','--h2',sumstats_path.format(pheno_code)+'.sumstats.gz', '--ref-ld-chr',ld_path.format(ld,''), '--w-ld-chr',wld_path, '--out',ldsc_path.format(output)] elif mode=='my': script=['ldsc_my.py','--h2',sumstats_path.format(pheno_code)+'.sumstats.gz', '--ref-ld-chr',ld_path.format(ld,''), '--w-ld-chr',wld_path, '--out',ldsc_path.format(output)] else: print("run_ldsc mode Error!!!!!!!") if np.isnan(samp_prev)==False and np.isnan(pop_prev)==False: script+=['--samp-prev',str(samp_prev),'--pop-prev',str(pop_prev)] print('Started:',' '.join(script)) sp.call(script) print('Finished:',' '.join(script)) def run_ldsc_wrapper(prefix,scale,pheno_code,samp_prev=np.nan,pop_prev=np.nan): run_ldsc(pheno_code,prefix,'{}.{}'.format(prefix,pheno_code),mode='original' if mode=='uni' else 'my',samp_prev=samp_prev,pop_prev=pop_prev) sys.argv#uni 0 20 x x mode=sys.argv[1] scale=int(sys.argv[2]) cores=int(sys.argv[3]) start=int(sys.argv[4]) end=int(sys.argv[5]) if mode=='uni': prefix=mode else: prefix=mode+str(scale) #start,end,prefix=0,1000,'bp300' phenotypes_uni_filtered['prevalence']=phenotypes_uni_filtered['n_cases']/phenotypes_uni_filtered['n_non_missing'] phenotypes_uni_filtered.shape pheno_code_list_todo=[] for idx,row in phenotypes_uni_filtered.iloc[start:end].iterrows(): if os.path.exists(ldsc_path.format('{}.{}'.format(prefix,idx))+'.log'): #print(ldsc_path.format('{}.{}'.format(prefix,idx))+'.log','exists') continue print(idx,end=' ') pheno_code_list_todo.append((idx,row['prevalence'])) """ 
phenotypes_filtered['prevalence']=phenotypes_filtered['n_cases']/phenotypes_filtered['n_non_missing'] phenotypes_filtered.shape pheno_code_list_todo=[] for idx,row in phenotypes_filtered.iloc[start:end].iterrows(): if os.path.exists(ldsc_path.format('{}.{}'.format(prefix,idx))+'.log'): continue print(idx,end=' ') pheno_code_list_todo.append((idx,row['prevalence'])) """ ``` ``` jupyter nbconvert 5_run_ldsc.ipynb --to script export SCREENDIR=$HOME/.screen start=0;end=600;mode=uni python 5_run_ldsc.py $mode 0 10 $start $end start=0;end=600;mode=bp python 5_run_ldsc.py $mode 300 10 $start $end && python 5_run_ldsc.py $mode 128 10 $start $end && python 5_run_ldsc.py $mode 64 5 $start $end && python 5_run_ldsc.py $mode 32 5 $start $end && python 5_run_ldsc.py $mode 16 5 $start $end && python 5_run_ldsc.py $mode 8 2 $start $end ``` ``` #pool = mp.Pool(processes=15) #pool.starmap(run_ldsc_wrapper,[(mode,scale,pheno_code,prevelence,prevelence) for (pheno_code,prevelence) in pheno_code_list_todo]) pool = mp.Pool(processes=cores) #pool.starmap(run_ldsc_wrapper,[(mode,scale,pheno_code) for pheno_code in pheno_code_list_todo]) pool.starmap(run_ldsc_wrapper,[(prefix,scale,pheno_code,prevelence,prevelence) for (pheno_code,prevelence) in pheno_code_list_todo]) ```
github_jupyter
# anesthetic plot gallery This functions as both some examples of plots that can be produced, and a tutorial. Any difficulties/issues/requests should be posted as a [GitHub issue](https://github.com/williamjameshandley/anesthetic/issues) ## Download example data Download some example data from github (or alternatively use your own chains files) This downloads the PLA chains for the planck baseline cosmology, and the equivalent nested sampling chains: ``` import requests import tarfile for filename in ["plikHM_TTTEEE_lowl_lowE_lensing.tar.gz","plikHM_TTTEEE_lowl_lowE_lensing_NS.tar.gz"]: github_url = "https://github.com/williamjameshandley/cosmo_example/raw/master/" url = github_url + filename open(filename, 'wb').write(requests.get(url).content) tarfile.open(filename).extractall() ``` ## Marginalised posterior plotting Import anesthetic and load the MCMC samples: ``` %matplotlib inline import matplotlib.pyplot as plt from anesthetic import MCMCSamples, make_2d_axes mcmc_root = 'plikHM_TTTEEE_lowl_lowE_lensing/base_plikHM_TTTEEE_lowl_lowE_lensing' mcmc = MCMCSamples(root=mcmc_root) ``` We have plotting tools for 1D plots ... ``` fig, axes = mcmc.plot_1d('omegabh2') ; ``` ... multiple 1D plots ... ``` fig, axes = mcmc.plot_1d(['omegabh2','omegach2','H0','tau','logA','ns']); fig.tight_layout() ``` ... triangle plots ... ``` mcmc.plot_2d(['omegabh2','omegach2','H0'], types={'lower':'kde','diagonal':'kde'}); ``` ... triangle plots (with the equivalent scatter plot filling up the left hand side) ... ``` mcmc.plot_2d(['omegabh2','omegach2','H0']); ``` ... and rectangle plots. ``` mcmc.plot_2d([['omegabh2','omegach2','H0'], ['logA', 'ns']]); ``` Rectangle plots are pretty flexible with what they can do: ``` mcmc.plot_2d([['omegabh2','omegach2','H0'], ['H0','omegach2']]); ``` ## Changing the appearance Anesthetic tries to follow matplotlib conventions as much as possible, so most changes to the appearance should be relatively straight forward. 
Here are some examples: * **figure size**: ``` fig = plt.figure(figsize=(5, 5)) fig, axes = make_2d_axes(['omegabh2', 'omegach2', 'H0'], fig=fig, tex=mcmc.tex) mcmc.plot_2d(axes); ``` * **legends**: ``` fig, axes = make_2d_axes(['omegabh2', 'omegach2', 'H0'], tex=mcmc.tex) mcmc.plot_2d(axes, label='Posterior'); axes.iloc[-1, 0].legend(bbox_to_anchor=(len(axes), len(axes)), loc='upper left'); ``` * **unfilled contours** &nbsp; & &nbsp; **modifying individual axes**: ``` fig, axes = make_2d_axes(['omegabh2', 'omegach2', 'H0'], tex=mcmc.tex) mcmc.plot_2d(axes.iloc[0:1, :], types=dict(upper='kde', lower='kde', diagonal='kde'), fc=None); mcmc.plot_2d(axes.iloc[1:2, :], types=dict(upper='kde', lower='kde', diagonal='kde'), fc=None, cmap=plt.cm.Oranges, lw=3); mcmc.plot_2d(axes.iloc[2:3, :], types=dict(upper='kde', lower='kde', diagonal='kde'), fc='C2', ec='C3', c='C4', lw=2); ``` ## Defining new parameters You can see that samples are stored as a pandas array ``` mcmc[:6] ``` Since it's a (weighted) pandas array, we compute things like the mean and variance of samples ``` mcmc.mean() ``` We can define new parameters with relative ease. For example, the default cosmoMC setup does not include omegab, only omegabh2: ``` 'omegab' in mcmc ``` However, this is pretty trivial to recompute: ``` h = mcmc['H0']/100 mcmc['omegab'] = mcmc['omegabh2']/h**2 mcmc.tex['omegab'] = '$\Omega_b$' mcmc.plot_1d('omegab'); ``` ## Nested sampling plotting Anethestic really comes to the fore for nested sampling. 
We can do all of the above, and more with the power that NS chains provide ``` from anesthetic import NestedSamples nested_root = 'plikHM_TTTEEE_lowl_lowE_lensing_NS/NS_plikHM_TTTEEE_lowl_lowE_lensing' nested = NestedSamples(root=nested_root) ``` We can infer the evidence, KL divergence and Bayesian model dimensionality: ``` ns_output = nested.ns_output() ``` This is a set of ``MCMCSamples``, with columns yielding the log of the Bayesian evidence (logZ), the Kullback-Leibler divergence (D) and the Bayesian model dimensionality (d). ``` ns_output[:6] ``` The evidence, KL divergence and Bayesian model dimensionality, with their corresponding errors, are: ``` for x in ns_output: print('%10s = %9.2f +/- %4.2f' % (x, ns_output[x].mean(), ns_output[x].std())) ``` Since ``ns_output`` is a set of ``MCMCSamples``, it may be plotted as usual. Here we illustrate slightly more fine-grained control of the axes construction (demanding three columns) ``` from anesthetic import make_1d_axes fig, axes = make_1d_axes(['logZ', 'D', 'd'], ncols=3, tex=ns_output.tex) ns_output.plot_1d(axes); ``` We can also inspect the correlation between these inferences: ``` ns_output.plot_2d(['logZ','D']); ``` Here is a comparison of the base and NS output ``` h = nested['H0']/100 nested['omegab'] = nested['omegabh2']/h**2 nested.tex['omegab'] = '$\Omega_b$' fig, axes = mcmc.plot_2d(['sigma8','omegab']) nested.plot_2d(axes=axes); ``` With nested samples, we can plot the prior (or any temperature), by passing beta=0. We also introduce here how to create figure legends. ``` prior = nested.set_beta(0) fig, axes = prior.plot_2d(['ns','tau'], label='prior') nested.plot_2d(axes=axes, label='posterior') handles, labels = axes['ns']['tau'].get_legend_handles_labels() leg = fig.legend(handles, labels) fig.tight_layout() ``` We can also set up an interactive plot, which allows us to replay a nested sampling run after the fact. 
``` nested.gui() ``` There are also tools for converting to alternative formats, in case you have pipelines in other plotters: ``` from anesthetic.convert import to_getdist getdist_samples = to_getdist(nested) ```
github_jupyter
``` %matplotlib inline ``` Sequence-to-Sequence Modeling with nn.Transformer and TorchText =============================================================== This is a tutorial on how to train a sequence-to-sequence model that uses the `nn.Transformer <https://pytorch.org/docs/master/nn.html?highlight=nn%20transformer#torch.nn.Transformer>`__ module. PyTorch 1.2 release includes a standard transformer module based on the paper `Attention is All You Need <https://arxiv.org/pdf/1706.03762.pdf>`__. The transformer model has been proved to be superior in quality for many sequence-to-sequence problems while being more parallelizable. The ``nn.Transformer`` module relies entirely on an attention mechanism (another module recently implemented as `nn.MultiheadAttention <https://pytorch.org/docs/master/nn.html?highlight=multiheadattention#torch.nn.MultiheadAttention>`__) to draw global dependencies between input and output. The ``nn.Transformer`` module is now highly modularized such that a single component (like `nn.TransformerEncoder <https://pytorch.org/docs/master/nn.html?highlight=nn%20transformerencoder#torch.nn.TransformerEncoder>`__ in this tutorial) can be easily adapted/composed. ![](../_static/img/transformer_architecture.jpg) Define the model ---------------- In this tutorial, we train ``nn.TransformerEncoder`` model on a language modeling task. The language modeling task is to assign a probability for the likelihood of a given word (or a sequence of words) to follow a sequence of words. A sequence of tokens are passed to the embedding layer first, followed by a positional encoding layer to account for the order of the word (see the next paragraph for more details). The ``nn.TransformerEncoder`` consists of multiple layers of `nn.TransformerEncoderLayer <https://pytorch.org/docs/master/nn.html?highlight=transformerencoderlayer#torch.nn.TransformerEncoderLayer>`__. 
Along with the input sequence, a square attention mask is required because the self-attention layers in ``nn.TransformerEncoder`` are only allowed to attend the earlier positions in the sequence. For the language modeling task, any tokens on the future positions should be masked. To have the actual words, the output of ``nn.TransformerEncoder`` model is sent to the final Linear layer, which is followed by a log-Softmax function. ``` import math import torch import torch.nn as nn import torch.nn.functional as F class TransformerModel(nn.Module): def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5): super(TransformerModel, self).__init__() from torch.nn import TransformerEncoder, TransformerEncoderLayer self.model_type = 'Transformer' self.pos_encoder = PositionalEncoding(ninp, dropout) encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout) self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers) self.encoder = nn.Embedding(ntoken, ninp) self.ninp = ninp self.decoder = nn.Linear(ninp, ntoken) self.init_weights() def generate_square_subsequent_mask(self, sz): mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1) mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) return mask def init_weights(self): initrange = 0.1 self.encoder.weight.data.uniform_(-initrange, initrange) self.decoder.bias.data.zero_() self.decoder.weight.data.uniform_(-initrange, initrange) def forward(self, src, src_mask): src = self.encoder(src) * math.sqrt(self.ninp) src = self.pos_encoder(src) output = self.transformer_encoder(src, src_mask) output = self.decoder(output) return output ``` ``PositionalEncoding`` module injects some information about the relative or absolute position of the tokens in the sequence. The positional encodings have the same dimension as the embeddings so that the two can be summed. Here, we use ``sine`` and ``cosine`` functions of different frequencies. 
``` class PositionalEncoding(nn.Module): def __init__(self, d_model, dropout=0.1, max_len=5000): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) pe = torch.zeros(max_len, d_model) position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0).transpose(0, 1) self.register_buffer('pe', pe) def forward(self, x): x = x + self.pe[:x.size(0), :] return self.dropout(x) ``` Load and batch data ------------------- This tutorial uses ``torchtext`` to generate Wikitext-2 dataset. The vocab object is built based on the train dataset and is used to numericalize tokens into tensors. Starting from sequential data, the ``batchify()`` function arranges the dataset into columns, trimming off any tokens remaining after the data has been divided into batches of size ``batch_size``. For instance, with the alphabet as the sequence (total length of 26) and a batch size of 4, we would divide the alphabet into 4 sequences of length 6: \begin{align}\begin{bmatrix} \text{A} & \text{B} & \text{C} & \ldots & \text{X} & \text{Y} & \text{Z} \end{bmatrix} \Rightarrow \begin{bmatrix} \begin{bmatrix}\text{A} \\ \text{B} \\ \text{C} \\ \text{D} \\ \text{E} \\ \text{F}\end{bmatrix} & \begin{bmatrix}\text{G} \\ \text{H} \\ \text{I} \\ \text{J} \\ \text{K} \\ \text{L}\end{bmatrix} & \begin{bmatrix}\text{M} \\ \text{N} \\ \text{O} \\ \text{P} \\ \text{Q} \\ \text{R}\end{bmatrix} & \begin{bmatrix}\text{S} \\ \text{T} \\ \text{U} \\ \text{V} \\ \text{W} \\ \text{X}\end{bmatrix} \end{bmatrix}\end{align} These columns are treated as independent by the model, which means that the dependence of ``G`` and ``F`` can not be learned, but allows more efficient batch processing. 
``` import io import torch from torchtext.utils import download_from_url, extract_archive from torchtext.data.utils import get_tokenizer from torchtext.vocab import build_vocab_from_iterator url = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip' test_filepath, valid_filepath, train_filepath = extract_archive(download_from_url(url)) tokenizer = get_tokenizer('basic_english') vocab = build_vocab_from_iterator(map(tokenizer, iter(io.open(train_filepath, encoding="utf8")))) def data_process(raw_text_iter): data = [torch.tensor([vocab[token] for token in tokenizer(item)], dtype=torch.long) for item in raw_text_iter] return torch.cat(tuple(filter(lambda t: t.numel() > 0, data))) train_data = data_process(iter(io.open(train_filepath, encoding="utf8"))) val_data = data_process(iter(io.open(valid_filepath, encoding="utf8"))) test_data = data_process(iter(io.open(test_filepath, encoding="utf8"))) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def batchify(data, bsz): # Divide the dataset into bsz parts. nbatch = data.size(0) // bsz # Trim off any extra elements that wouldn't cleanly fit (remainders). data = data.narrow(0, 0, nbatch * bsz) # Evenly divide the data across the bsz batches. data = data.view(bsz, -1).t().contiguous() return data.to(device) batch_size = 20 eval_batch_size = 10 train_data = batchify(train_data, batch_size) val_data = batchify(val_data, eval_batch_size) test_data = batchify(test_data, eval_batch_size) ``` Functions to generate input and target sequence ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``get_batch()`` function generates the input and target sequence for the transformer model. It subdivides the source data into chunks of length ``bptt``. For the language modeling task, the model needs the following words as ``Target``. 
For example, with a ``bptt`` value of 2, we’d get the following two Variables for ``i`` = 0: ![](../_static/img/transformer_input_target.png) It should be noted that the chunks are along dimension 0, consistent with the ``S`` dimension in the Transformer model. The batch dimension ``N`` is along dimension 1. ``` bptt = 35 def get_batch(source, i): seq_len = min(bptt, len(source) - 1 - i) data = source[i:i+seq_len] target = source[i+1:i+1+seq_len].reshape(-1) return data, target ``` Initiate an instance -------------------- The model is set up with the hyperparameter below. The vocab size is equal to the length of the vocab object. ``` ntokens = len(vocab.stoi) # the size of vocabulary emsize = 200 # embedding dimension nhid = 200 # the dimension of the feedforward network model in nn.TransformerEncoder nlayers = 2 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder nhead = 2 # the number of heads in the multiheadattention models dropout = 0.2 # the dropout value model = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout).to(device) ``` Run the model ------------- `CrossEntropyLoss <https://pytorch.org/docs/master/nn.html?highlight=crossentropyloss#torch.nn.CrossEntropyLoss>`__ is applied to track the loss and `SGD <https://pytorch.org/docs/master/optim.html?highlight=sgd#torch.optim.SGD>`__ implements stochastic gradient descent method as the optimizer. The initial learning rate is set to 5.0. `StepLR <https://pytorch.org/docs/master/optim.html?highlight=steplr#torch.optim.lr_scheduler.StepLR>`__ is applied to adjust the learn rate through epochs. During the training, we use `nn.utils.clip_grad_norm\_ <https://pytorch.org/docs/master/nn.html?highlight=nn%20utils%20clip_grad_norm#torch.nn.utils.clip_grad_norm_>`__ function to scale all the gradient together to prevent exploding. 
``` criterion = nn.CrossEntropyLoss() lr = 5.0 # learning rate optimizer = torch.optim.SGD(model.parameters(), lr=lr) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95) import time def train(): model.train() # Turn on the train mode total_loss = 0. start_time = time.time() src_mask = model.generate_square_subsequent_mask(bptt).to(device) for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)): data, targets = get_batch(train_data, i) optimizer.zero_grad() if data.size(0) != bptt: src_mask = model.generate_square_subsequent_mask(data.size(0)).to(device) output = model(data, src_mask) loss = criterion(output.view(-1, ntokens), targets) loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5) optimizer.step() total_loss += loss.item() log_interval = 200 if batch % log_interval == 0 and batch > 0: cur_loss = total_loss / log_interval elapsed = time.time() - start_time print('| epoch {:3d} | {:5d}/{:5d} batches | ' 'lr {:02.2f} | ms/batch {:5.2f} | ' 'loss {:5.2f} | ppl {:8.2f}'.format( epoch, batch, len(train_data) // bptt, scheduler.get_lr()[0], elapsed * 1000 / log_interval, cur_loss, math.exp(cur_loss))) total_loss = 0 start_time = time.time() def evaluate(eval_model, data_source): eval_model.eval() # Turn on the evaluation mode total_loss = 0. src_mask = model.generate_square_subsequent_mask(bptt).to(device) with torch.no_grad(): for i in range(0, data_source.size(0) - 1, bptt): data, targets = get_batch(data_source, i) if data.size(0) != bptt: src_mask = model.generate_square_subsequent_mask(data.size(0)).to(device) output = eval_model(data, src_mask) output_flat = output.view(-1, ntokens) total_loss += len(data) * criterion(output_flat, targets).item() return total_loss / (len(data_source) - 1) ``` Loop over epochs. Save the model if the validation loss is the best we've seen so far. Adjust the learning rate after each epoch. 
Evaluate the model with the test dataset ----------------------------------------
github_jupyter
--- _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._ --- # The Series Data Structure ``` import pandas as pd pd.Series? purchase_1 = pd.Series({'Name': 'Chris', 'Item Purchased': 'Dog Food', 'Cost': 22.50}) purchase_2 = pd.Series({'Name': 'Kevyn', 'Item Purchased': 'Kitty Litter', 'Cost': 2.50}) purchase_3 = pd.Series({'Name': 'Vinod', 'Item Purchased': 'Bird Seed', 'Cost': 5.00}) df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2']) df.head() df[['Item Purchased']] purchase_1 = pd.Series({'Name': 'Chris', 'Item Purchased': 'Dog Food', 'Cost': 22.50}) purchase_2 = pd.Series({'Name': 'Kevyn', 'Item Purchased': 'Kitty Litter', 'Cost': 2.50}) purchase_3 = pd.Series({'Name': 'Vinod', 'Item Purchased': 'Bird Seed', 'Cost': 5.00}) df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2']) df.head() import pandas as pd purchase_1 = pd.Series({'Name': 'Chris', 'Item Purchased': 'Dog Food', 'Cost': 22.50}) purchase_2 = pd.Series({'Name': 'Kevyn', 'Item Purchased': 'Kitty Litter', 'Cost': 2.50}) purchase_3 = pd.Series({'Name': 'Vinod', 'Item Purchased': 'Bird Seed', 'Cost': 5.00}) df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2']) df.head() df.set_index(name=['S']) df[df['Cost']>3.0] animals = ['Tiger', 'Bear', 'Moose'] pd.Series(animals) numbers = [1, 2, 3] pd.Series(numbers) animals = ['Tiger', 'Bear', None] pd.Series(animals) numbers = [1, 2, None] pd.Series(numbers) import numpy as np np.nan == None np.nan == np.nan np.isnan(np.nan) sports = {'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'} s = pd.Series(sports) s s.index s = pd.Series(['Tiger', 'Bear', 'Moose'], 
index=['India', 'America', 'Canada']) s sports = {'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'} s = pd.Series(sports, index=['Golf', 'Sumo', 'Hockey']) s ``` # Querying a Series ``` sports = {'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'} s = pd.Series(sports) s s.iloc[3] s.loc['Golf'] s[3] s['Golf'] sports = {99: 'Bhutan', 100: 'Scotland', 101: 'Japan', 102: 'South Korea'} s = pd.Series(sports) s[0] #This won't call s.iloc[0] as one might expect, it generates an error instead s = pd.Series([100.00, 120.00, 101.00, 3.00]) s total = 0 for item in s: total+=item print(total) import numpy as np total = np.sum(s) print(total) #this creates a big series of random numbers s = pd.Series(np.random.randint(0,1000,10000)) s.head() len(s) %%timeit -n 100 summary = 0 for item in s: summary+=item %%timeit -n 100 summary = np.sum(s) s+=2 #adds two to each item in s using broadcasting s.head() for label, value in s.iteritems(): s.set_value(label, value+2) s.head() %%timeit -n 10 s = pd.Series(np.random.randint(0,1000,10000)) for label, value in s.iteritems(): s.loc[label]= value+2 %%timeit -n 10 s = pd.Series(np.random.randint(0,1000,10000)) s+=2 s = pd.Series([1, 2, 3]) s.loc['Animal'] = 'Bears' s original_sports = pd.Series({'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'}) cricket_loving_countries = pd.Series(['Australia', 'Barbados', 'Pakistan', 'England'], index=['Cricket', 'Cricket', 'Cricket', 'Cricket']) all_countries = original_sports.append(cricket_loving_countries) original_sports cricket_loving_countries all_countries all_countries.loc['Cricket'] ``` # The DataFrame Data Structure ``` import pandas as pd purchase_1 = pd.Series({'Name': 'Chris', 'Item Purchased': 'Dog Food', 'Cost': 22.50}) purchase_2 = pd.Series({'Name': 'Kevyn', 'Item Purchased': 'Kitty Litter', 'Cost': 2.50}) purchase_3 = pd.Series({'Name': 'Vinod', 'Item Purchased': 'Bird Seed', 
'Cost': 5.00}) df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2']) df.head() df.loc['Store 2'] type(df.loc['Store 2']) df.loc['Store 1'] df.loc['Store 1', 'Cost'] df.T df.T.loc['Cost'] df['Cost'] df.loc['Store 1']['Cost'] df.loc[:,['Name', 'Cost']] df.drop('Store 1') df copy_df = df.copy() copy_df = copy_df.drop('Store 1') copy_df copy_df.drop? del copy_df['Name'] copy_df df['Location'] = None df ``` # Dataframe Indexing and Loading ``` costs = df['Cost'] costs costs+=2 costs df !cat olympics.csv df = pd.read_csv('olympics.csv') df.head() df = pd.read_csv('olympics.csv', index_col = 0, skiprows=1) df.head() df.columns for col in df.columns: if col[:2]=='01': df.rename(columns={col:'Gold' + col[4:]}, inplace=True) if col[:2]=='02': df.rename(columns={col:'Silver' + col[4:]}, inplace=True) if col[:2]=='03': df.rename(columns={col:'Bronze' + col[4:]}, inplace=True) if col[:1]=='№': df.rename(columns={col:'#' + col[1:]}, inplace=True) df.head() ``` # Querying a DataFrame ``` df['Gold'] > 0 only_gold = df.where(df['Gold'] > 0) only_gold.head() only_gold['Gold'].count() df['Gold'].count() only_gold = only_gold.dropna() only_gold.head() only_gold = df[df['Gold'] > 0] only_gold.head() len(df[(df['Gold'] > 0) | (df['Gold.1'] > 0)]) df[(df['Gold.1'] > 0) & (df['Gold'] == 0)] ``` # Indexing Dataframes ``` df.head() df['country'] = df.index df = df.set_index('Gold') df.head() df = df.reset_index() df.head() df = pd.read_csv('census.csv') df.head() df['SUMLEV'].unique() df=df[df['SUMLEV'] == 50] df.head() columns_to_keep = ['STNAME', 'CTYNAME', 'BIRTHS2010', 'BIRTHS2011', 'BIRTHS2012', 'BIRTHS2013', 'BIRTHS2014', 'BIRTHS2015', 'POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015'] df = df[columns_to_keep] df.head() df = df.set_index(['STNAME', 'CTYNAME']) df.head() df.loc['Michigan', 'Washtenaw County'] df.loc[ [('Michigan', 'Washtenaw County'), ('Michigan', 'Wayne County')] 
] ``` # Missing values ``` df = pd.read_csv('log.csv') df df.fillna? df = df.set_index('time') df = df.sort_index() df df = df.reset_index() df = df.set_index(['time', 'user']) df df = df.fillna(method='ffill') df.head() ```
github_jupyter
``` %matplotlib inline ``` This notebook is based on: https://mne.tools/stable/auto_tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.html # Spatiotemporal permutation F-test on full sensor data Tests for differential evoked responses in at least one condition using a permutation clustering test. The FieldTrip neighbor templates will be used to determine the adjacency between sensors. This serves as a spatial prior to the clustering. Spatiotemporal clusters will then be visualized using custom matplotlib code. ``` from mpl_toolkits.axes_grid1 import make_axes_locatable from mne.stats import spatio_temporal_cluster_test from mne.channels import find_ch_adjacency from mne.viz import plot_compare_evokeds ``` ## Read epochs Your pipeline from the previous notebook(s) should go here. Basically from reading the raw to the epoching needs to be done here. Once it is all epoched you can continue. _Remember to equalise your conditions!_ The MNE-python stats functions work on a numpy array with the shape: - n_observations $\times$ n_times $\times$ n_channels/n_vertices So we need to extract the data and then transform it to the right shape. _Remember_ MNE-python epochs are in the shape: - n_observations $\times$ n_channels/n_verticies $\times$ n_times n_channels/n_verticies is because the functions works both on sensor space and source space data. You should also select just two conditions, e.g. left vs right auditory or auditory vs visual. ``` X = [epochs[k].get_data() for k in event_dict] # as 3D matrix X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering ``` ## Find the FieldTrip neighbor definition to setup sensor adjacency ``` adjacency, ch_names = find_ch_adjacency(epochs.info, ch_type='eeg') print(type(adjacency)) # it's a sparse matrix! 
Our features are the electric potentials measured over our sensor array at different times.
## Visualize clusters **Adjust the visualization to the conditions you have selected!** ``` # configure variables for visualization colors = {"Aud": "crimson", "Vis": 'steelblue'} linestyles = {"L": '-', "R": '--'} # organize data for plotting evokeds = {cond: epochs[cond].average() for cond in event_id} # loop over clusters for i_clu, clu_idx in enumerate(good_cluster_inds): # unpack cluster information, get unique indices time_inds, space_inds = np.squeeze(clusters[clu_idx]) ch_inds = np.unique(space_inds) time_inds = np.unique(time_inds) # get topography for F stat f_map = T_obs[time_inds, ...].mean(axis=0) # get signals at the sensors contributing to the cluster sig_times = epochs.times[time_inds] # create spatial mask mask = np.zeros((f_map.shape[0], 1), dtype=bool) mask[ch_inds, :] = True # initialize figure fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3)) # plot average test statistic and mark significant sensors f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0) f_evoked.plot_topomap(times=0, mask=mask, axes=ax_topo, cmap='Reds', vmin=np.min, vmax=np.max, show=False, colorbar=False, mask_params=dict(markersize=10)) image = ax_topo.images[0] # create additional axes (for ERF and colorbar) divider = make_axes_locatable(ax_topo) # add axes for colorbar ax_colorbar = divider.append_axes('right', size='5%', pad=0.05) plt.colorbar(image, cax=ax_colorbar) ax_topo.set_xlabel( 'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]])) # add new axis for time courses and plot time courses ax_signals = divider.append_axes('right', size='300%', pad=1.2) title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds)) if len(ch_inds) > 1: title += "s (mean)" plot_compare_evokeds(evokeds, title=title, picks=ch_inds, axes=ax_signals, colors=colors, linestyles=linestyles, show=False, split_legend=True, truncate_yaxis='auto') # plot temporal cluster extent ymin, ymax = ax_signals.get_ylim() ax_signals.fill_betweenx((ymin, ymax), sig_times[0], 
sig_times[-1], color='orange', alpha=0.3) # clean up viz mne.viz.tight_layout(fig=fig) fig.subplots_adjust(bottom=.05) plt.show() ``` ## Exercises - What is the smallest p-value you can obtain, given the finite number of permutations?
github_jupyter
``` import os os.environ['PYSPARK_SUBMIT_ARGS'] = \ '--conf spark.cassandra.connection.host=cassandra --packages org.apache.spark:spark-streaming-kafka-0-8_2.11:2.0.2,com.datastax.spark:spark-cassandra-connector_2.11:2.0.2 pyspark-shell' from pyspark import SparkContext from pyspark.streaming import StreamingContext from pyspark.streaming.kafka import KafkaUtils from pyspark.sql import SQLContext from pyspark.sql import functions as F from pyspark.sql.types import * from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating sc = SparkContext(appName="BigDataRiver") sc.setLogLevel("WARN") sc.setCheckpointDir('checkpoint/') ssc = StreamingContext(sc, 60) sql = SQLContext(sc) kafkaStream = KafkaUtils.createDirectStream(ssc, ['bdr'], {"metadata.broker.list": 'kafka:9092'}) parsed = kafkaStream.map(lambda v: v[1]) #split is_purchase column into two separateClicksSchema = StructType([ StructField("purchased_count", LongType(), False), StructField("clicked_count", LongType(), False) ]) def separateClicks(is_purchase): return (is_purchase, 1-is_purchase) separateClicks_udf = F.udf(separateClicks, separateClicksSchema) def buildCFModel(train): def isProductToRating(productCount, clickCount): return (productCount * 3.0) + clickCount ratings = train.rdd.\ map(lambda r: Rating(r.user_id, r.product, isProductToRating(r.purchased_count, r.clicked_count))) rank = 10 numIterations = 20 lambdaFactor = 0.01 alpha = 0.01 seed = 42 return ALS.trainImplicit(ratings, rank, numIterations, alpha, seed=seed) def recommendTopProducts(dfModel): numberOfRecommendationsRequired = 5 rdd = dfModel.recommendProductsForUsers(numberOfRecommendationsRequired) recommendations = rdd.map(lambda (user,ratings): (user, map(lambda r: r.product, ratings))) topRecommendationsSchema = StructType([ StructField("user_id", IntegerType(), False), StructField("recommended_products", ArrayType(IntegerType()), False) ]) return sql.createDataFrame(recommendations, topRecommendationsSchema) def 
processStream(rdd): df = sql.read.json(rdd) if(len(df.columns)): #store updated counters in C* df.withColumn('c', separateClicks_udf(df['is_purchase'])).\ select("user_id","product","c.purchased_count","c.clicked_count").\ write.format("org.apache.spark.sql.cassandra").mode('append').\ options(table="users_interests", keyspace="bdr").save() #read all data from C* usersInterests = sql.read.format("org.apache.spark.sql.cassandra").\ options(table="users_interests", keyspace="bdr").load().cache() dfModel = buildCFModel(usersInterests.select("user_id","product","clicked_count","purchased_count")) top5 = recommendTopProducts(dfModel) top5.show() top5.write.format("org.apache.spark.sql.cassandra").mode('append').options(table="cf", keyspace="bdr").save() print "Saved" else: print "Empty" parsed.foreachRDD(lambda rdd: processStream(rdd)) ssc.start() ssc.awaitTermination() ```
github_jupyter
``` import os import xgboost as xgb import pandas as pd import numpy as np from utils import encode_numeric_zscore_list, encode_numeric_zscore_all, to_xy, encode_text_index_list, encode_numeric_log_all from xgboost.sklearn import XGBClassifier, XGBRegressor from sklearn import datasets from sigopt_sklearn.search import SigOptSearchCV path = "./data/allstate" inputFilePath = os.path.join(path, "train.csv.zip") df = pd.read_csv(inputFilePath, compression="zip", header=0, na_values=['NULL']) df = df.reindex(np.random.permutation(df.index)) df.reset_index(inplace=True, drop=True) df.drop('id', axis=1, inplace=True) #df = df.sample(frac=0.01) #encode categoricals as dummies encode_text_index_list(df, ['cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6', 'cat7', 'cat8', 'cat9', 'cat10', 'cat11', 'cat12', 'cat13', 'cat14', 'cat15', 'cat16', 'cat17', 'cat18', 'cat19', 'cat20', 'cat21', 'cat22', 'cat23', 'cat24', 'cat25', 'cat26', 'cat27', 'cat28', 'cat29', 'cat30', 'cat31', 'cat32', 'cat33', 'cat34', 'cat35', 'cat36', 'cat37', 'cat38', 'cat39', 'cat40', 'cat41', 'cat42', 'cat43', 'cat44', 'cat45', 'cat46', 'cat47', 'cat48', 'cat49', 'cat50', 'cat51', 'cat52', 'cat53', 'cat54', 'cat55', 'cat56', 'cat57', 'cat58', 'cat59', 'cat60', 'cat61', 'cat62', 'cat63', 'cat64', 'cat65', 'cat66', 'cat67', 'cat68', 'cat69', 'cat70', 'cat71', 'cat72', 'cat73', 'cat74', 'cat75', 'cat76', 'cat77', 'cat78', 'cat79', 'cat80', 'cat81', 'cat82', 'cat83', 'cat84', 'cat85', 'cat86', 'cat87', 'cat88', 'cat89', 'cat90', 'cat91', 'cat92', 'cat93', 'cat94', 'cat95', 'cat96', 'cat97', 'cat98', 'cat99', 'cat100', 'cat101', 'cat102', 'cat103', 'cat104', 'cat105', 'cat106', 'cat107', 'cat108', 'cat109', 'cat110', 'cat111', 'cat112', 'cat113', 'cat114', 'cat115', 'cat116']) #encode all numeric values to zscored values encode_numeric_zscore_list(df, ['cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13', 'cont14']) #discard rows where z-score > 2 
df.fillna(0) # Create x(predictors) and y (expected outcome) X,Y = to_xy(df, "loss") # find your SigOpt client token here : https://sigopt.com/user/profile client_token = "UAJKINHBEGLJVIYYMGWANLUPRORPFRLTJMESGZKNPTHKOSIW" xgb_params = { 'learning_rate' : [0.01, 0.5], 'n_estimators' : [10, 70], 'max_depth':[3, 50], 'min_child_weight':[1, 15], 'gamma':[0, 1.0], 'subsample':[0.1, 1.0], 'colsample_bytree':[0.1, 1.0], 'max_delta_step': [1,15], 'colsample_bylevel': [0.1, 1.0], #'lamda': [1,5], #'alpha': [1,5], 'scale_pos_weight': [0,5], #'objective': 'reg:linear', #'booster': ['gblinear', 'gbtree'] , #'eval_metric': 'mae', #'tree_method': ['exact', 'approx'] } xgb = XGBRegressor() clf = SigOptSearchCV(xgb, xgb_params, cv=5, client_token=client_token, n_jobs=25, n_iter=700, verbose=1) clf.fit(X, Y) a = XGBRegressor() a.get_params().keys() ```
github_jupyter
<a href="https://colab.research.google.com/github/rtindru/CompStats/blob/master/Kensho_Assessment_Model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = (20, 4) import seaborn as sns import keras from google.colab import drive drive.mount('/content/drive') PATH = "/content/drive/My Drive/Kensho Asssessment/data/" tdf = pd.read_csv(PATH+'clean_train.csv') tdf.head() from keras.utils import to_categorical from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import train_test_split from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences MAX_NB_WORDS = 1000 MAX_SEQUENCE_LENGTH = 50 BATCH_SIZE = 200 EPOCHS = 3 # Data has temporality, sort so that we train on past data and predict on recent data tdf = tdf.sort_values('date') # Tokenize the word meanings tokenizer = Tokenizer(num_words=MAX_NB_WORDS) tokenizer.fit_on_texts(tdf.clean_tite) sequences = tokenizer.texts_to_sequences(tdf.clean_tite) word_index = tokenizer.word_index print('Found %s unique tokens.' 
% len(word_index)) X = pd.DataFrame(pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)) # Label encode the word labels le = LabelEncoder() y = pd.DataFrame(le.fit_transform(tdf.subject)) NUM_CLASSES = len(le.classes_) # Split data into train, test, and valid train_size = int(tdf.shape[0] * 0.75) X_train, X_test, y_train, y_test = X[:train_size], X[train_size:], y[:train_size], y[train_size:] print('Classes: ', NUM_CLASSES) print('Shape of data tensor:', X_train.shape) print('Shape of label tensor:', y_train.shape) print('Shape of data tensor:', X_test.shape) print('Shape of label tensor:', y_test.shape) # # Preload the embedding matrix # EMBEDDING_DIM = 300 # same as the lenght of the keyed vector # import gensim # def get_coefficients(word, model): # """ # Helper method to return coeffs for a model; or zeros! # """ # try: # return model.get_vector(word) # except KeyError: # return np.zeros(model.wv.vector_size) # gn_model = gensim.models.KeyedVectors.load_word2vec_format('/content/drive/My Drive/petrichor_new/GoogleNews-vectors-negative300.bin.gz', binary=True) # embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM)) # for word, i in word_index.items(): # embedding_matrix[i] = get_coefficients(word, gn_model) EMBEDDING_DIM = 30 # same as the lenght of the keyed vector from keras.models import Sequential from keras.layers import Dense, Embedding, Dropout, SpatialDropout1D, Bidirectional from keras.layers import LSTM NUM_SAMPLES = len(X_train) LSTM_DIM = 64 # Good to go with 32/64; can be stacked MAX_LEN = MAX_SEQUENCE_LENGTH NUM_CLASSES = len(le.classes_) model = Sequential() model.add(Embedding(input_dim=len(word_index)+1, output_dim=EMBEDDING_DIM, input_length=MAX_LEN)) model.add(LSTM(LSTM_DIM, dropout=0.2, recurrent_dropout=0.2, input_length=MAX_SEQUENCE_LENGTH)) # input_shape=(1, 3000, ) model.add(Dense(NUM_CLASSES, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', 
'top_k_categorical_accuracy']) print(model.summary()) _y_train = keras.utils.to_categorical(y_train, num_classes=NUM_CLASSES) _y_test = keras.utils.to_categorical(y_test, num_classes=NUM_CLASSES) history = model.fit(X_train, _y_train, validation_data=(X_test, _y_test), verbose=True, epochs=EPOCHS) # list all data in history print(history.history.keys()) # summarize history for accuracy plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # summarize history for loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() # Predict on test data history = model.fit(X_train, _y_train, validation_data=(X_test, _y_test), verbose=True, epochs=EPOCHS) vdf = pd.read_csv(PATH+'clean_val.csv') sequences = tokenizer.texts_to_sequences(vdf.clean_tite) word_index = tokenizer.word_index print('Found %s unique tokens.' 
def mse(df1, df2):
    """Mean squared error between the 'count' columns of two frames.

    Computes sum((df1['count'] - df2['count'])**2) / len(df1).
    NOTE(review): the subtraction aligns on the frames' indexes, which are
    expected to match — confirm callers pass identically-indexed frames.
    """
    squared_diff = np.square(df1['count'] - df2['count'])
    return np.sum(squared_diff) / df1.shape[0]
github_jupyter
<h1 id="header-ch">2021 CCF BDCI基于飞桨实现花样滑冰选手骨骼点动作识别-第6名方案</h1> # 赛题介绍 人体运动分析是近几年许多领域研究的热点问题。在学科的交叉研究上,人体运动分析涉及到计算机科学、运动人体科学、环境行为学和材料科学等。随着研究的深入以及计算机视觉、5G通信的飞速发展,人体运动分析技术已应用于自动驾驶、影视创作、安防异常事件监测和体育竞技分析、康复等实际场景人体运动分析已成为人工智能领域研究的前沿课题。目前的研究数据普遍缺少细粒度语义信息,导致现存的分割或识别任务缺少时空细粒度动作语义模型。此类研究在竞技体育、运动康复、日常健身等方面有非常重大的意义。相比于图片的细粒度研究,时空细粒度语义的人体动作具有动作的类内方差大、类间方差小这一特点,这将导致由细粒度语义产生的一系列问题,利用粗粒度语义的识别模型进行学习难得获得理想的结果。 基于实际需求以及图深度学习模型的发展,本比赛旨在构建基于骨骼点的细粒度人体动作识别方法。通过本赛题建立精度高、细粒度意义明确的动作识别模型,希望大家探索时空细粒度模型的新方法。 # RES2CTR-GCN介绍 ## 整体结构 本算法是基于[CTR-GCN](https://arxiv.org/pdf/2107.12213v2.pdf)进行改进,采用多流同结构算法整体框架如下图所示: ### 双流算法流程图 <p align="center"> <img src="multi_stream.png" width = "500" height = "50" alt="" align="center" /> ### 单流算法流程图 <p align="center"> <img src="model.png" width = "500" height = "50" alt="" align="center" /> ### RES2CTR-GCN模块 <img src="RES2CTR-GCN.png" width = "500" height = "50" alt="" align="center" /> <img src="CTR-GC.png" width = "500" height = "50" alt="" align="center" /> <img src="TEMORAL_MODELING.png" width = "500" height = "50" alt="" align="center" /> ## 数据增强/清洗策略 ### 数据流的构建 本模型采用多流同结构 故需要对训练的数据流以及预测的数据流进行预先的构建。构建的代码在data文件夹下的get_train_data.py,get_test_data.py ### 数据增强 在训练的情况下,采用mixup数据增强的策略, ## AI模型开发过程、训练技巧、创新思路 ## 下载githee模型代码 ``` # 进入到gitclone 的 ccf 目录下 %cd ~/work/ !git clone https://gitee.com/mark_twain/ccf.git ``` ## 配置代码环境,安装相应的依赖包 ``` %cd ~/work/ccf !python3.7 -m pip install --upgrade pip !python3.7 -m pip install --upgrade -r requirements.txt ``` ## 解压数据集并将数据集移动到指定文件夹中 ``` # 将数据集解压 %cd ~/ !unzip data/data118075/bdcidataset.zip # 将数据集移动到代码文件ccf/data中 !mv test_A_data.npy ./work/ccf/data/ !mv test_B_data.npy ./work/ccf/data/ !mv train_label.npy ./work/ccf/data/ !mv train_data.npy ./work/ccf/data/ # 进入到gitclone 的ccf目录下 %cd ~/work/ccf/ ``` ## 配置文件 由于是本模型是采用三流同结构算法 故有3个不同的配置文件以配置不同的输入流 三个配置文件分别为: `configs/recognition/ctrgcn/res2ctrgcn_keypoint_bone.yaml` `configs/recognition/ctrgcn/res2ctrgcn_keypoint_joint.yaml` 
`configs/recognition/ctrgcn/res2ctrgcn_keypoint_velocity.yaml` ### 以`configs/recognition/ctrgcn/res2ctrgcn_keypoint_bone.yaml`为例子的配置文件的内容 通过yaml配置文件的方式选择不同的算法和训练参数等,这里我们使用`configs/recognition/ctrgcn/res2ctrgcn_keypoint_bone.yaml`配置文件完成RES2CTR-GCN模型算法训练。从该配置文件中,我们可以得到如下信息: ### 网络结构 ```yaml MODEL: framework: "RecognizerGCN" backbone: name: "RES2CTRGCN" head: name: "CTRGCNHead" num_classes: 30 ``` 表示我们使用的是RES2CTR-GCN算法,framework为`RecognizerGCN`,backbone是时空图卷积网络`RES2CTR-GCN`,head使用对应的`CTRGCNHead`,采用soft-label计算损失函数,损失函数是`CrossEntropyLoss`。 ### 数据路径 ```yaml DATASET: batch_size: 8 num_workers: 4 test_batch_size: 1 test_num_workers: 0 train: format: "SkeletonDataset" file_path: "data/train_bone_data.npy" # 手动配置 label_path: "data/train_label.npy" # 手动配置 test: format: "SkeletonDataset" file_path: "data/test_bone_B_data.npy" # 手动配置 test_mode: True ``` 训练数据路径通过`DATASET.train.file_path`字段指定,训练标签路径通过`DATASET.train.label_path`字段指定,测试数据路径通过`DATASET.test.file_path`字段指定。这三个路径**需要用户在配置文件`configs/recognition/ctrgcn/res2ctrgcn_keypoint_joint.yaml`中手动配置好**。本项目中路径示例如上所示。 ### 数据处理 ```yaml PIPELINE: train: sample: name: "SampleFrame" window_size: 2000 transform: - SkeletonNorm: test: sample: name: "SampleFrame" window_size: 2000 transform: - SkeletonNorm: ``` 数据处理主要包括两步操作,分别为`SampleFrame`和`SkeletonNorm`。 ### 优化器 ```yaml OPTIMIZER: #OPTIMIZER field name: 'Momentum' momentum: 0.9 learning_rate: iter_step: True name: 'CustomWarmupCosineDecay' max_epoch: 100 warmup_epochs: 10 warmup_start_lr: 0.005 cosine_base_lr: 0.1 ``` 网络训练使用的优化器为`Momentum`,学习率更新策略为`CustomWarmupCosineDecay`。 ## 一键启动训练脚本 ### 运行脚本指令 ```bash bash train.sh ``` 你将会看到类似如下的训练日志 ```txt [11/14 13:47:39] epoch:[ 1/100] train step:0  loss: 3.45860 lr: 0.005000 top1: 0.11111 top5: 0.11111 batch_cost: 2.37207 sec, reader_cost: 0.15600 sec, ips: 3.79416 instance/sec. 
[11/14 13:47:48] epoch:[ 1/100] train step:10  loss: 3.21713 lr: 0.005286 top1: 0.00000 top5: 0.42159 batch_cost: 0.96801 sec, reader_cost: 0.00000 sec, ips: 9.29743 instance/sec. [11/14 13:47:58] epoch:[ 1/100] train step:20  loss: 3.16619 lr: 0.005571 top1: 0.21693 top5: 0.32804 batch_cost: 0.98901 sec, reader_cost: 0.00000 sec, ips: 9.10002 instance/sec. [11/14 13:48:08] epoch:[ 1/100] train step:30  loss: 2.65808 lr: 0.005857 top1: 0.22222 top5: 0.55555 batch_cost: 0.95300 sec, reader_cost: 0.00000 sec, ips: 9.44386 instance/sec. [11/14 13:48:17] epoch:[ 1/100] train step:40  loss: 2.95422 lr: 0.006143 top1: 0.00000 top5: 0.49274 batch_cost: 0.92300 sec, reader_cost: 0.00000 sec, ips: 9.75082 instance/sec. [11/14 13:48:27] epoch:[ 1/100] train step:50  loss: 3.37573 lr: 0.006428 top1: 0.07475 top5: 0.29899 batch_cost: 0.92377 sec, reader_cost: 0.00000 sec, ips: 9.74270 instance/sec. [11/14 13:48:36] epoch:[ 1/100] train step:60  loss: 2.69240 lr: 0.006714 top1: 0.33333 top5: 0.58678 batch_cost: 0.91400 sec, reader_cost: 0.00000 sec, ips: 9.84682 instance/sec. [11/14 13:48:46] epoch:[ 1/100] train step:70  loss: 2.88237 lr: 0.007000 top1: 0.00000 top5: 0.53872 batch_cost: 0.91900 sec, reader_cost: 0.00000 sec, ips: 9.79326 instance/sec. [11/14 13:48:55] epoch:[ 1/100] train step:80  loss: 3.31014 lr: 0.007285 top1: 0.11111 top5: 0.44141 batch_cost: 0.95794 sec, reader_cost: 0.00000 sec, ips: 9.39519 instance/sec. [11/14 13:49:05] epoch:[ 1/100] train step:90  loss: 2.97485 lr: 0.007571 top1: 0.11111 top5: 0.55454 batch_cost: 0.99122 sec, reader_cost: 0.00100 sec, ips: 9.07971 instance/sec. [11/14 13:49:15] epoch:[ 1/100] train step:100  loss: 3.22369 lr: 0.007857 top1: 0.00000 top5: 0.42589 batch_cost: 1.02620 sec, reader_cost: 0.00000 sec, ips: 8.77022 instance/sec. [11/14 13:49:24] epoch:[ 1/100] train step:110  loss: 2.41740 lr: 0.008142 top1: 0.52294 top5: 0.83453 batch_cost: 0.98703 sec, reader_cost: 0.00100 sec, ips: 9.11828 instance/sec. 
[11/14 13:49:34] epoch:[ 1/100] train step:120  loss: 2.87388 lr: 0.008428 top1: 0.00000 top5: 0.62665 batch_cost: 0.93400 sec, reader_cost: 0.00000 sec, ips: 9.63598 instance/sec. [11/14 13:49:43] epoch:[ 1/100] train step:130  loss: 2.71671 lr: 0.008714 top1: 0.33333 top5: 0.73594 batch_cost: 1.01655 sec, reader_cost: 0.00000 sec, ips: 8.85346 instance/sec. [11/14 13:49:53] epoch:[ 1/100] train step:140  loss: 2.67541 lr: 0.008999 top1: 0.33332 top5: 0.77776 batch_cost: 0.91100 sec, reader_cost: 0.00000 sec, ips: 9.87926 instance/sec. [11/14 13:50:02] epoch:[ 1/100] train step:150  loss: 3.34374 lr: 0.009285 top1: 0.00000 top5: 0.40388 batch_cost: 0.92400 sec, reader_cost: 0.00000 sec, ips: 9.74027 instance/sec. [11/14 13:50:11] epoch:[ 1/100] train step:160  loss: 3.10928 lr: 0.009571 top1: 0.11111 top5: 0.55556 batch_cost: 0.92956 sec, reader_cost: 0.00100 sec, ips: 9.68198 instance/sec. [11/14 13:50:21] epoch:[ 1/100] train step:170  loss: 3.18118 lr: 0.009856 top1: 0.00000 top5: 0.55554 batch_cost: 0.92963 sec, reader_cost: 0.00000 sec, ips: 9.68124 instance/sec. [11/14 13:50:30] epoch:[ 1/100] train step:180  loss: 2.78696 lr: 0.010142 top1: 0.33303 top5: 0.66576 batch_cost: 0.92700 sec, reader_cost: 0.00100 sec, ips: 9.70872 instance/sec. [11/14 13:50:39] epoch:[ 1/100] train step:190  loss: 3.06701 lr: 0.010427 top1: 0.22962 top5: 0.52839 batch_cost: 0.93000 sec, reader_cost: 0.00000 sec, ips: 9.67740 instance/sec. [11/14 13:50:48] epoch:[ 1/100] train step:200  loss: 2.81732 lr: 0.010713 top1: 0.00000 top5: 0.66872 batch_cost: 0.92860 sec, reader_cost: 0.00000 sec, ips: 9.69201 instance/sec. [11/14 13:50:58] epoch:[ 1/100] train step:210  loss: 2.74136 lr: 0.010999 top1: 0.22222 top5: 0.68260 batch_cost: 0.93100 sec, reader_cost: 0.00000 sec, ips: 9.66704 instance/sec. [11/14 13:51:07] epoch:[ 1/100] train step:220  loss: 3.03122 lr: 0.011284 top1: 0.19732 top5: 0.44444 batch_cost: 0.95900 sec, reader_cost: 0.00000 sec, ips: 9.38478 instance/sec. 
[11/14 13:51:16] epoch:[ 1/100] train step:230  loss: 2.72677 lr: 0.011570 top1: 0.21040 top5: 0.60753 batch_cost: 0.92000 sec, reader_cost: 0.00000 sec, ips: 9.78263 instance/sec. [11/14 13:51:26] epoch:[ 1/100] train step:240  loss: 2.53266 lr: 0.011856 top1: 0.42706 top5: 0.64349 batch_cost: 0.92751 sec, reader_cost: 0.00000 sec, ips: 9.70338 instance/sec. [11/14 13:51:35] epoch:[ 1/100] train step:250  loss: 2.67476 lr: 0.012141 top1: 0.11108 top5: 0.77764 batch_cost: 0.93000 sec, reader_cost: 0.00000 sec, ips: 9.67742 instance/sec. [11/14 13:51:44] epoch:[ 1/100] train step:260  loss: 2.96337 lr: 0.012427 top1: 0.09552 top5: 0.52438 batch_cost: 0.93500 sec, reader_cost: 0.00000 sec, ips: 9.62569 instance/sec. [11/14 13:51:53] epoch:[ 1/100] train step:270  loss: 3.01553 lr: 0.012713 top1: 0.22222 top5: 0.66056 batch_cost: 0.91700 sec, reader_cost: 0.00000 sec, ips: 9.81461 instance/sec. [11/14 13:52:03] epoch:[ 1/100] train step:280  loss: 3.02630 lr: 0.012998 top1: 0.22222 top5: 0.51143 batch_cost: 0.91700 sec, reader_cost: 0.00000 sec, ips: 9.81463 instance/sec. [11/14 13:52:12] epoch:[ 1/100] train step:290  loss: 2.64080 lr: 0.013284 top1: 0.10819 top5: 0.65205 batch_cost: 0.93278 sec, reader_cost: 0.00000 sec, ips: 9.64855 instance/sec. [11/14 13:52:21] epoch:[ 1/100] train step:300  loss: 2.56226 lr: 0.013570 top1: 0.33230 top5: 0.77640 batch_cost: 0.92300 sec, reader_cost: 0.00100 sec, ips: 9.75082 instance/sec. [11/14 13:52:30] epoch:[ 1/100] train step:310  loss: 2.31375 lr: 0.013855 top1: 0.21937 top5: 0.87747 batch_cost: 0.91700 sec, reader_cost: 0.00100 sec, ips: 9.81463 instance/sec. [11/14 13:52:40] epoch:[ 1/100] train step:320  loss: 2.93230 lr: 0.014141 top1: 0.11111 top5: 0.44344 batch_cost: 0.88833 sec, reader_cost: 0.00000 sec, ips: 10.13133 instance/sec. 
[11/14 13:52:42] [31mEND epoch:1 [0m [95mtrain loss_avg: 2.93599 top1_avg: 0.14609 top5_avg: 0.56468 avg_batch_cost: 0.93686 sec, avg_reader_cost: 0.00000 sec, batch_cost_sum: 305.87457 sec, avg_ips: 9.53332 instance/sec. ``` #### 注意事项 请使用<span style='color:red'>GPU版本</span>的配置环境运行本模块 ``` # 开始训练 !bash train.sh ``` ## 测试脚本 模型训练完成后,可使用测试脚本进行评估, 该测试脚本会执行多流的模型预测,然后将模型预测的结果进行融合 ### 测试脚本启动命令 ```bash bash inference.sh ``` - 评估结果保存在`final_submission.csv`文件中,可在[评测官网](https://aistudio.baidu.com/aistudio/competition/detail/115)提交查看得分。 ``` #启动预测脚本指令 !bash inference.sh ``` 测试脚本运行完成后,可以在当前目录中得到`final_submission.csv`文件,将该文件提交至[评测官网](https://aistudio.baidu.com/aistudio/competition/detail/115),即可以查看在A榜得分。示例给出的模型文件,在A榜的得分为72.452,在B榜的得分为66.246
github_jupyter
# Plus proches voisins - évaluation Comment évaluer la pertinence d'un modèle des plus proches voisins. ``` %matplotlib inline from papierstat.datasets import load_wines_dataset df = load_wines_dataset() X = df.drop(['quality', 'color'], axis=1) y = df['quality'] from sklearn.neighbors import KNeighborsRegressor knn = KNeighborsRegressor(n_neighbors=1) knn.fit(X, y) prediction = knn.predict(X) ``` Le modèle ne fait pas d'erreur sur tous les exemples de la base de vins. C'est normal puisque le plus proche voisin d'un vin est nécessairement lui-même, la note prédite et la sienne. ``` min(prediction - y), max(prediction - y) ``` Il est difficile dans ces conditions de dire si la prédiction et de bonne qualité. On pourrait estimer la qualité de la prédiction sur un vin nouveau mais il n'y en a aucun pour le moment et ce n'est pas l'ordinateur qui va les fabriquer. On peut peut-être regarder combien de fois le plus proche voisin d'un vin autre que le vin lui-même partage la même note. ``` from sklearn.neighbors import NearestNeighbors nn = NearestNeighbors(n_neighbors=2) nn.fit(X) distance, index = nn.kneighbors(X) proche = index[:, 1].ravel() note_proche = [y[i] for i in proche] ``` Il ne reste plus qu'à calculer la différence entre la note d'un vin et celle de son plus proche voisin autre que lui-même. ``` diff = y - note_proche ax = diff.hist(bins=20, figsize=(3,3)) ax.set_title('Histogramme des différences\nde prédiction') ``` Ca marche pour les deux tiers de la base, pour le tiers restant, les notes diffèrent. On peut maintenant regarder si la distance entre ces deux voisins pourrait être corrélée à cette différence. ``` import pandas dif = pandas.DataFrame(dict(dist=distance[:,1], diff=diff)) ax = dif.plot(x="dist", y="diff", kind='scatter', figsize=(3,3)) ax.set_title('Graphe XY - distance / différence'); ``` Ce n'est pas très lisible. Essayons un autre type de graphique. 
``` from seaborn import violinplot, boxplot import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 2, figsize=(8,3)) violinplot(x="diff", y="dist", data=dif, ax=ax[0]) ax[0].set_ylim([0,25]) ax[0].set_title('Violons distribution\ndifférence / distance') boxplot(x="diff", y="dist", data=dif, ax=ax[1]) ax[1].set_title('Boxplots distribution\ndifférence / distance') ax[1].set_ylim([0,25]); ``` A priori le modèle n'est pas si mauvais, les voisins partageant la même note ont l'air plus proches que ceux qui ont des notes différentes. ``` import numpy dif['abs_diff'] = numpy.abs(dif['diff']) from seaborn import jointplot ax = jointplot("dist", "abs_diff", data=dif[dif.dist <= 10], kind="kde", space=0, color="g", size=4) ax.ax_marg_y.set_title('Heatmap distribution distance / différence'); ``` Les vins proches se ressemblent pour la plupart. C'est rassurant pour la suite. 61% des vins ont un voisin proche partageant la même note. ``` len(dif[dif['abs_diff'] == 0]) / dif.shape[0] ```
github_jupyter
# Introduction to the scikit-learn -- supervised learning and model selection (part 3) - toc: true - badges: true - categories: [EEG, jupyter] - description: To visualize the workings of machine learning algorithms, it is often helpful to study two-dimensional or one-dimensional data, that is data with only one or two features. While in practice, datasets usually have many more features, it is hard to plot high-dimensional data in on two-dimensional screens. To visualize the workings of machine learning algorithms, it is often helpful to study two-dimensional or one-dimensional data, that is data with only one or two features. While in practice, datasets usually have many more features, it is hard to plot high-dimensional data in on two-dimensional screens. We will illustrate some very simple examples before we move on to more "real world" data sets. First, we will look at a two class classification problem in two dimensions. We use the synthetic data generated by the ``make_blobs`` function. ``` from IPython.display import YouTubeVideo YouTubeVideo('wCpCDbkDJXQ', width=700, height=400) %matplotlib inline import matplotlib.pyplot as plt import numpy as np import torch from sklearn.datasets import make_blobs X, y = make_blobs(centers=2, random_state=0) print('X ~ n_samples x n_features:', X.shape) print('y ~ n_samples:', y.shape) print('\nFirst 5 samples:\n', X[:5, :]) print('\nFirst 5 labels:', y[:5]) ``` As the data is two-dimensional, we can plot each sample as a point in a two-dimensional coordinate system, with the first feature being the x-axis and the second feature being the y-axis. ``` plt.scatter(X[y == 0, 0], X[y == 0, 1], c='blue', s=40, label='0') plt.scatter(X[y == 1, 0], X[y == 1, 1], c='red', s=40, label='1', marker='s') plt.xlabel('first feature') plt.ylabel('second feature') plt.legend(loc='upper right'); ``` Classification is a supervised task, and since we are interested in its performance on unseen data, we split our data into two parts: 1. 
a training set that the learning algorithm uses to fit the model 2. a test set to evaluate the generalization performance of the model The ``train_test_split`` function from the ``model_selection`` module does that for us -- we will use it to split a dataset into 75% training data and 25% test data. <img src="figures/train_test_split_matrix.svg" width="25%"> ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1234, stratify=y) ``` ### The scikit-learn estimator API <img src="figures/supervised_workflow.svg" width="30%"> Every algorithm is exposed in scikit-learn via an ''Estimator'' object. (All models in scikit-learn have a very consistent interface). For instance, we first import the logistic regression class. ``` from sklearn.linear_model import LogisticRegression ``` Next, we instantiate the estimator object. ``` classifier = LogisticRegression() X_train.shape y_train.shape ``` To built the model from our data, that is to learn how to classify new points, we call the ``fit`` function with the training data, and the corresponding training labels (the desired output for the training data point): ``` classifier.fit? classifier.fit(X_train, y_train) ``` (Some estimator methods such as `fit` return `self` by default. Thus, after executing the code snippet above, you will see the default parameters of this particular instance of `LogisticRegression`. Another way of retrieving the estimator's ininitialization parameters is to execute `classifier.get_params()`, which returns a parameter dictionary.) 
``` classifier.coef_ classifier.get_params() ``` We can then apply the model to unseen data and use the model to predict the estimated outcome using the ``predict`` method: ``` prediction = classifier.predict(X_test) ``` We can compare these against the true labels: ``` print(prediction) print(y_test) ``` We can evaluate our classifier quantitatively by measuring what fraction of predictions is correct. This is called **accuracy**: ``` prediction == y_test np.mean(prediction == y_test) ``` There is also a convenience function , ``score``, that all scikit-learn classifiers have to compute this directly from the test data: ``` classifier.score(X_test, y_test) ``` It is often helpful to compare the generalization performance (on the test set) to the performance on the training set: ``` classifier.score(X_train, y_train) ``` LogisticRegression is a so-called linear model, that means it will create a decision that is linear in the input space. In 2d, this simply means it finds a line to separate the blue from the red: ``` from figures import plot_2d_separator plt.scatter(X[y == 0, 0], X[y == 0, 1], c='blue',s=40, label='0') plt.scatter(X[y == 1, 0], X[y == 1, 1], c='red', s=40, label='1', marker='s') plt.xlabel("first feature") plt.ylabel("second feature") plot_2d_separator(classifier, X) plt.legend(loc='upper right'); ``` **Estimated parameters**: All the estimated model parameters are attributes of the estimator object ending by an underscore. Here, these are the coefficients and the offset of the line: ``` print(classifier.coef_) print(classifier.intercept_) ``` K Nearest Neighbors (KNN) ------------------------------------------------ Another popular and easy to understand classifier is K nearest neighbors (kNN). It has one of the simplest learning strategies: given a new, unknown observation, look up in your reference database which ones have the closest features and assign the predominant class. 
The KNN classifier is a non-parametric classifier that simply stores the training data $\mathcal{D}$ and classifies each new instance $x$ using a majority vote over its' set of $K$ nearest neighbors $\mathcal{N}_K(x)$ computed using any distance function $d: R^D \times\mathbb{R}^D \rightarrow \mathbb{R} $. KNN Classification Function: $$g_{KNN}(x) = argmax_{y\in\mathcal{Y}} \sum_{i\in \mathcal{N}_K(x)} \mathbb{I}[y_i=y]$$ Use of KNN requires choosing the distance function $d$ and the number of neighbors $K$. ![](https://cdn-images-1.medium.com/max/900/1*k8WEP2Kn3YDOopnLzljAJA.png) ![](https://cdn-images-1.medium.com/max/900/1*CZwsWrWNj2KqF1jJ-Z3SPA.png) ![](https://cdn-images-1.medium.com/max/1200/1*4vdvnkZoWgOp0vcLF4wFcA.png) - In general, KNN can work with any distance function $d$ satisfying non-negativity $d(\bf{x},\bf{x}')\geq 0$ and identity of indiscernibles $d(\bf{x},\bf{x})=0$. - Alternatively, KNN can work with any similarity function $s$ satisfying non-negativity $s(\bf{x},\bf{y})\geq 0$ that attains it's maximum on indiscernibles $s(\bf{x},\bf{x})=\max_{\bf{x}'} s(\bf{x},\bf{x}')$. - However, the more structure the distance or similarity function has (symmetry, triangle inequality), the more structure you can exploit when designing algorithms. ## Minkowski Distance ($\ell_p$ norms)} Given two data vectors $\bf{x},\bf{x}' \in \mathbb{R}^D$, the Minkowski Distance with parameter $p$ (the $\ell_p$ norm) is a proper metric defined as follows: \begin{align*} d_p(\bf{x},\bf{x}') &= ||\bf{x}-\bf{x}'||_p \\ &=\left(\sum_{i=1}^D |x_d-x'_d|^p\right)^{1/p} \end{align*} Special cases include Euclidean distance ($p=2$), Manhattan distance ($p=1$) and Chebyshev distance ($p=\infty$). ## Brute Force KNN - Given any distance function $d$, brute force KNN works by computing the distance $d_i = d(\bf{x}_i,\bf{x}_*)$ from a target point $\bf{x}_*$ to all of the training points $\bf{x}_i$. 
- You then simply sort the distances $\{d_i,i=1:N\}$ and choose the data cases with the $K$ smallest distances to form the neighbor set $\mathcal{N}_K(\bf{x}_*)$. Using a similarity function is identical, but you select the $K$ most similar data cases. - Once the $K$ neighbors are selected, applying the classification rule is easy. In Sklearn the KNN interface is exactly the same as for ``LogisticRegression above``. ``` from sklearn.neighbors import KNeighborsClassifier ``` This time we set a parameter of the KNeighborsClassifier to tell it we only want to look at one nearest neighbor: ``` knn = KNeighborsClassifier(n_neighbors=1) ``` We fit the model with out training data ``` knn.fit(X_train, y_train) plt.scatter(X[y == 0, 0], X[y == 0, 1], c='blue', s=40, label='0') plt.scatter(X[y == 1, 0], X[y == 1, 1], c='red', s=40, label='1', marker='s') plt.xlabel("first feature") plt.ylabel("second feature") plot_2d_separator(knn, X) plt.legend(loc='upper right'); knn.score(X_test, y_test) ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li> Apply the KNeighborsClassifier to the ``iris`` dataset. Play with different values of the ``n_neighbors`` and observe how training and test score change. 
</li> </ul> </div> ``` # %load solutions/05A_knn_with_diff_k.py from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() X = iris.data y = iris.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1234, stratify=y) X_trainsub, X_valid, y_trainsub, y_valid = train_test_split(X_train, y_train, test_size=0.5, random_state=1234, stratify=y_train) for k in range(1, 20): knn = KNeighborsClassifier(n_neighbors=k) train_score = knn.fit(X_trainsub, y_trainsub).\ score(X_trainsub, y_trainsub) valid_score = knn.score(X_valid, y_valid) print('k: %d, Train/Valid Acc: %.3f/%.3f' % (k, train_score, valid_score)) knn = KNeighborsClassifier(n_neighbors=9) knn.fit(X_train, y_train) print('k=9 Test Acc: %.3f' % knn.score(X_test, y_test)) %matplotlib inline import matplotlib.pyplot as plt import numpy as np ``` # Supervised Learning -- Regression Analysis In regression we are trying to predict a continuous output variable -- in contrast to the nominal variables we were predicting in the previous classification examples. Let's start with a simple toy example with one feature dimension (explanatory variable) and one target variable. We will create a dataset out of a sine curve with some noise: ``` x = np.linspace(-3, 3, 100) print(x) rng = np.random.RandomState(42) y = np.sin(4 * x) + x + rng.uniform(size=len(x)) plt.plot(x, y, 'o'); ``` Linear Regression ================= The first model that we will introduce is the so-called simple linear regression. Here, we want to fit a line to the data, which One of the simplest models again is a linear one, that simply tries to predict the data as lying on a line. One way to find such a line is `LinearRegression` (also known as [*Ordinary Least Squares (OLS)*](https://en.wikipedia.org/wiki/Ordinary_least_squares) regression). 
The interface for LinearRegression is exactly the same as for the classifiers before, only that ``y`` now contains float values, instead of classes. As we remember, the scikit-learn API requires us to provide the target variable (`y`) as a 1-dimensional array; scikit-learn's API expects the samples (`X`) in form a 2-dimensional array -- even though it may only consist of 1 feature. Thus, let us convert the 1-dimensional `x` NumPy array into an `X` array with 2 axes: ``` print('Before: ', x.shape) X = x[:, np.newaxis] print('After: ', X.shape) ``` Again, we start by splitting our dataset into a training (75%) and a test set (25%): ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) ``` Next, we use the learning algorithm implemented in `LinearRegression` to **fit a regression model to the training data**: ``` from sklearn.linear_model import LinearRegression regressor = LinearRegression() regressor.fit(X_train, y_train) ``` After fitting to the training data, we paramerterized a linear regression model with the following values. ``` print('Weight coefficients: ', regressor.coef_) print('y-axis intercept: ', regressor.intercept_) ``` Since our regression model is a linear one, the relationship between the target variable (y) and the feature variable (x) is defined as $$y = weight \times x + \text{intercept}$$. Plugging in the min and max values into thos equation, we can plot the regression fit to our training data: ``` min_pt = X.min() * regressor.coef_[0] + regressor.intercept_ max_pt = X.max() * regressor.coef_[0] + regressor.intercept_ print(min_pt, max_pt, X.min(), X.max()) plt.plot([X.min(), X.max()], [min_pt, max_pt]) plt.plot(X_train, y_train, 'o'); ``` Similar to the estimators for classification in the previous notebook, we use the `predict` method to predict the target variable. 
And we expect these predicted values to fall onto the line that we plotted previously: ``` y_pred_train = regressor.predict(X_train) plt.plot(X_train, y_train, 'o', label="data") plt.plot(X_train, y_pred_train, 'o', label="prediction") plt.plot([X.min(), X.max()], [min_pt, max_pt], label='fit') plt.legend(loc='best') ``` As we can see in the plot above, the line is able to capture the general slope of the data, but not many details. Next, let's try the test set: ``` y_pred_test = regressor.predict(X_test) print(X_test.shape) print(y_pred_test.shape) plt.plot(X_test, y_test, 'o', label="data") plt.plot(X_test, y_pred_test, 'o', label="prediction") plt.plot([X.min(), X.max()], [min_pt, max_pt], label='fit') plt.legend(loc='best'); ``` Again, scikit-learn provides an easy way to evaluate the prediction quantitatively using the ``score`` method. For regression tasks, this is the R<sup>2</sup> score. Another popular way would be the Mean Squared Error (MSE). As its name implies, the MSE is simply the average squared difference over the predicted and actual target values $$MSE = \frac{1}{n} \sum_{i=1}^{n} (\text{predicted}_i - \text{true}_i)^2$$ ``` regressor.score(X_test, y_test) ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li> Add a feature containing `sin(4x)` to `X` and redo the fit. Visualize the predictions with this new richer, yet linear, model. </li> </ul> </div> ``` # %load solutions/06B_lin_with_sine.py ``` KNeighborsRegression ======================= As for classification, we can also use a neighbor based method for regression. We can simply take the output of the nearest point, or we could average several nearest points. This method is less popular for regression than for classification, but still a good baseline. 
``` from sklearn.neighbors import KNeighborsRegressor kneighbor_regression = KNeighborsRegressor(n_neighbors=1) kneighbor_regression.fit(X_train, y_train) ``` Again, let us look at the behavior on training and test set: ``` y_pred_train = kneighbor_regression.predict(X_train) plt.plot(X_train, y_train, 'o', label="data", markersize=10) plt.plot(X_train, y_pred_train, 's', label="prediction", markersize=4) plt.legend(loc='best'); ``` On the training set, we do a perfect job: each point is its own nearest neighbor! ``` y_pred_test = kneighbor_regression.predict(X_test) plt.plot(X_test, y_test, 'o', label="data", markersize=8) plt.plot(X_test, y_pred_test, 's', label="prediction", markersize=4) plt.legend(loc='best'); ``` On the test set, we also do a better job of capturing the variation, but our estimates look much messier than before. Let us look at the R<sup>2</sup> score: ``` kneighbor_regression.score(X_test, y_test) ``` Much better than before! Here, the linear model was not a good fit for our problem; it was lacking in complexity and thus under-fit our data. <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li> Compare the KNeighborsRegressor and LinearRegression on the boston housing dataset. You can load the dataset using ``sklearn.datasets.load_boston``. You can learn about the dataset by reading the ``DESCR`` attribute. </li> </ul> </div> ``` # %load solutions/06A_knn_vs_linreg.py ``` # Cross-Validation and scoring methods In the previous sections and notebooks, we split our dataset into two parts, a training set and a test set. We used the training set to fit our model, and we used the test set to evaluate its generalization performance -- how well it performs on new, unseen data. <img src="figures/train_test_split.svg" width="80%"> However, often (labeled) data is precious, and this approach lets us only use ~ 3/4 of our data for training. On the other hand, we will only ever try to apply our model 1/4 of our data for testing. 
A common way to use more of the data to build a model, but also get a more robust estimate of the generalization performance, is cross-validation. In cross-validation, the data is split repeatedly into a training and non-overlapping test-sets, with a separate model built for every pair. The test-set scores are then aggregated for a more robust estimate. The most common way to do cross-validation is k-fold cross-validation, in which the data is first split into k (often 5 or 10) equal-sized folds, and then for each iteration, one of the k folds is used as test data, and the rest as training data: <img src="figures/cross_validation.svg" width="80%"> This way, each data point will be in the test-set exactly once, and we can use all but a k'th of the data for training. Let us apply this technique to evaluate the KNeighborsClassifier algorithm on the Iris dataset: ``` from sklearn.datasets import load_iris from sklearn.neighbors import KNeighborsClassifier iris = load_iris() X, y = iris.data, iris.target classifier = KNeighborsClassifier() ``` The labels in iris are sorted, which means that if we split the data as illustrated above, the first fold will only have the label 0 in it, while the last one will only have the label 2: ``` y ``` To avoid this problem in evaluation, we first shuffle our data: ``` import numpy as np rng = np.random.RandomState(0) permutation = rng.permutation(len(X)) X, y = X[permutation], y[permutation] print(y) ``` Now implementing cross-validation is easy: ``` k = 5 n_samples = len(X) fold_size = n_samples // k scores = [] masks = [] for fold in range(k): # generate a boolean mask for the test set in this fold test_mask = np.zeros(n_samples, dtype=bool) test_mask[fold * fold_size : (fold + 1) * fold_size] = True # store the mask for visualization masks.append(test_mask) # create training and test sets using this mask X_test, y_test = X[test_mask], y[test_mask] X_train, y_train = X[~test_mask], y[~test_mask] # fit the classifier 
classifier.fit(X_train, y_train) # compute the score and record it scores.append(classifier.score(X_test, y_test)) ``` Let's check that our test mask does the right thing: ``` import matplotlib.pyplot as plt %matplotlib inline plt.matshow(masks, cmap='gray_r') ``` And now let's look a the scores we computed: ``` print(scores) print(np.mean(scores)) ``` As you can see, there is a rather wide spectrum of scores from 90% correct to 100% correct. If we only did a single split, we might have gotten either answer. As cross-validation is such a common pattern in machine learning, there are functions to do the above for you with much more flexibility and less code. The ``sklearn.model_selection`` module has all functions related to cross validation. There easiest function is ``cross_val_score`` which takes an estimator and a dataset, and will do all of the splitting for you: ``` from sklearn.model_selection import cross_val_score scores = cross_val_score(classifier, X, y) print(scores) print(np.mean(scores)) ``` As you can see, the function uses three folds by default. You can change the number of folds using the cv argument: ``` cross_val_score(classifier, X, y, cv=5) ``` There are also helper objects in the cross-validation module that will generate indices for you for all kinds of different cross-validation methods, including k-fold: ``` from sklearn.model_selection import KFold, StratifiedKFold, ShuffleSplit ``` By default, cross_val_score will use ``StratifiedKFold`` for classification, which ensures that the class proportions in the dataset are reflected in each fold. If you have a binary classification dataset with 90% of data point belonging to class 0, that would mean that in each fold, 90% of datapoints would belong to class 0. If you would just use KFold cross-validation, it is likely that you would generate a split that only contains class 0. It is generally a good idea to use ``StratifiedKFold`` whenever you do classification. 
``StratifiedKFold`` would also remove our need to shuffle ``iris``. Let's see what kinds of folds it generates on the unshuffled iris dataset. Each cross-validation class is a generator of sets of training and test indices: ``` cv = StratifiedKFold(n_splits=5) for train, test in cv.split(iris.data, iris.target): print(test) ``` As you can see, there are a couple of samples from the beginning, then from the middle, and then from the end, in each of the folds. This way, the class ratios are preserved. Let's visualize the split: ``` def plot_cv(cv, features, labels): masks = [] for train, test in cv.split(features, labels): mask = np.zeros(len(labels), dtype=bool) mask[test] = 1 masks.append(mask) plt.matshow(masks, cmap='gray_r') plot_cv(StratifiedKFold(n_splits=5), iris.data, iris.target) ``` For comparison, again the standard KFold, that ignores the labels: ``` plot_cv(KFold(n_splits=5), iris.data, iris.target) ``` Keep in mind that increasing the number of folds will give you a larger training dataset, but will lead to more repetitions, and therefore a slower evaluation: ``` plot_cv(KFold(n_splits=10), iris.data, iris.target) ``` Another helpful cross-validation generator is ``ShuffleSplit``. This generator simply splits of a random portion of the data repeatedly. This allows the user to specify the number of repetitions and the training set size independently: ``` plot_cv(ShuffleSplit(n_splits=5, test_size=.2), iris.data, iris.target) ``` If you want a more robust estimate, you can just increase the number of splits: ``` plot_cv(ShuffleSplit(n_splits=20, test_size=.2), iris.data, iris.target) ``` You can use all of these cross-validation generators with the `cross_val_score` method: ``` cv = ShuffleSplit(n_splits=5, test_size=.2) cross_val_score(classifier, X, y, cv=cv) ``` <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li> Perform three-fold cross-validation using the ``KFold`` class on the iris dataset without shuffling the data. 
Can you explain the result? </li> </ul> </div> ``` # %load solutions/13_cross_validation.py cv = KFold(n_splits=3) cross_val_score(classifier, iris.data, iris.target, cv=cv) ``` # A recap on Scikit-learn's estimator interface Scikit-learn strives to have a uniform interface across all methods. Given a scikit-learn *estimator* object named `model`, the following methods are available (not all for each model): - Available in **all Estimators** + `model.fit()` : fit training data. For supervised learning applications, this accepts two arguments: the data `X` and the labels `y` (e.g. `model.fit(X, y)`). For unsupervised learning applications, `fit` takes only a single argument, the data `X` (e.g. `model.fit(X)`). - Available in **supervised estimators** + `model.predict()` : given a trained model, predict the label of a new set of data. This method accepts one argument, the new data `X_new` (e.g. `model.predict(X_new)`), and returns the learned label for each object in the array. + `model.predict_proba()` : For classification problems, some estimators also provide this method, which returns the probability that a new observation has each categorical label. In this case, the label with the highest probability is returned by `model.predict()`. + `model.decision_function()` : For classification problems, some estimators provide an uncertainty estimate that is not a probability. For binary classification, a decision_function >= 0 means the positive class will be predicted, while < 0 means the negative class. + `model.score()` : for classification or regression problems, most (all?) estimators implement a score method. Scores are between 0 and 1, with a larger score indicating a better fit. For classifiers, the `score` method computes the prediction accuracy. For regressors, `score` computes the coefficient of determination (R<sup>2</sup>) of the prediction. + `model.transform()` : For feature selection algorithms, this will reduce the dataset to the selected features. 
For some classification and regression models such as some linear models and random forests, this method reduces the dataset to the most informative features. These classification and regression models can therefore also be used as feature selection methods. - Available in **unsupervised estimators** + `model.transform()` : given an unsupervised model, transform new data into the new basis. This also accepts one argument `X_new`, and returns the new representation of the data based on the unsupervised model. + `model.fit_transform()` : some estimators implement this method, which more efficiently performs a fit and a transform on the same input data. + `model.predict()` : for clustering algorithms, the predict method will produce cluster labels for new data points. Not all clustering methods have this functionality. + `model.predict_proba()` : Gaussian mixture models (GMMs) provide the probability for each point to be generated by a given mixture component. + `model.score()` : Density models like KDE and GMMs provide the likelihood of the data under the model. Apart from ``fit``, the two most important functions are arguably ``predict`` to produce a target variable (a ``y``) ``transform``, which produces a new representation of the data (an ``X``). The following table shows for which class of models which function applies: <table> <tr style="border:None; font-size:20px; padding:10px;"><th>``model.predict``</th><th>``model.transform``</th></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>Classification</td><td>Preprocessing</td></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>Regression</td><td>Dimensionality Reduction</td></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>Clustering</td><td>Feature Extraction</td></tr> <tr style="border:None; font-size:20px; padding:10px;"><td>&nbsp;</td><td>Feature Selection</td></tr> </table>
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from tensorflow.keras import layers from tensorflow.keras import Model !wget --no-check-certificate \ https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 \ -O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 from tensorflow.keras.applications.inception_v3 import InceptionV3 local_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5' pre_trained_model = InceptionV3(input_shape = (150, 150, 3), include_top = False, weights = None) pre_trained_model.load_weights(local_weights_file) for layer in pre_trained_model.layers: layer.trainable = False # pre_trained_model.summary() last_layer = pre_trained_model.get_layer('mixed7') print('last layer output shape: ', last_layer.output_shape) last_output = last_layer.output from tensorflow.keras.optimizers import RMSprop # Flatten the output layer to 1 dimension x = layers.Flatten()(last_output) # Add a fully connected layer with 1,024 hidden units and ReLU activation x = layers.Dense(1024, activation='relu')(x) # Add a dropout rate of 0.2 x = layers.Dropout(0.2)(x) # Add a final sigmoid layer for classification x = layers.Dense (1, activation='sigmoid')(x) model = Model( pre_trained_model.input, x) model.compile(optimizer = RMSprop(lr=0.0001), loss = 'binary_crossentropy', metrics = ['accuracy']) !wget --no-check-certificate \ 
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \ -O /tmp/cats_and_dogs_filtered.zip from tensorflow.keras.preprocessing.image import ImageDataGenerator import os import zipfile local_zip = '//tmp/cats_and_dogs_filtered.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp') zip_ref.close() # Define our example directories and files base_dir = '/tmp/cats_and_dogs_filtered' train_dir = os.path.join( base_dir, 'train') validation_dir = os.path.join( base_dir, 'validation') train_cats_dir = os.path.join(train_dir, 'cats') # Directory with our training cat pictures train_dogs_dir = os.path.join(train_dir, 'dogs') # Directory with our training dog pictures validation_cats_dir = os.path.join(validation_dir, 'cats') # Directory with our validation cat pictures validation_dogs_dir = os.path.join(validation_dir, 'dogs')# Directory with our validation dog pictures train_cat_fnames = os.listdir(train_cats_dir) train_dog_fnames = os.listdir(train_dogs_dir) # Add our data-augmentation parameters to ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255., rotation_range = 40, width_shift_range = 0.2, height_shift_range = 0.2, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) # Note that the validation data should not be augmented! test_datagen = ImageDataGenerator( rescale = 1.0/255. 
) # Flow training images in batches of 20 using train_datagen generator train_generator = train_datagen.flow_from_directory(train_dir, batch_size = 20, class_mode = 'binary', target_size = (150, 150)) # Flow validation images in batches of 20 using test_datagen generator validation_generator = test_datagen.flow_from_directory( validation_dir, batch_size = 20, class_mode = 'binary', target_size = (150, 150)) history = model.fit( train_generator, validation_data = validation_generator, steps_per_epoch = 100, epochs = 20, validation_steps = 50, verbose = 2) import matplotlib.pyplot as plt acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r', label='Training accuracy') plt.plot(epochs, val_acc, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.legend(loc=0) plt.figure() plt.show() ```
github_jupyter
<center> <font size=5> <h1>Define working environment</h1> </font> </center> The following cells are used to: - Import needed libraries - Set the environment variables for Python, Anaconda, GRASS GIS and R statistical computing - Define the ["GRASSDATA" folder](https://grass.osgeo.org/grass73/manuals/helptext.html), the name of "location" and "mapset" where you will to work. **Import libraries** ``` ## Import libraries needed for setting parameters of operating system import os import sys ``` <center> <font size=3> <h3>Environment variables when working on Linux Mint</h3> </font> </center> **Set 'Python' and 'GRASS GIS' environment variables** Here, we set [the environment variables allowing to use of GRASS GIS](https://grass.osgeo.org/grass64/manuals/variables.html) inside this Jupyter notebook. Please change the directory path according to your own system configuration. ``` ### Define GRASS GIS environment variables for LINUX UBUNTU Mint 18.1 (Serena) # Check is environmental variables exists and create them (empty) if not exists. 
if not 'PYTHONPATH' in os.environ: os.environ['PYTHONPATH']='' if not 'LD_LIBRARY_PATH' in os.environ: os.environ['LD_LIBRARY_PATH']='' # Set environmental variables os.environ['GISBASE'] = '/home/tais/SRC/GRASS/grass_trunk/dist.x86_64-pc-linux-gnu' os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'bin') os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'script') os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'lib') #os.environ['PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python') os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python') os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python','grass') os.environ['PYTHONPATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'etc','python','grass','script') os.environ['PYTHONLIB'] = '/usr/lib/python2.7' os.environ['LD_LIBRARY_PATH'] += os.pathsep + os.path.join(os.environ['GISBASE'],'lib') os.environ['GIS_LOCK'] = '$$' os.environ['GISRC'] = os.path.join(os.environ['HOME'],'.grass7','rc') os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons') os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons','bin') os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons') os.environ['PATH'] += os.pathsep + os.path.join(os.environ['HOME'],'.grass7','addons','scripts') ## Define GRASS-Python environment sys.path.append(os.path.join(os.environ['GISBASE'],'etc','python')) ``` **Import GRASS Python packages** ``` ## Import libraries needed to launch GRASS GIS in the jupyter notebook import grass.script.setup as gsetup ## Import libraries needed to call GRASS using Python import grass.script as gscript from grass.script import core as grass ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** **Display current 
environment variables of your computer** ``` ## Display the current defined environment variables for key in os.environ.keys(): print "%s = %s \t" % (key,os.environ[key]) ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** <center> <font size=5> <h1>Define functions</h1> </font> </center> This section of the notebook is dedicated to defining functions which will then be called later in the script. If you want to create your own functions, define them here. ### Function for computing processing time The "print_processing_time" is used to calculate and display the processing time for various stages of the processing chain. At the beginning of each major step, the current time is stored in a new variable, using [time.time() function](https://docs.python.org/2/library/time.html). At the end of the stage in question, the "print_processing_time" function is called and takes as argument the name of this new variable containing the recorded time at the beginning of the stage, and an output message. ``` ## Import library for managing time in python import time ## Function "print_processing_time()" compute processing time and printing it. # The argument "begintime" wait for a variable containing the begintime (result of time.time()) of the process for which to compute processing time. # The argument "printmessage" wait for a string format with information about the process. 
def print_processing_time(begintime, printmessage): endtime=time.time() processtime=endtime-begintime remainingtime=processtime days=int((remainingtime)/86400) remainingtime-=(days*86400) hours=int((remainingtime)/3600) remainingtime-=(hours*3600) minutes=int((remainingtime)/60) remainingtime-=(minutes*60) seconds=round((remainingtime)%60,1) if processtime<60: finalprintmessage=str(printmessage)+str(seconds)+" seconds" elif processtime<3600: finalprintmessage=str(printmessage)+str(minutes)+" minutes and "+str(seconds)+" seconds" elif processtime<86400: finalprintmessage=str(printmessage)+str(hours)+" hours and "+str(minutes)+" minutes and "+str(seconds)+" seconds" elif processtime>=86400: finalprintmessage=str(printmessage)+str(days)+" days, "+str(hours)+" hours and "+str(minutes)+" minutes and "+str(seconds)+" seconds" return finalprintmessage ``` ### Function for creation of configuration file for r.li (landscape units provided as polygons) (multiprocessed) ``` ##### Function that create the r.li configuration file for a list of landcover raster. ### It enable to create in one function as many configuration file as the number of raster provided in 'listoflandcoverraster'. ### It could be use only in case study with a several landcover raster and only one landscape unit layer. ### So, the landscape unit layer if fixed and there are the landcover raster which change. # 'listoflandcoverraster' wait for a list with the name (string) of landcover rasters. # 'landscape_polygons' wait for the name (string) of the vector layer containing the polygons to be used as landscape units. # 'uniqueid' wait for the name of the 'landscape_polygons' layer's columns containing unique ID for each landscape unit polygon. # 'returnlistpath' wait for a boolean value (True/False) according to the fact that a list containing the path to the configuration files is desired. # 'ncores' wait for a integer corresponding to the number of desired cores to be used for parallelization. 
# Import libraries for multiprocessing import multiprocessing from multiprocessing import Pool from functools import partial # Function that copy the landscape unit raster masks on a new layer with name corresponding to the current 'landcover_raster' def copy_landscapeunitmasks(current_landcover_raster,base_landcover_raster,landscape_polygons,landscapeunit_bbox,cat): ### Copy the landscape units mask for the current 'cat' # Define the name of the current "current_landscapeunit_rast" layer current_landscapeunit_rast=current_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0]+"_"+str(cat) base_landscapeunit_rast=base_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0]+"_"+str(cat) # Copy the the landscape unit created for the first landcover map in order to match the name of the current landcover map gscript.run_command('g.copy', overwrite=True, quiet=True, raster=(base_landscapeunit_rast,current_landscapeunit_rast)) # Add the line to the maskedoverlayarea variable maskedoverlayarea="MASKEDOVERLAYAREA "+current_landscapeunit_rast+"|"+landscapeunit_bbox[cat] return maskedoverlayarea # Function that create the r.li configuration file for the base landcover raster and then for all the binary rasters def create_rli_configfile(listoflandcoverraster,landscape_polygons,uniqueid='cat',returnlistpath=True,ncores=2): # Check if 'listoflandcoverraster' is not empty if len(listoflandcoverraster)==0: sys.exit("The list of landcover raster is empty and should contain at least one raster name") # Check if rasters provided in 'listoflandcoverraster' exists to avoid error in mutliprocessing for cur_rast in listoflandcoverraster: try: mpset=cur_rast.split("@")[1] except: mpset="" if cur_rast.split("@")[0] not in [x[0] for x in gscript.list_pairs(type='raster',mapset=mpset)]: sys.exit('Raster <%s> not found' %cur_rast) # Check if rasters provided in 'listoflandcoverraster' have the same extend and spatial resolution raster={} for x, rast in 
enumerate(raster_list): raster[x]=gscript.raster_info(rast) key_list=raster.keys() for x in key_list[1:]: for info in ('north','south','east','west','ewres','nsres'): if not raster[0][info]==raster[x][info]: sys.exit("Some raster provided in the list have different spatial resolution or extend, please check") # Get the version of GRASS GIS version=grass.version()['version'].split('.')[0] # Define the folder to save the r.li configuration files if sys.platform=="win32": rli_dir=os.path.join(os.environ['APPDATA'],"GRASS"+version,"r.li") else: rli_dir=os.path.join(os.environ['HOME'],".grass"+version,"r.li") if not os.path.exists(rli_dir): os.makedirs(rli_dir) ## Create an ordered list with the 'cat' value of landscape units to be processed. try: landscape_polygons_mapset=landscape_polygons.split("@")[1] except: landscape_polygons_mapset=list(gscript.parse_command('g.mapset', flags='p'))[0] dbpath="$GISDBASE/$LOCATION_NAME/%s/sqlite/sqlite.db"%landscape_polygons_mapset if uniqueid not in list(gscript.parse_command('db.columns', table=landscape_polygons.split("@")[0], database=dbpath)): sys.exit('Column <%s> not found in vector layer <%s>' %(uniqueid,landscape_polygons.split("@")[0])) else: list_cat=[int(x) for x in gscript.parse_command('v.db.select', quiet=True, map=landscape_polygons, column=uniqueid, flags='c')] list_cat.sort() # Declare a empty dictionnary which will contains the north, south, east, west values for each landscape unit landscapeunit_bbox={} # Declare a empty list which will contain the path of the configation files created listpath=[] # Declare a empty string variable which will contains the core part of the r.li configuration file maskedoverlayarea="" # Duplicate 'listoflandcoverraster' in a new variable called 'tmp_list' tmp_list=list(listoflandcoverraster) # Set the current landcover raster as the first of the list base_landcover_raster=tmp_list.pop(0) #The pop function return the first item of the list and delete it from the list at the same 
time # Loop trough the landscape units for cat in list_cat: # Extract the current landscape unit polygon as temporary vector tmp_vect="tmp_"+base_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0]+"_"+str(cat) gscript.run_command('v.extract', overwrite=True, quiet=True, input=landscape_polygons, cats=cat, output=tmp_vect) # Set region to match the extent of the current landscape polygon, with resolution and alignement matching the landcover raster gscript.run_command('g.region', vector=tmp_vect, align=base_landcover_raster) # Rasterize the landscape unit polygon landscapeunit_rast=tmp_vect[4:] gscript.run_command('v.to.rast', overwrite=True, quiet=True, input=tmp_vect, output=landscapeunit_rast, use='cat', memory='3000') # Remove temporary vector gscript.run_command('g.remove', quiet=True, flags="f", type='vector', name=tmp_vect) # Set the region to match the raster landscape unit extent and save the region info in a dictionary region_info=gscript.parse_command('g.region', raster=landscapeunit_rast, flags='g') n=str(round(float(region_info['n']),5)) #the config file need 5 decimal for north and south s=str(round(float(region_info['s']),5)) e=str(round(float(region_info['e']),6)) #the config file need 6 decimal for east and west w=str(round(float(region_info['w']),6)) # Save the coordinates of the bbox in the dictionary (n,s,e,w) landscapeunit_bbox[cat]=n+"|"+s+"|"+e+"|"+w # Add the line to the maskedoverlayarea variable maskedoverlayarea+="MASKEDOVERLAYAREA "+landscapeunit_rast+"|"+landscapeunit_bbox[cat]+"\n" # Compile the content of the r.li configuration file config_file_content="SAMPLINGFRAME 0|0|1|1\n" config_file_content+=maskedoverlayarea config_file_content+="RASTERMAP "+base_landcover_raster+"\n" config_file_content+="VECTORMAP "+landscape_polygons+"\n" # Create a new file and save the content configfilename=base_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0] path=os.path.join(rli_dir,configfilename) 
listpath.append(path) f=open(path, 'w') f.write(config_file_content) f.close() # Continue creation of r.li configuration file and landscape unit raster the rest of the landcover raster provided while len(tmp_list)>0: # Reinitialize 'maskedoverlayarea' variable as an empty string maskedoverlayarea="" # Set the current landcover raster as the first of the list current_landcover_raster=tmp_list.pop(0) #The pop function return the first item of the list and delete it from the list at the same time # Copy all the landscape units masks for the current landcover raster p=Pool(ncores) #Create a pool of processes and launch them using 'map' function func=partial(copy_landscapeunitmasks,current_landcover_raster,base_landcover_raster,landscape_polygons,landscapeunit_bbox) # Set fixed argument of the function maskedoverlayarea=p.map(func,list_cat) # Launch the processes for as many items in the list and get the ordered results using map function p.close() p.join() # Compile the content of the r.li configuration file config_file_content="SAMPLINGFRAME 0|0|1|1\n" config_file_content+="\n".join(maskedoverlayarea)+"\n" config_file_content+="RASTERMAP "+current_landcover_raster+"\n" config_file_content+="VECTORMAP "+landscape_polygons+"\n" # Create a new file and save the content configfilename=current_landcover_raster.split("@")[0]+"_"+landscape_polygons.split("@")[0] path=os.path.join(rli_dir,configfilename) listpath.append(path) f=open(path, 'w') f.write(config_file_content) f.close() # Return a list of path of configuration files creates if option actived if returnlistpath: return listpath ``` ### Function for creation of binary raster from a categorical raster (multiprocessed) ``` ###### Function creating a binary raster for each category of a base raster. ### The function run within the current region. If a category do not exists in the current region, no binary map will be produce # 'categorical_raster' wait for the name of the base raster to be used. 
It is the one from which one binary raster will be produced for each category value # 'prefix' wait for a string corresponding to the prefix of the name of the binary raster which will be produced # 'setnull' wait for a boolean value (True, False) according to the fact that the output binary should be 1/0 or 1/null # 'returnlistraster' wait for a boolean value (True, False) regarding to the fact that a list containing the name of binary raster is desired as return of the function # 'category_list' wait for a list of interger corresponding to specific category of the base raster to be used # 'ncores' wait for a integer corresponding to the number of desired cores to be used for parallelization # Import libraries for multiprocessing import multiprocessing from multiprocessing import Pool from functools import partial def create_binary_raster(categorical_raster,prefix="binary",setnull=False,returnlistraster=True,category_list=None,ncores=2): # Check if raster exists to avoid error in mutliprocessing try: mpset=categorical_raster.split("@")[1] except: mpset="" if categorical_raster not in gscript.list_strings(type='raster',mapset=mpset): sys.exit('Raster <%s> not found' %categorical_raster) # Check for number of cores doesnt exceed available nbcpu=multiprocessing.cpu_count() if ncores>=nbcpu: ncores=nbcpu-1 returnlist=[] #Declare empty list for return #gscript.run_command('g.region', raster=categorical_raster, quiet=True) #Set the region null='null()' if setnull else '0' #Set the value for r.mapcalc minclass=1 if setnull else 2 #Set the value to check if the binary raster is empty if category_list == None: #If no category_list provided category_list=[cl for cl in gscript.parse_command('r.category',map=categorical_raster,quiet=True)] for i,x in enumerate(category_list): #Make sure the format is UTF8 and not Unicode category_list[i]=x.encode('UTF8') category_list.sort(key=float) #Sort the raster categories in ascending. 
p=Pool(ncores) #Create a pool of processes and launch them using 'map' function func=partial(get_binary,categorical_raster,prefix,null,minclass) # Set the two fixed argument of the function returnlist=p.map(func,category_list) # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function p.close() p.join() if returnlistraster: return returnlist #### Function that extract binary raster for a specified class (called in 'create_binary_raster' function) def get_binary(categorical_raster,prefix,null,minclass,cl): binary_class=prefix+"_"+cl gscript.run_command('r.mapcalc', expression=binary_class+'=if('+categorical_raster+'=='+str(cl)+',1,'+null+')',overwrite=True, quiet=True) if len(gscript.parse_command('r.category',map=binary_class,quiet=True))>=minclass: #Check if created binary is not empty return binary_class else: gscript.run_command('g.remove', quiet=True, flags="f", type='raster', name=binary_class) ``` ### Function for computation of spatial metrics at landscape level (multiprocessed) ``` ##### Function that compute different landscape metrics (spatial metrics) at landscape level. ### The metric computed are "dominance","pielou","renyi","richness","shannon","simpson". ### It is important to set the computation region before runing this script so that it match the extent of the 'raster' layer. # 'configfile' wait for the path (string) to the configuration file corresponding to the 'raster' layer. # 'raster' wait for the name (string) of the landcover map on which landscape metrics will be computed. # 'returnlistresult' wait for a boolean value (True/False) according to the fact that a list containing the path to the result files is desired. # 'ncores' wait for a integer corresponding to the number of desired cores to be used for parallelization. 
# Import libraries for multiprocessing import multiprocessing from multiprocessing import Pool from functools import partial def compute_landscapelevel_metrics(configfile, raster, spatial_metric): filename=raster.split("@")[0]+"_%s" %spatial_metric outputfile=os.path.join(os.path.split(configfile)[0],"output",filename) if spatial_metric=='renyi': # The alpha parameter was set to 2 as in https://en.wikipedia.org/wiki/R%C3%A9nyi_entropy gscript.run_command('r.li.%s' %spatial_metric, overwrite=True, input=raster,config=configfile,alpha='2', output=filename) else: gscript.run_command('r.li.%s' %spatial_metric, overwrite=True, input=raster,config=configfile, output=filename) return outputfile def get_landscapelevel_metrics(configfile, raster, returnlistresult=True, ncores=2): # Check if raster exists to avoid error in mutliprocessing try: mpset=raster.split("@")[1] except: mpset="" if raster not in gscript.list_strings(type='raster',mapset=mpset): sys.exit('Raster <%s> not found' %raster) # Check if configfile exists to avoid error in mutliprocessing if not os.path.exists(configfile): sys.exit('Configuration file <%s> not found' %configfile) # List of metrics to be computed spatial_metric_list=["dominance","pielou","renyi","richness","shannon","simpson"] # Check for number of cores doesnt exceed available nbcpu=multiprocessing.cpu_count() if ncores>=nbcpu: ncores=nbcpu-1 if ncores>len(spatial_metric_list): ncores=len(spatial_metric_list) #Adapt number of cores to number of metrics to compute #Declare empty list for return returnlist=[] # Create a new pool p=Pool(ncores) # Set the two fixed argument of the 'compute_landscapelevel_metrics' function func=partial(compute_landscapelevel_metrics,configfile, raster) # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function returnlist=p.map(func,spatial_metric_list) p.close() p.join() # Return list of paths to result files if returnlistresult: return returnlist ``` ### 
Function for computation of spatial metrics at class level (multiprocessed)
```
##### Function that computes different landscape metrics (spatial metrics) at class level.
### The metrics computed are "patch number (patchnum)", "patch density (patchdensity)", "mean patch size (mps)",
### "coefficient of variation of patch area (padcv)", "range of patch area size (padrange)",
### "standard deviation of patch area (padsd)", "shape index (shape)", "edge density (edgedensity)".
### It is important to set the computational region before running this script so that it matches the extent of the 'raster' layer.
# 'configfile' expects the path (string) to the configuration file corresponding to the 'raster' layer.
# 'raster' expects the name (string) of the landcover map on which landscape metrics will be computed.
# 'returnlistresult' expects a boolean value (True/False) indicating whether a list containing the paths to the result files is desired.
# 'ncores' expects an integer corresponding to the number of desired cores to be used for parallelization.
# Import libraries for multiprocessing import multiprocessing from multiprocessing import Pool from functools import partial def compute_classlevel_metrics(configfile, raster, spatial_metric): filename=raster.split("@")[0]+"_%s" %spatial_metric gscript.run_command('r.li.%s' %spatial_metric, overwrite=True, input=raster,config=configfile,output=filename) outputfile=os.path.join(os.path.split(configfile)[0],"output",filename) return outputfile def get_classlevel_metrics(configfile, raster, returnlistresult=True, ncores=2): # Check if raster exists to avoid error in mutliprocessing try: mpset=raster.split("@")[1] except: mpset="" if raster not in [x.split("@")[0] for x in gscript.list_strings(type='raster',mapset=mpset)]: sys.exit('Raster <%s> not found' %raster) # Check if configfile exists to avoid error in mutliprocessing if not os.path.exists(configfile): sys.exit('Configuration file <%s> not found' %configfile) # List of metrics to be computed spatial_metric_list=["patchnum","patchdensity","mps","padcv","padrange","padsd","shape","edgedensity"] # Check for number of cores doesnt exceed available nbcpu=multiprocessing.cpu_count() if ncores>=nbcpu: ncores=nbcpu-1 if ncores>len(spatial_metric_list): ncores=len(spatial_metric_list) #Adapt number of cores to number of metrics to compute # Declare empty list for return returnlist=[] # Create a new pool p=Pool(ncores) # Set the two fixed argument of the 'compute_classlevel_metrics' function func=partial(compute_classlevel_metrics,configfile, raster) # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function returnlist=p.map(func,spatial_metric_list) p.close() p.join() # Return list of paths to result files if returnlistresult: return returnlist ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** <center> <font size=5> <h1>User inputs</h1> </font> </center> ``` ## Define a empty 
dictionnary for saving user inputs user={} ## Enter the path to GRASSDATA folder user["gisdb"] = "/home/tais/Documents/GRASSDATA_Spie2017subset_Ouaga" ## Enter the name of the location (existing or for a new one) user["location"] = "SPIE_subset" ## Enter the EPSG code for this location user["locationepsg"] = "32630" ## Enter the name of the mapset to use for segmentation user["mapsetname"] = "test_rli" ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** # Compute spatial metrics for deriving land use in street blocs **Launch GRASS GIS working session** ``` ## Set the name of the mapset in which to work mapsetname=user["mapsetname"] ## Launch GRASS GIS working session in the mapset if os.path.exists(os.path.join(user["gisdb"],user["location"],mapsetname)): gsetup.init(os.environ['GISBASE'], user["gisdb"], user["location"], mapsetname) print "You are now working in mapset '"+mapsetname+"'" else: print "'"+mapsetname+"' mapset doesn't exists in "+user["gisdb"] ``` **Set the path to the r.li folder for configuration file and for results** ``` os.environ # Define path of the outputfile (in r.li folder) version=grass.version()['version'].split('.')[0] # Get the version of GRASS GIS if sys.platform=="win32": rli_config_dir=os.path.join(os.environ['APPDATA'],"GRASS"+version,"r.li") rli_output_dir=os.path.join(os.environ['APPDATA'],"GRASS"+version,"r.li","output") else: rli_config_dir=os.path.join(os.environ['HOME'],"GRASS"+version,"r.li") rli_output_dir=os.path.join(os.environ['HOME'],".grass"+version,"r.li","output") if not os.path.exists(rli_config_dir): os.makedirs(rli_config_dir) if not os.path.exists(rli_output_dir): os.makedirs(rli_output_dir) # Print print "GRASS GIS add-on's r.li configuration files will be saved under <%s>."%(rli_config_dir,) print "GRASS GIS add-on's r.li outputs will be saved under <%s>."%(rli_output_dir,) ``` 
**-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** ### Define the name of the base landcover map and landscape units polygons ``` # Set the name of the 'base' landcover map baselandcoverraster="classif@test_rli" # Set the name of the vector polygon layer containing the landscape units landscape_polygons="streetblocks@PERMANENT" ``` ### Import shapefile containing street blocks polygons ``` # Set the path to the shapefile containing streetblocks polygons pathtoshp="/media/tais/data/Dropbox/ULB/MAUPP/Landuse_mapping/Test_spatial_metrics_computation/Data/Subset_spatial_metrics.shp" # Import shapefile gscript.run_command('v.in.ogr', quiet=True, overwrite=True, input=pathtoshp, output=landscape_polygons) ``` ### Create binary rasters from the base landcover map ``` # Save time for computing processin time begintime=time.time() # Create as many binary raster layer as categorical values existing in the base landcover map gscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region pref=baselandcoverraster.split("@")[0]+"_cl" #Set the prefix raster_list=[] # Initialize a empty list for results raster_list=create_binary_raster(baselandcoverraster, prefix=pref,setnull=True,returnlistraster=True, category_list=None,ncores=15) #Extract binary raster # Compute and print processing time print_processing_time(begintime,"Extraction of binary rasters achieved in ") # Insert the name of the base landcover map at first position in the list raster_list.insert(0,baselandcoverraster) # Display the raster to be used for landscape analysis raster_list ``` ## Create r.li configuration file for a list of landcover rasters ``` # Save time for computing processin time begintime=time.time() # Run creation of r.li configuration file and associated raster layers list_configfile=create_rli_configfile(raster_list,landscape_polygons,uniqueid='gid',returnlistpath=True,ncores=20) # Compute and print 
processing time print_processing_time(begintime,"Creation of r.li configuration files achieved in ") # Display the path to the configuration files created list_configfile ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** ## Compute spatial metrics at landscape level ``` # Initialize an empty list which will contains the resultfiles resultfiles=[] # Save time for computing processin time begintime=time.time() # Get the path to the configuration file for the base landcover raster currentconfigfile=list_configfile[0] # Get the name of the base landcover raster currentraster=raster_list[0] # Set the region to match the extent of the base raster gscript.run_command('g.region', raster=currentraster, quiet=True) # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function resultfiles.append(get_landscapelevel_metrics(currentconfigfile, currentraster, returnlistresult=True, ncores=15)) # Compute and print processing time print_processing_time(begintime,"Computation of spatial metric achieved in ") resultfiles ``` ## Compute spatial metrics at class level ``` # Save time for computing processin time begintime=time.time() # Get a list with paths to the configuration file for class level metrics classlevelconfigfiles=list_configfile[1:] # Get a list with name of binary landcover raster for class level metrics classlevelrasters=raster_list[1:] for x,currentraster in enumerate(classlevelrasters[:]): # Get the path to the configuration file for the base landcover raster currentconfigfile=classlevelconfigfiles[x] # Set the region to match the extent of the base raster gscript.run_command('g.region', raster=currentraster, quiet=True) # Launch the processes for as many items in the 'functions_name' list and get the ordered results using map function resultfiles.append(get_classlevel_metrics(currentconfigfile, currentraster, returnlistresult=True, 
ncores=10)) # Compute and print processing time print_processing_time(begintime,"Computation of spatial metric achieved in ") resultfiles # Flat the 'resultfiles' list which contains several lists resultfiles=[item for sublist in resultfiles for item in sublist] resultfiles ``` # Compute some special metrics ``` # Set pixel value of 'buildings' on the 'baselandcoverraster' buildpixel=11 # Set the name of the new layer containing height of buildings buildings_height='buildings_height' # Set the name of the nDSM layer ndsm="ndsm" # Set the name of the NDVI layer ndvi="ndvi" # Set the name of the NDWI layer ndwi="ndwi" # Set the prefix of SAR textures layer SAR_prefix="SAR_w" ``` ### Create raster with nDSM value of 'buildings' pixels ``` # Save time for computing processin time begintime=time.time() # Create a raster layer with height of pixels classified as 'buildings' gscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region formula="%s=if(%s==%s, %s, null())"%(buildings_height,baselandcoverraster,buildpixel,ndsm) gscript.mapcalc(formula, overwrite=True) # Compute and print processing time print_processing_time(begintime,"Creation of layer in ") ``` ### Mean and standard deviation of building's height, SAR textures, NDVI, NDWI ``` # Save time for computing processin time begintime=time.time() # Create a raster layer with height of pixels classified as 'buildings' gscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region formula="%s=if(%s==%s, %s, null())"%(buildings_height,baselandcoverraster,buildpixel,ndsm) gscript.mapcalc(formula, overwrite=True) # Compute and print processing time print_processing_time(begintime,"Creation of layer in ") # Set up a list with name of raster layer to be used ancillarylayers=[] ancillarylayers.append(buildings_height) ancillarylayers.append(ndvi) ancillarylayers.append(ndwi) [ancillarylayers.append(x) for x in gscript.list_strings("rast", pattern=SAR_prefix, flag='r')] 
#Append SAR textures print "Layer to be used :\n\n"+'\n'.join(ancillarylayers) # Set the path to the file for i.segment.stats results isegmentstatsfile=os.path.join(rli_output_dir,"ancillary_info") # Save time for computing processin time begintime=time.time() ###### Compute shape metrics as well as mean and stddev of ancillary layers for each landscape unit ## Set number of cores to be used ncores=len(ancillarylayers) nbcpu=multiprocessing.cpu_count() if ncores>=nbcpu: ncores=nbcpu-1 if ncores>len(ancillarylayers): ncores=len(ancillarylayers) #Adapt number of cores to number of metrics to compute # Run i.segment.stats gscript.run_command('g.region', raster=baselandcoverraster, quiet=True) #Set the region raster_landscapeunits="temp_%s"%landscape_polygons.split("@")[0] gscript.run_command('v.to.rast', overwrite=True, input=landscape_polygons, output=raster_landscapeunits, use='cat') gscript.run_command('i.segment.stats', overwrite=True, map=raster_landscapeunits, raster_statistics='stddev,median', area_measures='area,perimeter,compact_circle,compact_square,fd', rasters=','.join(ancillarylayers), csvfile=isegmentstatsfile, processes=ncores) # Compute and print processing time print_processing_time(begintime,"Metrics computed in ") resultfiles.insert(0,isegmentstatsfile) resultfiles ``` # Combine all .csv files together ``` ## Function which execute a left join using individual .csv files. ## This ddddddddddddd # The argument "indir" wait for a string containing the path to the directory where the individual .csv files are stored. # The argument "outfile" wait for a string containing the path to the output file to create. # The argument "overwrite" wait for True/False value allow or not to overwrite existing outfile. # The argument "pattern" wait for a string containing the pattern of filename to use. 
Use wildcards is possible (*.csv for all .csv files) import os,sys import glob def leftjoin_csv(fileList,outfile,separator=",",overwrite=False,pattern=None): # Stop execution if outputfile exitst and can not be overwriten if os.path.isfile(outfile) and overwrite==False: print "File '"+str(outfile)+"' aleady exists and overwrite option is not enabled." else: if os.path.isfile(outfile) and overwrite==True: # If outputfile exitst and can be overwriten os.remove(outfile) print "File '"+str(outfile)+"' has been overwrited." if len(fileList)<=1: #Check if there are at least 2 files in the list sys.exit("This function require at least two .csv files to be jointed together.") # Save all the value in a dictionnary with key corresponding to the first column outputdict={} header=[] header.append("id") #set name for first column # Loop through all files: for filenum,f in enumerate([open(f) for f in fileList]): for linenum,line in enumerate(f): firstcolumn=line.split(separator)[0] othercolumns=line.split("\n")[0].split(separator)[1:] if linenum==0: #If first line if firstcolumn.split(" ")[0]=="RESULT": #If file comes from r.li.* add-ons header.append(os.path.split(fileList[filenum])[-1].split(".")[0]) else: [header.append(x) for x in othercolumns] #If file comes from i.segment.stats else: try: cat_id=firstcolumn.split(" ")[1] except: cat_id=firstcolumn try: [outputdict[cat_id].append(x) for x in othercolumns] except: outputdict[cat_id]=othercolumns # Write the dictionnary with header in a the output csv file outputcsv=open(outfile,"w") outputcsv.write(separator.join(header)) outputcsv.write("\n") for key in outputdict.keys(): outputcsv.write(key+separator) outputcsv.write(separator.join(outputdict[key])) outputcsv.write("\n") outputcsv.close() # Create a .csvt file with type of each column csvt=open(outfile+"t","w") results=open(outfile,"r") header=results.next() typecolumn=[] typecolumn.append("Integer") for columns in header[1:]: typecolumn.append("Real") 
csvt.write(separator.join(typecolumn)) csvt.close() outputcsv.close() # Print what happend print str(len(fileList))+" individual .csv files were joint together." # Join all result files together in a new .csv file outfile=os.path.join(rli_output_dir,"land_use_metrics.csv") leftjoin_csv(resultfiles, outfile, separator="|", overwrite=True) ``` # Importing the NDVI layer ``` break ## Saving current time for processing time management begintime_ndvi=time.time() ## Import nDSM imagery print ("Importing NDVI raster imagery at " + time.ctime()) gscript.run_command('r.import', input="/media/tais/data/MAUPP/WorldView3_Ouagadougou/Orthorectified/mosaique_georef/NDVI/ndvi_georef_ordre2.TIF", output="ndvi", overwrite=True) # Mask null/nodata values gscript.run_command('r.null', map="ndvi") print_processing_time(begintime_ndvi, "imagery has been imported in ") ``` # Importing the nDSM layer ``` break ## Saving current time for processing time management begintime_ndsm=time.time() ## Import nDSM imagery print ("Importing nDSM raster imagery at " + time.ctime()) grass.run_command('r.import', input="/media/tais/data/MAUPP/WorldView3_Ouagadougou/Orthorectified/mosaique_georef/nDSM/nDSM_mosaik_georef_ordre2.tif", output="ndsm", overwrite=True) ## Define null value for specific value in nDSM raster. Adapt the value to your own data. # If there is no null value in your data, comment the next line grass.run_command('r.null', map="ndsm", setnull="-999") # Make histogram equalisation on grey color. 
grass.run_command('r.colors', flags='e', map='ndsm', color='grey') print_processing_time(begintime_ndsm, "nDSM imagery has been imported in ") ``` ### Masking the nDSM artifacts ``` break # Import vector with nDSM artifacts zones grass.run_command('v.in.ogr', overwrite=True, input="/media/tais/data/MAUPP/WorldView3_Ouagadougou/Masque_artifacts_nDSM/Ouaga_mask_artifacts_nDSM.shp", output="mask_artifacts_ndsm") ## Set computational region to match the default region grass.run_command('g.region', overwrite=True, raster="ndsm") # Rasterize the vector layer, with value "0" on the artifacts zones grass.run_command('v.to.rast', input='mask_artifacts_ndsm', output='mask_artifacts_ndsm', use='val', value='0', memory='5000') ## Set computational region to match the default region grass.run_command('g.region', overwrite=True, raster="ndsm") ## Create a new nDSM with artifacts filled with '0' value formula='tmp_artifact=nmin(ndsm,mask_artifacts_ndsm)' grass.mapcalc(formula, overwrite=True) ## Remove the artifact mask grass.run_command('g.remove', flags='f', type='raster', name="mask_artifacts_ndsm") ## Rename the new nDSM grass.run_command('g.rename', raster='tmp_artifact,ndsm', overwrite=True) ## Remove the intermediate nDSM layer grass.run_command('g.remove', flags='f', type='raster', name="tmp_artifact") ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** # Define input raster for computing statistics of segments ``` ## Display the name of rasters available in PERMANENT and CLASSIFICATION mapset print grass.read_command('g.list',type="raster", mapset="PERMANENT", flags='rp') print grass.read_command('g.list',type="raster", mapset=user["classificationA_mapsetname"], flags='rp') ## Define the list of raster layers for which statistics will be computed inputstats=[] inputstats.append("opt_blue") inputstats.append("opt_green") inputstats.append("opt_red") inputstats.append("opt_nir") 
inputstats.append("ndsm") inputstats.append("ndvi") print "Layer to be used to compute raster statistics of segments:\n"+'\n'.join(inputstats) ## Define the list of raster statistics to be computed for each raster layer rasterstats=[] rasterstats.append("min") rasterstats.append("max") rasterstats.append("range") rasterstats.append("mean") rasterstats.append("stddev") #rasterstats.append("coeff_var") # Seems that this statistic create null values rasterstats.append("median") rasterstats.append("first_quart") rasterstats.append("third_quart") rasterstats.append("perc_90") print "Raster statistics to be computed:\n"+'\n'.join(rasterstats) ## Define the list of area measures (segment's shape statistics) to be computed areameasures=[] areameasures.append("area") areameasures.append("perimeter") areameasures.append("compact_circle") areameasures.append("compact_square") areameasures.append("fd") print "Area measures to be computed:\n"+'\n'.join(areameasures) ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** <center> <font size=5> <h1>Compute objects' statistics</h1> </font> </center> ``` ## Saving current time for processing time management begintime_computeobjstat=time.time() ``` ## Define the folder where to save the results and create it if necessary In the next cell, please adapt the path to the directory where you want to save the .csv output of i.segment.uspo. ``` ## Folder in which save processing time output outputfolder="/media/tais/My_Book_1/MAUPP/Traitement/Ouagadougou/Segmentation_fullAOI_localapproach/Results/CLASSIF/stats_optical" ## Create the folder if does not exists if not os.path.exists(outputfolder): os.makedirs(outputfolder) print "Folder '"+outputfolder+"' created" ``` ### Copy data from other mapset to the current mapset Some data need to be copied from other mapsets into the current mapset. 
### Remove current mask ``` ## Check if there is a raster layer named "MASK" if not grass.list_strings("rast", pattern="MASK", mapset=mapsetname, flag='r'): print 'There is currently no MASK' else: ## Remove the current MASK layer grass.run_command('r.mask',flags='r') print 'The current MASK has been removed' ``` ***Copy segmentation raster*** ``` ## Copy segmentation raster layer from SEGMENTATION mapset to current mapset grass.run_command('g.copy', overwrite=True, raster="segmentation_raster@"+user["segmentation_mapsetname"]+",segments") ``` ***Copy morphological zone (raster)*** ``` ## Copy segmentation raster layer from SEGMENTATION mapset to current mapset grass.run_command('g.copy', overwrite=True, raster="zone_morpho@"+user["segmentation_mapsetname"]+",zone_morpho") ``` ***Copy morphological zone (vector)*** ``` ## Copy segmentation raster layer from SEGMENTATION mapset to current mapset grass.run_command('g.copy', overwrite=True, vector="zone_morpho@"+user["segmentation_mapsetname"]+",zone_morpho") ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-** # Compute statistics of segments (Full AOI extend) ### Compute statistics of segment using i.segment.stats The process is make to compute statistics iteratively for each morphological zones, used here as tiles. This section uses the ['i.segment.stats' add-on](https://grass.osgeo.org/grass70/manuals/addons/i.segment.stats.html) to compute statistics for each object. 
``` ## Save name of the layer to be used as tiles tile_layer='zone_morpho'+'@'+mapsetname ## Save name of the segmentation layer to be used by i.segment.stats segment_layer='segments'+'@'+mapsetname ## Save name of the column containing area_km value area_column='area_km2' ## Save name of the column containing morphological type value type_column='type' ## Save the prefix to be used for the outputfiles of i.segment.stats prefix="Segstat" ## Save the list of polygons to be processed (save the 'cat' value) listofregion=list(grass.parse_command('v.db.select', map=tile_layer, columns='cat', flags='c'))[:] for count, cat in enumerate(listofregion): print str(count)+" cat:"+str(cat) ``` ``` ## Initialize a empty string for saving print outputs txtcontent="" ## Running i.segment.stats messagetoprint="Start computing statistics for segments to be classified, using i.segment.stats on "+time.ctime()+"\n" print (messagetoprint) txtcontent+=messagetoprint+"\n" begintime_isegmentstats=time.time() ## Compute total area to be processed for process progression information processed_area=0 nbrtile=len(listofregion) attributes=grass.parse_command('db.univar', flags='g', table=tile_layer.split("@")[0], column=area_column, driver='sqlite') total_area=float(attributes['sum']) messagetoprint=str(nbrtile)+" region(s) will be processed, covering an area of "+str(round(total_area,3))+" Sqkm."+"\n\n" print (messagetoprint) txtcontent+=messagetoprint ## Save time before looping begintime_isegmentstats=time.time() ## Start loop on morphological zones count=1 for cat in listofregion[:]: ## Save current time at loop' start. 
begintime_current_id=time.time() ## Create a computional region for the current polygon condition="cat="+cat outputname="tmp_"+cat grass.run_command('v.extract', overwrite=True, quiet=True, input=tile_layer, type='area', where=condition, output=outputname) grass.run_command('g.region', overwrite=True, vector=outputname, align=segment_layer) grass.run_command('r.mask', overwrite=True, raster=tile_layer, maskcats=cat) grass.run_command('g.remove', quiet=True, type="vector", name=outputname, flags="f") ## Save size of the current polygon and add it to the already processed area size=round(float(grass.read_command('v.db.select', map=tile_layer, columns=area_column, where=condition,flags="c")),2) ## Print messagetoprint="Computing segments's statistics for tile n°"+str(cat) messagetoprint+=" ("+str(count)+"/"+str(len(listofregion))+")" messagetoprint+=" corresponding to "+str(size)+" km2" print (messagetoprint) txtcontent+=messagetoprint+"\n" ## Define the csv output file name, according to the optimization function selected outputcsv=os.path.join(outputfolder,prefix+"_"+str(cat)+".csv") ## Compute statistics of objets using i.segment.stats only with .csv output (no vectormap output). grass.run_command('i.segment.stats', overwrite=True, map=segment_layer, rasters=','.join(inputstats), raster_statistics=','.join(rasterstats), area_measures=','.join(areameasures), csvfile=outputcsv, processes='20') ## Add the size of the zone to the already processed area processed_area+=size ## Print messagetoprint=print_processing_time(begintime_current_id, "i.segment.stats finishes to process th current tile in ") print (messagetoprint) txtcontent+=messagetoprint+"\n" remainingtile=nbrtile-count if remainingtile>0: messagetoprint=str(round((processed_area/total_area)*100,2))+" percent of the total area processed. 
" messagetoprint+="Still "+str(remainingtile)+" zone(s) to process."+"\n" print (messagetoprint) txtcontent+=messagetoprint+"\n" else: messagetoprint="\n" print (messagetoprint) txtcontent+=messagetoprint ## Adapt the count count+=1 ## Remove current mask grass.run_command('r.mask', flags='r') ## Compute processing time and print it messagetoprint=print_processing_time(begintime_isegmentstats, "Statitics computed in ") print (messagetoprint) txtcontent+=messagetoprint #### Write text file with log of processing time ## Create the .txt file for processing time output and begin to write f = open(os.path.join(outputfolder,mapsetname+"_processingtime_isegmentstats.txt"), 'w') f.write(mapsetname+" processing time information for i.segment.stats"+"\n\n") f.write(txtcontent) f.close() ## print print_processing_time(begintime_computeobjstat,"Object statistics computed in ") ``` ## Concatenate individuals .csv files and replace unwanted values BE CAREFUL! Before runing the following cells, please check your data to be sure that it makes sens to replace the 'nan', 'null', or 'inf' values with "0" ``` ## Define the outputfile for .csv containing statistics for all segments outputfile=os.path.join(outputfolder,"all_segments_stats.csv") print outputfile # Create a dictionary with 'key' to be replaced by 'values' findreplacedict={} findreplacedict['nan']="0" findreplacedict['null']="0" findreplacedict['inf']="0" # Define pattern of file to concatenate pat=prefix+"_*.csv" sep="|" ## Initialize a empty string for saving print outputs txtcontent="" ## Saving current time for processing time management begintime_concat=time.time() ## Print messagetoprint="Start concatenate individual .csv files and replacing unwanted values." 
print (messagetoprint) txtcontent+=messagetoprint+"\n" # Concatenate and replace unwanted values messagetoprint=concat_findreplace(outputfolder,pat,sep,findreplacedict,outputfile) print (messagetoprint) txtcontent+=messagetoprint+"\n" ## Compute processing time and print it messagetoprint=print_processing_time(begintime_concat, "Process achieved in ") print (messagetoprint) txtcontent+=messagetoprint+"\n" #### Write text file with log of processing time ## Create the .txt file for processing time output and begin to write filepath=os.path.join(outputfolder,mapsetname+"_processingtime_concatreplace.txt") f = open(filepath, 'w') f.write(mapsetname+" processing time information for concatenation of individual .csv files and replacing of unwanted values."+"\n\n") f.write(txtcontent) f.close() ``` # Create new database in postgresql ``` # User for postgresql connexion dbuser="tais" # Password of user dbpassword="tais" # Host of database host="localhost" # Name of the new database dbname="ouaga_fullaoi_localsegment" # Set name of schema for objects statistics stat_schema="statistics" # Set name of table with statistics of segments - FOR OPTICAL object_stats_table="object_stats_optical" break from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT # Connect to postgres database db=None db=pg.connect(dbname='postgres', user=dbuser, password=dbpassword, host=host) # Allow to create a new database db.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) # Execute the CREATE DATABASE query cur=db.cursor() #cur.execute('DROP DATABASE IF EXISTS ' + dbname) #Comment this to avoid deleting existing DB cur.execute('CREATE DATABASE ' + dbname) cur.close() db.close() ``` ### Create PostGIS Extension in the database ``` break # Connect to the database db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host) # Open a cursor to perform database operations cur=db.cursor() # Execute the query cur.execute('CREATE EXTENSION IF NOT EXISTS postgis') # Make the changes to the 
database persistent db.commit() # Close connection with database cur.close() db.close() ``` <center> <font size=4> <h2>Import statistics of segments in a Postgresql database</h2> </font> </center> ## Create new schema in the postgresql database ``` schema=stat_schema from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT # Connect to postgres database db=None db=pg.connect(dbname=dbname, user='tais', password='tais', host='localhost') # Allow to create a new database db.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) # Execute the CREATE DATABASE query cur=db.cursor() #cur.execute('DROP SCHEMA IF EXISTS '+schema+' CASCADE') #Comment this to avoid deleting existing DB try: cur.execute('CREATE SCHEMA '+schema) except Exception as e: print ("Exception occured : "+str(e)) cur.close() db.close() ``` ## Create a new table ``` # Connect to an existing database db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host) # Open a cursor to perform database operations cur=db.cursor() # Drop table if exists: cur.execute("DROP TABLE IF EXISTS "+schema+"."+object_stats_table) # Make the changes to the database persistent db.commit() import csv # Create a empty list for saving of column name column_name=[] # Create a reader for the first csv file in the stack of csv to be imported pathtofile=os.path.join(outputfolder, outputfile) readercsvSubset=open(pathtofile) readercsv=csv.reader(readercsvSubset, delimiter='|') headerline=readercsv.next() print "Create a new table '"+schema+"."+object_stats_table+"' with header corresponding to the first row of file '"+pathtofile+"'" ## Build a query for creation of a new table with auto-incremental key-value (thus avoiding potential duplicates of 'cat' value) # All column data-types are set to 'text' in order to be able to import some 'nan', 'inf' or 'null' values present in statistics files # This table will allow to import all individual csv files in a single Postgres table, which will be cleaned after query="CREATE 
TABLE "+schema+"."+object_stats_table+" (" query+="key_value serial PRIMARY KEY" query+=", "+str(headerline[0])+" text" column_name.append(str(headerline[0])) for column in headerline[1:]: if column[0] in ('1','2','3','4','5','6','7','8','9','0'): query+="," query+=" "+"W"+str(column)+" double precision" column_name.append("W"+str(column)) else: query+="," query+=" "+str(column)+" double precision" column_name.append(str(column)) query+=")" # Execute the CREATE TABLE query cur.execute(query) # Make the changes to the database persistent db.commit() # Close cursor and communication with the database cur.close() db.close() ``` ## Copy objects statistics from csv to Postgresql database ``` # Connect to an existing database db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host) # Open a cursor to perform database operations cur=db.cursor() ## Initialize a empty string for saving print outputs txtcontent="" ## Saving current time for processing time management begintime_copy=time.time() ## Print messagetoprint="Start copy of segments' statistics in the postgresql table '"+schema+"."+object_stats_table+"'" print (messagetoprint) txtcontent+=messagetoprint+"\n" # Create query for copy data from csv, avoiding the header, and updating only the column which are in the csv (to allow auto-incremental key value to wokr) query="COPY "+schema+"."+object_stats_table+"("+', '.join(column_name)+") " query+=" FROM '"+str(pathtofile)+"' HEADER DELIMITER '|' CSV;" # Execute the COPY FROM CSV query cur.execute(query) # Make the changes to the database persistent db.commit() ## Compute processing time and print it messagetoprint=print_processing_time(begintime_copy, "Process achieved in ") print (messagetoprint) txtcontent+=messagetoprint+"\n" #### Write text file with log of processing time ## Create the .txt file for processing time output and begin to write filepath=os.path.join(outputfolder,mapsetname+"_processingtime_PostGimport.txt") f = open(filepath, 'w') 
f.write(mapsetname+" processing time information for importation of segments' statistics in the PostGreSQL Database."+"\n\n") f.write(txtcontent) f.close() # Close cursor and communication with the database cur.close() db.close() ``` # Drop duplicate values of CAT Here, we will find duplicates. Indeed, as statistics are computed for each tile (morphological area) with the computational region aligned to the pixel raster, some objects could appear in two different tiles, resulting in duplicates in the "CAT" column. We first select the "CAT" of duplicated objects and put them in a list. Then, for each duplicated "CAT", we select the key-value (primary key) of the smallest object (area_min). The rows corresponding to those key-values are then removed using the "DELETE FROM" query. ``` # Connect to an existing database db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host) # Open a cursor to perform database operations cur=db.cursor() ## Initialize a empty string for saving print outputs txtcontent="" ## Saving current time for processing time management begintime_removeduplic=time.time() ## Print messagetoprint="Start removing duplicates in the postgresql table '"+schema+"."+object_stats_table+"'" print (messagetoprint) txtcontent+=messagetoprint+"\n" # Find duplicated 'CAT' find_duplicated_cat() # Remove duplicated count_pass=1 count_removedduplic=0 while len(cattodrop)>0: messagetoprint="Removing duplicates - Pass "+str(count_pass) print (messagetoprint) txtcontent+=messagetoprint+"\n" find_duplicated_key() remove_duplicated_key() messagetoprint=str(len(keytodrop))+" duplicates removed." print (messagetoprint) txtcontent+=messagetoprint+"\n" count_removedduplic+=len(keytodrop) # Find again duplicated 'CAT' find_duplicated_cat() count_pass+=1 messagetoprint="A total of "+str(count_removedduplic)+" duplicates were removed."
print (messagetoprint)
txtcontent+=messagetoprint+"\n"
## Compute processing time and print it
messagetoprint=print_processing_time(begintime_removeduplic, "Process achieved in ")
print (messagetoprint)
txtcontent+=messagetoprint+"\n"
#### Write text file with log of processing time
## Create the .txt file for processing time output and begin to write
filepath=os.path.join(outputfolder,mapsetname+"_processingtime_RemoveDuplic.txt")
f = open(filepath, 'w')
f.write(mapsetname+" processing time information for removing duplicated objects."+"\n\n")
f.write(txtcontent)
f.close()
# Vacuum the current Postgresql database
vacuum(db)
```

# Change the primary key from 'key_value' to 'cat'

```
# Connect to an existing database
db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host)
# Open a cursor to perform database operations
cur=db.cursor()
# Now that duplicates are gone, 'cat' is unique and can replace the
# auto-incremental 'key_value' column as the table's primary key.
# Build a query to drop the current constraint on primary key
query="ALTER TABLE "+schema+"."+object_stats_table+" \
DROP CONSTRAINT "+object_stats_table+"_pkey"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Build a query to change the datatype of 'cat' to 'integer'
query="ALTER TABLE "+schema+"."+object_stats_table+" \
ALTER COLUMN cat TYPE integer USING cat::integer"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Build a query to add primary key on 'cat'
query="ALTER TABLE "+schema+"."+object_stats_table+" \
ADD PRIMARY KEY (cat)"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Build a query to drop column 'key_value'
query="ALTER TABLE "+schema+"."+object_stats_table+" \
DROP COLUMN key_value"
# Execute the query
cur.execute(query)
# Make the changes to the database persistent
db.commit()
# Vacuum the current Postgresql database
vacuum(db)
# Close cursor and communication with the database
cur.close()
db.close()
```

### Show first rows of statistics

```
# 
Connect to an existing database db=pg.connect(database=dbname, user=dbuser, password=dbpassword, host=host) # Number of line to show (please limit to 100 for saving computing time) nbrow=15 # Query query="SELECT * FROM "+schema+"."+object_stats_table+" \ ORDER BY cat \ ASC LIMIT "+str(nbrow) # Execute query through panda df=pd.read_sql(query, db) # Show dataframe df.head(15) ``` <left> <font size=4> <b> End of classification part </b> </font> </left> ``` print("The script ends at "+ time.ctime()) print_processing_time(begintime_segmentation_full, "Entire process has been achieved in ") ``` **-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-**
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import scipy.stats as scy inds = np.arange(0, 50, 0.001) capacity = 20 y = 1/ (1 + np.exp( -0.1 * (inds - capacity / 2)) ) y[inds < 5] = 0 plt.plot(inds, y, label='Juveniles') y1 = y y1[inds >= 5] = 0.01 plt.plot(inds, y1, '-.', label='Adults') plt.text(20, 0.03, '1 %', size=12, color='tab:orange') plt.legend() plt.ylabel('Emigration probability', size =12) plt.xlabel('Number of individuals in pond (adults and juveniles)', size=12) plt.title('Emigration') plt.savefig('images/emigration.svg'); hatchlings = np.arange(0,10) prob = scy.poisson(5).pmf(hatchlings) plt.plot(hatchlings, prob, 'bo') plt.vlines(hatchlings, 0, prob, colors='b', lw=5, alpha=0.5) plt.xlabel('Number of fertile female juveniles per female newt', size=12) plt.ylabel('Probability', size=12) plt.title('Offspring', size=14) plt.savefig('images/offspring.svg'); import matplotlib as mpl mpl.rcParams['axes.spines.right'] = False mpl.rcParams['axes.spines.top'] = False mpl.rcParams['xtick.labelsize'] = 14 mpl.rcParams['ytick.labelsize'] = 14 fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(10,10), sharex=True) ax[0].fill_between([0.3,0.7], 0, 1/(0.7-0.3), alpha=0.3) ax[1].fill_between([0.13,0.27], 0, 1/(0.27-0.13), alpha=0.3, color='tab:red') ax[0].plot([0.5, 0.5], [0,1/(0.7-0.3)]) ax[0].plot([0.3, 0.3], [0,1/(0.7-0.3)], '--', color='tab:blue') ax[0].plot([0.7, 0.7], [0,1/(0.7-0.3)], '--', color='tab:blue') ax[0].text(0.5, 1/(0.7-0.3)+0.5, 'mean-juvenile-\nmortality-prob', ha='center', va='center', size=14) ax[0].text(0.3, 1/(0.7-0.3)+0.5, r'60 % $\cdot$ mean', ha='center', va='center', size=14) ax[0].text(0.7, 1/(0.7-0.3)+0.5, r'140 % $\cdot$ mean', ha='center', va='center', size=14) ax[1].plot([0.2, 0.2], [0,1/(0.27-0.13)], color='red') ax[1].plot([0.13, 0.13], [0,1/(0.27-0.13)], '--', color='red') ax[1].plot([0.27, 0.27], [0,1/(0.27-0.13)], '--', color='red') ax[1].text(0.2, 1/(0.27-0.13)+0.5, 'mean-adult-\nmortality-prob', ha='center', 
va='center', size=14) ax[1].text(0.13, 1/(0.27-0.13)+1.5, r'65 % $\cdot$ mean', ha='center', va='center', size=14) ax[1].text(0.27, 1/(0.27-0.13)+1.5, r'135 % $\cdot$ mean', ha='center', va='center', size=14) ax[0].set_ylim(0,10) ax[1].set_ylim(0,10) ax[0].set_xlim(0,0.8) ax[0].set_ylabel('density, juveniles', size=16) ax[1].set_ylabel('density, adults', size=16) ax[1].set_xlabel('mortality probability', size=16) plt.tight_layout() plt.savefig('images/mortality_prob.svg'); ```
github_jupyter
``` %load_ext itikz import itikz from itikz import nicematrix as nM import jinja2 import numpy as np import sympy as sym import panel as pn pn.extension() ## Invoke itikz without using cell magic # itikz.build_commands? # itikz.fetch_or_compile_svg? ``` # 1. Examples from the Original Itikz Notebook ``` %%itikz --temp-dir --file-prefix implicit-demo- --template pic --use-xetex --scale 0.5 \draw[help lines] grid (5, 5); \draw[fill=magenta!10] (1, 1) rectangle (2, 2); \draw[fill=magenta!10] (2, 1) rectangle (3, 2); \draw[fill=magenta!10] (3, 1) rectangle (4, 2); \draw[fill=magenta!10] (3, 2) rectangle (4, 3); \draw[fill=magenta!10] (2, 3) rectangle (3, 4); %%itikz --temp-dir --template standalone --tex-packages=smartdiagram,amsfonts \smartdiagramset{uniform sequence color=true, sequence item border color=black, sequence item font size=\footnotesize, sequence item text color=white } \smartdiagram[sequence diagram]{ $\mathbb{N}$, $\mathbb{Z}$, $\mathbb{Q}$, $\mathbb{R}$, $\mathbb{I}$, $\mathbb{C}$ } src = r""" \documentclass[tikz]{standalone} \usepackage{tikz-cd} \usetikzlibrary{cd} \begin{document} \begin{tikzcd} T \arrow[drr, bend left, "x"] \arrow[ddr, bend right, "y"] \arrow[dr, dotted, "{(x,y)}" description] & & \\ & X \times_Z Y \arrow[r, "p"] \arrow[d, "q"] & X \arrow[d, "f"] \\ & Y \arrow[r, "g"] & Z \end{tikzcd} \end{document} """ itikz.fetch_or_compile_svg(src, prefix="cd_", working_dir="/tmp/itikz", full_err=False, debug=False) ``` # 2. 
Linear Algebra Examples ## 2.1 Row-echelon Form ### 2.2.1 Row Echelon Form Matrix: Numerical Example ``` %%itikz --temp-dir --template standalone --tex-packages=nicematrix,tikz,relsize,amsmath --tikz-libraries=decorations.pathreplacing % --nexec=4 --use-dvi --use-xetex \NiceMatrixOptions{code-for-last-row = \color{blue}, code-for-first-row = \color{red}} $\begin{pNiceArray}{*5r|r}[left-margin = 4pt, first-col, last-row, code-before = { % ----------------------------------------------------------------------- Row-echelon form Path \tikz \draw[red] (row-1-|col-1) -- (row-2-|col-1) -- (row-2-|col-2) -- (row-3-|col-2) -- (row-3-|col-4) -- (row-4-|col-4) -- (row-4-|col-7); } ] & \color{red}{\mathbf{1}} & 1 & 1 & 2 & 2 & \; 4 \\ & 0 & \color{red}{\mathbf{1}} & -1 & 1 & 0 & \; 1 \\ & 0 & 0 & 0 & \color{red}{\mathbf{1}} & -2 & \; 2 \\ & 0 & 0 & 0 & 0 & 0 & \; 0 \\ % ------------------------------------------------------------------------------------ Basic and Free Variables \color{blue}{\begin{matrix} \\ \text{basic}\\ \text{free} \end{matrix}} & \begin{matrix} x_1 \\ \end{matrix} & \begin{matrix} x_2 \\ \end{matrix} & \begin{matrix} \\ x_3=\alpha \end{matrix} & \begin{matrix} x_4 \\ \end{matrix} & \begin{matrix} \\ x_5=\beta \end{matrix} & \end{pNiceArray}$ ``` ### 2.1.2 Stack of Matrices ``` mat_rep = r''' && A & b \\ \noalign{\vskip1.5mm} E_1 && E_1 A & E_1 b \\ \noalign{\vskip1.5mm} E_2 && E_2 E_1 A & E_2 E_1 b \\ \noalign{\vskip1.5mm} \dots && \dots & \dots \\ \noalign{\vskip1.5mm} E_k && E_k \dots E_2 E_1 A & E_k \dots E_2 E_1 b ''' submatrix_locs=[ ['A1','{1-3}{1-4}'],['A2','{2-3}{2-4}'],['A3','{3-3}{3-4}'],['A4','{5-3}{5-4}'], ['A5','{2-1}{2-1}'],['A6','{3-1}{3-1}'],['A7','{5-1}{5-1}'] ] pivot_locs=[] txt_with_locs=[] mat_format='{ccrIr}' itikz.fetch_or_compile_svg( jinja2.Template( nM.GE_TEMPLATE ).render( preamble=nM.preamble, extension=nM.extension,fig_scale=None, array_names=None,row_echelonPaths=[], mat_rep=mat_rep, mat_format=mat_format, 
submatrix_locs=submatrix_locs, submatrix_names=pivot_locs, txt_with_locs=txt_with_locs, row_echelon_paths=[]), prefix='test_', working_dir='/tmp/itikz', debug=False, **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=True), nexec=4, keep_file="/tmp/itikz/foo" ) ``` ## 2.2 Systeme ``` %%itikz --temp-dir --file-prefix foo_ --template article --crop --tex-packages=systeme,amsmath,xcolor %--use-dvi --use-xetex % ----------------------------------------------------- \begin{flalign*} (\xi) & \Leftrightarrow \sysalign{r,r}\systeme[xyzw]{ \boxed{2 x} + 4 y + 8 z + 6 w = 8 @\color{red}{R_1}, 2 x + 5 y + 11 z + 7 w = 10 @\color{red}{R_2}, -4 x -9 y -20 z -12 w = -17 @\color{red}{R_3}, 4 x + 8 y + 13 z + 18 w = 22 @\color{red}{R_4} } \\ % \sysautonum{\quad \color{red}{R_{*}\; \leftarrow \;}} & \Leftrightarrow \sysautonum{\quad \color{red}{R_{*}\; \leftarrow \;}} \sysalign{r,r}\systeme[xyzw]{ \boxed{2 x} + 4 y + 8 z + 6 w = 8 @\color{red}{\ \;\;1 R_1 + 0 R_2 + 0 R_3 + 0 R_4}, \boxed{y} + 3 z + w = 2 @\color{red}{ -1 R_1 + 1 R_2 + 0 R_3 + 0 R_4}, - y - 4 z = -1 @\color{red}{\ \;\;2 R_1 + 0 R_2 + 1 R_3 + 0 R_4}, - 3 z + 6 w = 6 @\color{red}{ -2 R_1 + 0 R_2 + 0 R_3 + 1 R_4} } \end{flalign*} ``` ## 2.3 Programmatic Calls: GE Layout with PySym and Jinja2 ``` k = sym.Symbol('k'); h = sym.Symbol('h') Ab = sym.Matrix([[1,2,4,1],[2,k,8,h],[3,7,3,1]]); matrices = [[None, Ab]]; pivots = []; txt=[] # we could use row ops, but we want a computational layout: # A=A.elementary_row_op('n->n+km', k=-3, row1=2,row2=0 );A # A=A.elementary_row_op('n<->m',row1=1,row2=2);A E1=sym.eye(3);E1[1:,0]=[-2,-3]; A1=E1*Ab; matrices.append([E1,A1]); pivots.append((1,1));txt.append('Pivot at (1,1)') E2=sym.eye(3);E2=E2.elementary_row_op('n<->m',row1=1,row2=2); A2=E2*A1; matrices.append([E2,A2]); pivots.append(None); txt.append('Rows 2 <-> 3') E3=sym.eye(3);E3[2,1]=4-k; A3=E3*A2; matrices.append([E3,A3]); pivots.append((2,2));txt.append('Pivot at (2,2)') pivots.append((3,3)); txt.append('In Row 
Echelon Form') h,m = nM.ge( matrices, Nrhs=[1], formater = sym.latex, pivot_list=[ [(0,1), [(0,0)] ], [(1,1), [(0,0),(1,1)]], [(2,1), [(0,0),(1,1)]], [(3,1), [(0,0),(1,1),(2,2)]] ], ref_path_list = [ [0,1, [(0,0) ],'vv','cyan'], [1,1, [(0,0),(1,1) ],'hv','cyan'], [2,1, [(0,0),(1,1) ],'vh','cyan'], [3,1, [(0,0),(1,1),(2,2)],'hh'] ], comment_list = ["pivot in (1,1)", r"possible pivot in (2,2) \\ \qquad provided $k \ne 4$", r"pivot in(2,2)\\ \qquad after row exchange","pivot in (3,3)"], # <===== ???? Where are they? variable_summary = [True,True,True], array_names = ['E', ['A', 'b']], tmp_dir="tmp", keep_file="tmp/m3" ) h ``` ## 2.4 Back-Substitution: Row Echelon Form, Back-substitution, Standard Form ``` %%itikz --temp-dir --file-prefix test_ --template article --tex-packages=amssymb,cascade,systeme,nicematrix,tikz,relsize --crop --tikz-libraries=decorations.pathreplacing % ==================================================================================== Decorate matrix \NiceMatrixOptions{code-for-last-row = \color{blue}, code-for-first-row = \color{red}} $\begin{pNiceArray}{*5r|r}[left-margin = 4pt, first-col, last-row, code-before = { % ----------------------------------------------------------------------- Row-echelon form Path \tikz \draw[red] (row-1-|col-1) -- (row-2-|col-1) -- (row-2-|col-2) -- (row-3-|col-2) -- (row-3-|col-4) -- (row-4-|col-4) -- (row-4-|col-7); } ] & \color{red}{\mathbf{1}} & 1 & 1 & 2 & 2 & \; 4 \\ & 0 & \color{red}{\mathbf{1}} & -1 & 1 & 0 & \; 1 \\ & 0 & 0 & 0 & \color{red}{\mathbf{1}} & -2 & \; 2 \\ & 0 & 0 & 0 & 0 & 0 & \; 0 \\ % ------------------------------------------------------------------------------------ Basic and Free Variables \color{blue}{\begin{matrix} \\ \text{basic}\\ \text{free} \end{matrix}} & \begin{matrix} x_1 \\ \end{matrix} & \begin{matrix} x_2 \\ \end{matrix} & \begin{matrix} \\ x_3=\alpha \end{matrix} & \begin{matrix} x_4 \\ \end{matrix} & \begin{matrix} \\ x_5=\beta \end{matrix} & \end{pNiceArray}$ % 
==================================================================================== Solve by Back-substitution \vspace{1cm} % below the figure; inkscape cropping fails otherwise... % ------------------------------------------------------------------------------------ Solve {\ShortCascade% {\ShortCascade% {\ShortCascade% {$\boxed{x_3 = \alpha, x_5=\beta}$}% {$x_4 = 2 + 2 x_5$}% {$\;\Rightarrow\; \boxed{x_4 = 2 + 2 \beta}$}% }% {$x_2 = 1 +x_3-x_4$}% {$\;\Rightarrow\; \boxed{x_2 = -1+\alpha-2\beta}$}% }% {$x_1 = 4 - x_2 - x_3 - 2 x_4 -2 x_5$}% {$\;\Rightarrow \; \boxed{x_1 = 1-\alpha+2\beta}.$} }% %& % --------------------------------------------------------------------------------- Standard Form \vspace{1cm} {$\; \therefore\; \left( \begin{array}{r} x_1 \\ x_2 \\ x_3 \\ x_4 \\ x_5 \end{array} \right) = \left( \begin{array}{r} 1 \\ -1 \\ 0 \\ 2 \\ 0 \end{array} \right) + \alpha \left( \begin{array}{r} -1 \\ 1 \\ 1 \\ 0 \\ 0 \end{array} \right) + \beta \left( \begin{array}{r} 2 \\ -2 \\ 0 \\ 2 \\ 1 \end{array} \right) $ } ``` ## 2.5 QR Decomposition ``` A = sym.Matrix([[ 1, 1, -1], [ 1, -2, 1], [-1, -1, 2], [ 1, 1, -1]]) W = sym.Matrix([[ 1, 1, 1], [ 1, -3, 0], [-1, -1, 2], [ 1, 1, 1]]) WtW = W.T @ W WtA = W.T @ A S = WtW**(-1) for i in range(S.shape[0]): S[i,i]=sym.sqrt(S[i,i]) Qt = S*W.T R = S*WtA matrices = [ [ None, None, A, W ], [ None, W.T, WtA, WtW ], [ S, Qt, R, None ] ] h,mz = nM.qr( matrices, formater=sym.latex, array_names=True, tmp_dir="tmp", keep_file='tmp/qr_fancy') h ``` # 3 Geometric Figures ## 3.1 Graph with Arrows ``` %%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 0.8 --tex-packages=amsmath % axes \draw[thick,>=stealth,->] (0,-0.2) -- (0,6); \draw[thick,>=stealth,->] (-0.2,0) -- (7,0); % grid lines \draw[step=1.0,black,thin,dotted,xshift=1cm,yshift=1cm] (-1,-1) grid (6,5); % draw the output line \draw[thin,draw=red, dashed] (-0.2,-0.1) -- (7,3.5) node[right, text=blue, text width=5em] {}; % starting vector blue, 
transformed vector red \draw[thick,>=stealth,->,draw=blue] (0,0) -- (5,1) node[right, text=blue, text width=5em] {\large $\mathbf{\begin{pmatrix} 5 \\ 1 \end{pmatrix}}$}; \draw[thick,>=stealth,->,dotted,draw=black] (5,1) -- (2,1); \draw[thick,>=stealth,->,draw=blue] (0,0) -- (1,3) node[text=blue, label={[xshift=0.3cm, yshift=-0.1cm]\large $\color{blue}{\mathbf{\begin{pmatrix} 1 \\ 3 \end{pmatrix}}}$}] (x2) {}; \draw[thick,>=stealth,->,dotted,draw=black] (1,3) -- (6,3); ``` ## 3.2 Parallelograms ``` %%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 2 --tex-packages=amsmath --tikz-libraries quotes \node (n) [draw, minimum width=3cm, minimum height=2cm, xslant=0.8] {}; \draw (n.south west) to ["$u+y$",pos=0.7,sloped] (n.north east) (n.north west) node[above] {$u$} to ["$u-y$",pos=0.3,sloped] (n.south east) node[below] {$y$}; %%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 2 --tex-packages=tkz-euclide --tikz-libraries arrows \tkzDefPoint(0,0){A} \tkzDefPoint(30:3){B} \tkzDefShiftPointCoord[B](10:2){C} \tkzDefShiftPointCoord[A](10:2){D} \tkzDrawPolygon(A,...,D) \tkzDrawPoints(A,...,D) %%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 1 --tikz-libraries shapes.geometric \tikzstyle{every node}=[trapezium, draw, minimum width=3cm, trapezium left angle=120, trapezium right angle=60] \node[trapezium stretches=false,minimum height=1cm] at (0,0) {A}; \node[trapezium stretches=false,minimum height=1cm] at (0,1.5) {\fbox{A long }}; \node[trapezium stretches=false,minimum height=1cm] at (0,3) {\fbox{A long text}}; \draw[thick,green,|-|] (-1.5,-.5) -- (1.5,-0.5); \draw[thick,green,|-|] (-1.5,0.5) -- (-1.5,-0.5); \draw[thick,blue,|-|] (-1.5,1) -- (1.5,1); \draw[thick,blue,|-|] (-1.5,1) -- (-1.5,2); \draw[thick,red,|-|] (-1.5,2.5) -- (1.5,2.5); \draw[thick,red,|-|] (-1.5,2.5) -- (-1.5,3.5); ``` ## 3.3 Arcs ### 3.3.1 Arcs with pgfplot ``` %%itikz --temp-dir --file-prefix graph- --template pic --use-xetex 
--scale 1 --tex-packages=pgfplots --tikz-libraries=calc \def\centerarc[#1](#2)(#3:#4:#5)% Syntax: [draw options] (center) (initial angle:final angle:radius) { \draw[#1] ($(#2)+({#5*cos(#3)},{#5*sin(#3)})$) arc (#3:#4:#5); } \centerarc[red,thick,->](0,0)(5:85:1) ; \centerarc[red,thick,->](1,1)(-160:130:1) ; ``` ### 3.3.2 Arcs with tkz-euclide ``` %%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 1 --tex-packages=tkz-euclide --tikz-libraries=calc \tkzDefPoint(0,0){O} \tkzDefPoint(2,-1){A} \tkzDefPoint(1,1){B} \tkzDrawArc[color=blue](O,A)(B) \tkzDrawArc[color=brown](O,B)(A) \tkzDrawArc(O,B)(A) \tkzDrawLines[add = 0 and .5](O,A O,B) \tkzDrawPoints(O,A,B) \tkzLabelPoints[below](O,A,B) ``` ### 3.1.3 Arcs with Tikz ``` %%itikz --temp-dir --file-prefix graph- --template pic --use-xetex --scale 1 --tex-packages=amsmath --tikz-libraries=calc \newcommand{\cercle}[4]{ \node[circle,inner sep=0,minimum size={2*#2}](a) at (#1) {}; \draw[red,thick] (a.#3) arc (#3:{#3+#4}:#2); } \newcommand{\mycercle}[6]{ \node[circle,inner sep=0,minimum size={2*#2}](a) at (#1) {}; \draw[#6,line width=#5] (a.#3) arc (#3:{#3+#4}:#2); } \coordinate (OR) at (0.00, 0.00); \coordinate (center) at (3,2); \cercle{center}{2cm}{25}{-90} ![\cercle{1,2}{1cm}{15}{130}][1] \mycercle {OR} {0.5cm} {0} {270} {1.00} {blue} ; itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=True) ``` ## 3.4 3D Examples ``` %reload_ext itikz import itikz from itikz import nicematrix as nM from itikz.tikz import tikz_source import jinja2 import numpy as np import sympy as sym import panel as pn pn.extension() src = tikz_source( r"""% ======================================================= colors \definecolor{la_white}{RGB}{233,235,223} %#E9EBDF \definecolor{la_dark}{RGB}{59,54,81} %#3B3651 \definecolor{la_gray}{RGB}{96,112,139} %#60708B \definecolor{la_tan}{RGB}{152,159,122} %#989F7A % -------------------------------------------------------- axes \draw[-latex] (0,0,0) -- (4,0,0) node[left] {$x$}; 
\draw[-latex] (0,0,0) -- (0,4,0) node[below] {$y$}; \draw[-latex] (0,0,0) -- (0,0,4) node[left] {$z$}; % ---------------------------------------------------------- planes \draw[fill=la_tan,opacity=0.3] (-3,0,-3) -- (-3,0,3) -- (3,0,3) -- (3,0,-3) -- cycle; \draw[fill=la_gray,opacity=0.4] (-3,-3,0) -- (-3,3,0) -- (3,3,0) -- (3,-3,0) -- cycle; \draw[thick](-3,0,0)--(3,0,0); % intersection of the planes % ---------------------------------------------------------- text decoration \node[anchor=south west,align=center] (line) at (3,3,3) {line of\\ intersection}; \draw[-latex] (line) to[out=180,in=75] (-2,0,0.05); """, class_args="border=23.14pt", tex_packages="tikz-3dplot", preamble=r"\tdplotsetmaincoords{70}{110}", tikz_args=r"tdplot_main_coords,font=\sffamily" ) itikz.fetch_or_compile_svg( src, prefix='test_', working_dir='/tmp/itikz', debug=False, **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False), nexec=1, keep_file="/tmp/foo" ) def tikz_source( code, class_args=None, tex_packages=None, tikz_libraries=None, extension="% no_extension", preamble="% preamble", tikz_args=None): template = r"""\documentclass[tikz{% for a in class_args %},{{a}}{% endfor %}]{standalone} \pagestyle{empty} {% for p in tex_packages %} {{p}} {% endfor %} {% for p in tikz_libraries %} \usetikzlibrary{{p}} {% endfor %} {{extension}} \begin{document} {{preamble}} \begin{tikzpicture}{% for p in tikz_args %}{{p}}{% endfor %} {{tikz_code}} \end{tikzpicture} \end{document} """ import re pattern = re.compile( r'(\[[^]]*])(.*)' ) def split(arg): if arg is None: return [] l = [] for a in arg.split(","): match = pattern.match( a ) if match: l.append( r"\usepackage" + match.group(1) + "{" + match.group(2)+ "}" ) else: l.append(r"\usepackage{" + a + "}") return l class_args = [] if class_args is None else [class_args] tex_packages = split(tex_packages) tikz_libraries = [] if tikz_libraries is None else ["{"+tikz_libraries+"}"] tikz_args = [] if tikz_args is None else ["["+tikz_args+"]"] 
src=jinja2.Template( template )\ .render( class_args=class_args, tex_packages=tex_packages, tikz_libraries=tikz_libraries, extension=extension, preamble=preamble, tikz_args=tikz_args, tikz_code=code ) return src src = tikz_source( r"""% ======================================================= colors \definecolor{la_white}{RGB}{233,235,223} %#E9EBDF \definecolor{la_dark}{RGB}{59,54,81} %#3B3651 \definecolor{la_gray}{RGB}{96,112,139} %#60708B \definecolor{la_tan}{RGB}{152,159,122} %#989F7A % -------------------------------------------------------- axes \draw[-latex] (0,0,0) -- (4,0,0) node[left] {$x$}; \draw[-latex] (0,0,0) -- (0,4,0) node[below] {$y$}; \draw[-latex] (0,0,0) -- (0,0,4) node[left] {$z$}; % ---------------------------------------------------------- planes \draw[fill=la_tan,opacity=0.3] (-3,0,-3) -- (-3,0,3) -- (3,0,3) -- (3,0,-3) -- cycle; \draw[fill=la_gray,opacity=0.4] (-3,-3,0) -- (-3,3,0) -- (3,3,0) -- (3,-3,0) -- cycle; \draw[thick](-3,0,0)--(3,0,0); % intersection of the planes % ---------------------------------------------------------- text decoration \node[anchor=south west,align=center] (line) at (3,3,3) {line of\\ intersection}; \draw[-latex] (line) to[out=180,in=75] (-2,0,0.05); """, class_args="border=23.14pt", tex_packages="tikz-3dplot", preamble=r"\tdplotsetmaincoords{70}{110}", tikz_args=r"tdplot_main_coords,font=\sffamily" ) itikz.fetch_or_compile_svg( src, prefix='test_', working_dir='/tmp/itikz', debug=False, **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False), nexec=1, keep_file="/tmp/foo" ) src = tikz_source( r"""% ======================================================= colors \definecolor{la_white}{RGB}{233,235,223} %#E9EBDF \definecolor{la_dark}{RGB}{59,54,81} %#3B3651 \definecolor{la_gray}{RGB}{96,112,139} %#60708B \definecolor{la_tan}{RGB}{152,159,122} %# \definecolor{la_red}{RGB}{152,0,0} %# \draw[thin,->] (0,0,0) -- (1,0,0) node[anchor=north east]{$x$}; \draw[thin,->] (0,0,0) -- (0,1,0) node[anchor=north 
west]{$y$}; \draw[thin,->] (0,0,0) -- (0,0,1) node[anchor=south]{$z$}; \tdplotsetcoord{O}{0}{0}{0} \tdplotsetcoord{P}{1.}{90}{-45} \tdplotsetcoord{Q}{1.}{80}{-10} \tdplotsetcoord{W}{1.}{-30}{60} \tdplotsetcoord{Pn}{-1.}{90}{-45} \tdplotsetcoord{Qn}{-1.}{80}{-10} \foreach \x in {-0.2,0,...,0.8} \foreach \y in {-0.2,0,...,0.8} { \draw[very thin,gray] (\x,-0.2) -- (\x,0.8); \draw[very thin,gray] (-0.2,\y) -- (0.8,\y); } %\def\x{.5} %\filldraw[ % draw=la_tan!10,% % fill=la_gray!20,% %] (0,0,0) % -- (\x,{sqrt(3)*\x},0) % -- (\x,{sqrt(3)*\x},1) % -- (0,0,1) % -- cycle; %\draw[color=la_dark!10,fill=la_gray!60, nearly transparent] (O) -- (P) -- (Q) -- cycle; \draw[color=la_dark!10,fill=la_tan!80, nearly transparent] (Pn) -- (Qn) -- (P) -- (Q) -- cycle; %draw a vector from origin to point (P) \draw[thick,-stealth,color=la_gray] (O) -- (P); \draw[thick,-stealth,color=la_gray] (O) -- (Q); \draw[thick,-stealth,color=la_red] (O) -- (W); """, #class_args="border=23.14pt", tex_packages="ifthen,tikz-3dplot", preamble=r"""% ----------------------------------------------- \tdplotsetmaincoords{70}{70} """, tikz_args=r"tdplot_main_coords,font=\sffamily,scale=3." 
) itikz.fetch_or_compile_svg( src, prefix='test_', working_dir='/tmp/itikz', debug=False, **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False), nexec=1, keep_file="/tmp/bah" ) src = tikz_source( r""" % ======================================================= colors \definecolor{la_white}{RGB}{233,235,223} %#E9EBDF \definecolor{la_dark}{RGB}{59,54,81} %#3B3651 \definecolor{la_gray}{RGB}{96,112,139} %#60708B \definecolor{la_tan}{RGB}{152,159,122} %#989F7A \coordinate (A) at (0.95,3.41); \coordinate (B) at (1.95,0.23); \coordinate (C) at (3.95,1.23); \coordinate (D) at (2.95,4.41); \coordinate (E) at (1.90,3.30); \coordinate (F) at (0.25,0.45); \coordinate (G) at (2.25,1.45); \coordinate (H) at (3.90,4.30); \coordinate (I) at (-0.2,1.80); \coordinate (J) at (2.78,1.00); \coordinate (K) at (4.78,2.00); \coordinate (L) at (1.80,2.80); \path[name path=AB] (A) -- (B); \path[name path=CD] (C) -- (D); \path[name path=EF] (E) -- (F); \path[name path=IJ] (I) -- (J); \path[name path=KL] (K) -- (L); \path[name path=HG] (H) -- (G); \path[name path=IL] (I) -- (L); \path [name intersections={of=AB and EF,by=M}]; \path [name intersections={of=EF and IJ,by=N}]; \path [name intersections={of=AB and IJ,by=O}]; \path [name intersections={of=AB and IL,by=P}]; \path [name intersections={of=CD and KL,by=Q}]; \path [name intersections={of=CD and HG,by=R}]; \path [name intersections={of=KL and HG,by=S}]; \path [name path=NS] (N) -- (S); \path [name path=FG] (F) -- (G); \path [name intersections={of=NS and AB,by=T}]; \path [name intersections={of=FG and AB,by=U}]; \draw[thick, color=la_dark, fill=la_tan!60] (A) -- (B) -- (C) -- (D) -- cycle; %\draw[thick, color=la_dark, fill=la_tan!60] (E) -- (F) -- (G) -- (H) -- cycle; %\draw[thick, color=la_dark, fill=la_tan!60] (I) -- (J) -- (K) -- (L) -- cycle; \draw[thick, color=la_dark, fill=la_gray!50] (P) -- (O) -- (I) -- cycle; \draw[thick, color=la_dark, fill=la_gray!50] (O) -- (J) -- (K) -- (Q) -- cycle; \draw[thick, color=la_dark, 
fill=la_tan!10] (H) -- (E) -- (M) -- (R) -- cycle; \draw[thick, color=la_dark, fill=la_tan!10] (M) -- (N) -- (T) -- cycle; \draw[thick, color=la_dark, fill=la_tan!10] (N) -- (F) -- (U) -- (O) -- cycle; """, class_args="border=23.14pt", #tex_packages="tikz-3dplot", tikz_libraries="positioning,calc,intersections", #preamble=r"\tdplotsetmaincoords{70}{110}", tikz_args=r"scale=1.6" ) itikz.fetch_or_compile_svg( src, prefix='test_', working_dir='/tmp/itikz', debug=False, **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False), nexec=1, keep_file="/tmp/bah" ) src = tikz_source( r"""% ======================================================= colors \definecolor{la_white}{RGB}{233,235,223} \definecolor{la_dark}{RGB}{59,54,81} \definecolor{la_gray}{RGB}{96,112,139} \definecolor{la_tan}{RGB}{152,159,122} \definecolor{la_red}{RGB}{152,0,0} \tdplotsetrotatedcoords{00}{30}{0} \begin{scope}[tdplot_rotated_coords] \begin{scope}[canvas is xy plane at z=0] \fill[la_gray,fill opacity=0.3] (-2,-3.5) rectangle (2,3.5); % =============== the plane \draw[very thick] (-2,0) -- (2,0); % line on the plane \path (-150:2) coordinate (H) (-1.5,0) coordinate(X); \pgflowlevelsynccm \draw[very thick,-stealth,la_red] (0,0) -- (-30:2.5); % vector on the plane? \draw[very thick,-stealth,la_red] (0,0) -- (50:2.5); % vector on the plane? \end{scope} \draw[stealth-] (H) -- ++ (-1,0,0.2) node[pos=1.3]{$E_1$}; % ================= decorate eigenspace E_1 \draw[stealth-] (X) -- ++ (0,1,0.2) node[pos=1.3] {$X$}; \draw[very thick,-stealth,color=la_red] (0,0,0) coordinate (O) -- (1,1,4) node[right]{$p$}; % coords are (y,x,z) ?! 
\end{scope} \pgfmathsetmacro{\Radius}{1.5} \draw[-stealth] (O) -- (2.5*\Radius,0,0) node[pos=1.15] {$y$}; \draw[-stealth] (O) -- (0,3.5*\Radius,0) node[pos=1.15] {$x$}; \draw[-stealth] (O) -- (0,0,2.5*\Radius) node[pos=1.05] {$z$}; """, #class_args="border=23.14pt", tex_packages="ifthen,tikz-3dplot", preamble=r"""% ----------------------------------------------- \tdplotsetmaincoords{105}{-30} """, tikz_args=r"tdplot_main_coords,font=\sffamily,scale=1." ) itikz.fetch_or_compile_svg( src, prefix='test_', working_dir='/tmp/itikz', debug=False, **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False), nexec=1, keep_file="/tmp/bah" ) src = tikz_source( r"""% ======================================================= colors \definecolor{la_white}{RGB}{233,235,223} \definecolor{la_dark}{RGB}{59,54,81} \definecolor{la_gray}{RGB}{96,112,139} \definecolor{la_tan}{RGB}{152,159,122} \definecolor{la_red}{RGB}{152,10,10} % variables \def\rvec{.8} \def\thetavec{30} \def\phivec{60} % ------------------------------------------------------------------------ axes \coordinate (O) at (0,0,0); \draw[thick,->] (0,0,0) -- (1,0,0) node[anchor=north east]{$x$}; \draw[thick,->] (0,0,0) -- (0,1,0) node[anchor=north west]{$y$}; \draw[thick,->] (0,0,0) -- (0,0,1) node[anchor=south] {$z$}; \filldraw[fill=la_dark, nearly transparent] (-1,1,0) -- (1,1,0) -- (1,-1,0) -- (-1,-1,0) -- cycle ; % x-y plane % a= s(1,2,0), b=t(1,0,.1) % 0,OA,0A+OB,0B (0,0,0) -- (s,2s,0) -- (s+t,2s,.1t) -- (t,0,.1t) -- cycle; % (0,0,0) -- (-s,-2s,0) -- (-s+t,-2s,.1t) -- (t,0,.1t) -- cycle; % take t=.5,s=.5 \filldraw[fill=la_tan, nearly transparent] (0,0,0) -- (.5,1,0) -- (1,1,.05) -- (.5,0,.05) -- cycle; \filldraw[fill=la_tan, nearly transparent] (0,0,0) -- (-.5,-1,0) -- (0,-1,.05) -- (.5,0,.05) -- cycle; % ------------------------------------------------------------------------ vectors \tdplotsetcoord{P}{\rvec}{\thetavec}{\phivec} % P \draw[-stealth,la_red,very thick] (O) -- (P) node[above right] {$P$}; 
\draw[dashed,red] (O) -- (Pxy); \draw[dashed,red] (P) -- (Pxy); \draw[dashed,red] (Py) -- (Pxy); % ------------------------------------------------------------------------- arcs \tdplotdrawarc[->]{(O)}{0.2}{0}{\phivec} {anchor=north}{$\phi$} \tdplotsetthetaplanecoords{\phivec} \tdplotdrawarc[->,tdplot_rotated_coords]{(0,0,0)}{0.5}{0}{\thetavec} {anchor=south west}{$\theta$} """, # ============================================================================================= #class_args="border=23.14pt", tex_packages="ifthen,tikz-3dplot", extension= r"\tikzset{>=latex} % for LaTeX arrow head", preamble = r""" \tdplotsetmaincoords{70}{120} """, tikz_args=r"tdplot_main_coords,font=\sffamily,scale=3." ) itikz.fetch_or_compile_svg( src, prefix='test_', working_dir='/tmp/itikz', debug=False, **itikz.build_commands_dict(use_xetex=True,use_dvi=False,crop=False), nexec=1, keep_file="/tmp/bah1" ) ```
github_jupyter
# Introducción a Python: Sintaxis, Funciones y Booleanos <img style="float: right; margin: 0px 0px 15px 15px;" src="https://www.python.org/static/community_logos/python-logo.png" width="200px" height="200px" /> > Bueno, ya que sabemos qué es Python, y que ya tenemos las herramientas para trabajarlo, veremos cómo usarlo. Referencias: - https://www.kaggle.com/learn/python ___ # 1. Sintaxis básica ## 1.1 Hello, Python! ¿Qué mejor para empezar que analizar el siguiente pedazo de código? ``` work_hours = 0 print(work_hours) # ¡A trabajar! Como una hora, no menos, como cinco work_hours = work_hours + 5 if work_hours > 0: print("Mucho trabajo!") rihanna_song = "Work " * work_hours print(rihanna_song) ``` ¿Alguien adivina qué salida produce el código anterior? Bueno, veamos línea por línea qué está pasando: ``` work_hours = 0 ``` **Asignación de variable:** la línea anterior crea una variable llamada `work_hours` y le asigna el valor de `0` usando el símbolo `=`. A diferencia de otros lenguajes (como Java o `C++`), la asignación de variables en Python: - no necesita que la variable `work_hours` sea declarada antes de asignarle un valor; - no necesitamos decirle a Python qué tipo de valor tendrá la variable `work_hours` (int, float, str, list...). De hecho, podríamos luego asignarle a `work_hours` otro tipo de valor como un string (cadena de caracteres) o un booleano (`True` o `False`). ``` print(work_hours) ``` **Llamado a una función**: print es una función de Python que imprime el valor pasado a su argumento. Las funciones son llamadas poniendo paréntesis luego de su nombre, y escribiendo sus argumentos (entradas) dentro de dichos paréntesis. ``` # ¡A trabajar! Como una hora, no menos, como cinco work_hours = work_hours + 5 # work_hours += 5 # Esto es completamente equivalente a la linea de arriba print(work_hours) ``` La primer línea es un **comentario**, los cuales en Python comienzan con el símbolo `#`. A continuación se hace una reasignación. 
En este caso, estamos asignando a la variable `work_hours` un nuevo valor que involucra una operación aritmética en su propio valor previo. ``` if work_hours > 0: print("Mucho trabajo!") if work_hours > 10: print("Mucho trabajo!") ``` Todavía no es tiempo de ver **condicionales**, sin embargo, se puede adivinar fácilmente lo que este pedazo de código hace, ya que se puede leer casi literal. Notemos que la *indentación* es muy importante acá, y especifica qué parte del código pertenece al `if`. Lo que pertenece al `if` empieza por los dos puntos (`:`) y debe ir indentado en el renglón de abajo. Así que mucho cuidado con la indentación, sobretodo si han programado en otros lenguajes en los que este detalle no implica nada. Acá vemos un tipo de variable string (cadena de caracteres). Se especifica a Python un objeto tipo string poniendo doble comilla ("") o comilla simple (''). ``` "Work " == 'Work ' rihanna_song = "Work " * work_hours print(rihanna_song) a = 5 a type(a) a *= "A " a type(a) ``` El operador `*` puede ser usado para multiplicar dos números (`3 * 4 evalua en 12`), pero también podemos multiplicar strings por números enteros, y obtenemos un nuevo string que repite el primero esa cantidad de veces. En Python suceden muchas cosas de este estilo, muchos "truquillos" que ahorran mucho tiempo. ## 1.2 Tipos de números en Python y operaciones aritméticas Ya vimos un ejemplo de una variable que contenía un número: ``` work_hours = 0 ``` Sin embargo, hay varios tipos de "números". Si queremos ser más tecnicos, preguntémosle a Python qué tipo de variable es `work_hours`: ``` type(work_hours) ``` Vemos que es un entero (`int`). Hay otro tipo de número que encontramos en Python: ``` type(0.5) ``` Un número de punto flotante (float) es un número con decimales. Ya conocemos dos funciones estándar de Python: `print()` y `type()`. La última es bien útil para preguntarle a Python "¿Qué es esto?". 
Ahora veamos operaciones aritméticas: ``` # Operación suma(+)/resta(-) 5 + 8, 9 - 3 # Operación multiplicación(*) 5 * 8 # Operación división(/) 6 / 7 # Operación división entera(//) 5 // 2 # Operación módulo(%) 5 % 2 # Exponenciación(**) 2**5 # Bitwise XOR (^) ## 2 == 010 ## 5 == 101 ## 2^5 == 111 == 1 * 2**2 + 1 * 2**1 + 1 * 2**0 == 7 2^5 ``` El orden en que se efectúan las operaciones es justo como nos lo enseñaron en primaria/secundaria: - PEMDAS: Parentesis, Exponentes, Multiplicación/División, Adición/Sustracción. Ante la duda siempre usar paréntesis. ``` # Ejemplo de altura con sombrero altura_sombrero_cm = 20 mi_altura_cm = 183 # Que tan alto soy cuando me pongo sombrero? altura_total_metros = altura_sombrero_cm + mi_altura_cm / 100 print("Altura total en metros =", altura_total_metros, "?") # Que tan alto soy cuando me pongo sombrero? altura_total_metros = (altura_sombrero_cm + mi_altura_cm) / 100 print("Altura total en metros =", altura_total_metros) import this ``` ### 1.2.1 Funciones para trabajar con números `min()` y `max()` devuelven el mínimo y el máximo de sus argumentos, respectivamente... ``` # min min(1, 8, -5, 4.4, 4.89) # max max(1, 8, -5, 4.4, 4.89) ``` `abs()` devuelve el valor absoluto de su argumeto: ``` # abs abs(5), abs(-5) ``` Aparte de ser tipos de variable, `float()` e `int()` pueden ser usados como función para convertir su argumento al tipo especificado (esto lo veremos mejor cuando veamos programación orientada a objetos): ``` print(float(10)) print(int(3.33)) # They can even be called on strings! print(int('807') + 1) int(8.99999) ``` ___ # 2. Funciones y ayuda en Python ## 2.1 Pidiendo ayuda Ya vimos algunas funciones en la sección anterior (`print()`, `abs()`, `min()`, `max()`), pero, ¿y si se nos olvida que hace alguna de ellas? Que no pande el cúnico, ahí estará siempre la función `help()` para venir al rescate... 
``` # Usar la función help sobre la función round help(round) help(max) # Función round round(8.99999) round(8.99999, 2) round(146, -2) ``` ### ¡CUIDADO! A la función `help()` se le pasa como argumento el nombre de la función, **no la función evaluada**. Si se le pasa la función evaluada, `help()` dará la ayuda sobre el resultado de la función y no sobre la función como tal. Por ejemplo, ``` # Help de una función help(round) a = round(10.85) type(a) # Help de una función evaluada help(round(10.85)) ``` Intenten llamar la función `help()` sobre otras funciones a ver si encuentran algo interesante... ``` # Help sobre print help(print) # Print print(1, 'a', "Hola, ¿Cómo están?", sep="_este es un separador_", end=" ") print(56) ``` ## 2.2 Definiendo funciones Las funciones por defecto de Python son de mucha utilidad. Sin embargo, pronto nos daremos cuenta que sería más útil aún definir nuestras propias funciones para reutilizarlas cada vez que las necesitemos. Por ejemplo, creemos una función que dados tres números, devuelva la mínima diferencia absoluta entre ellos ``` # Explicar acá la forma de definir una función def diferencia_minima(a, b, c): diff1 = abs(a - b) diff2 = abs(a - c) diff3 = abs(b - c) return min(diff1, diff2, diff3) ``` Las funciones comienzan con la palabra clave `def`, y el código indentado luego de los dos puntos `:` se corre cuando la función es llamada. `return` es otra palabra clave que sólo se asocia con funciones. Cuando Python se encuentra un `return`, termina la función inmediatamente y devuelve el valor que hay seguido del `return`. ¿Qué hace específicamente la función que escribimos? ``` # Ejemplo: llamar la función unas 3 veces diferencia_minima(7, -5, 8) diferencia_minima(7.4, 7, 0) diferencia_minima(7, 6, 8) type(diferencia_minima) ``` Intentemos llamar `help` sobre la función ``` help(diferencia_minima) ``` Bueno, Python tampoco es tan listo como para leer código y entregar una buena descripción de la función. 
Esto es trabajo del diseñador de la función: incluir la documentación. ¿Cómo se hace? (Recordar añadir un ejemplo) ``` # Copiar y pegar la función, pero esta vez, incluir documentación de la misma def diferencia_minima(a, b, c): """ This function determines the minimum difference between the three arguments passed a, b, c. Example: >>> diferencia_minima(7, -5, 8) 1 """ diff1 = abs(a - b) diff2 = abs(a - c) diff3 = abs(b - c) return min(diff1, diff2, diff3) # Volver a llamar el help help(diferencia_minima) ``` Muy bien. Ahora, podemos observar que podemos llamar esta función sobre diferentes números, incluso de diferentes tipos: - Si todos son enteros, entonces nos retornará un entero. - Si hay algún float, nos retornará un float. ``` # Todos enteros diferencia_minima(1, 1, 4) # Uno o más floats diferencia_minima(0., 0., 1) ``` Sin embargo, no todas las entradas son válidas: ``` # String: TypeError diferencia_minima('a', 'b', 'c') ``` ### 2.2.1 Funciones que no devuelven ¿Qué pasa si no incluimos el `return` en nuestra función? ``` # Ejemplo de función sin return def imprimir(a): print(a) # Llamar la función un par de veces imprimir('Hola a todos') var = imprimir("Hola a todos") print(var) def write_file(a): with open("file.txt", 'w') as f: f.write(a) write_file("Hola a todos") ``` ### 2.2.2 Argumentos por defecto Modificar la función `saludo` para que tenga un argumento por defecto. ``` # Función saludo con argumento por defecto def greetings(name="Ashwin"): # print(f"Welcome, {name}!") # print("Welcome, " + name + "!") # print("Welcome, ", name, "!", sep="") print("Welcome, {}!".format(name)) # print("Welcome, %s!" %name) greetings("Alejandro") greetings() ``` ___ # 3. Booleanos y condicionales ## 3.1 Booleanos Python tiene un tipo de objetos de tipo `bool` los cuales pueden tomar uno de dos valores: `True` o `False`. 
Ejemplo: ``` x = True print(x) print(type(x)) ``` Normalmente no ponemos `True` o `False` directamente en nuestro código, sino que más bien los obtenemos luego de una operación booleana (operaciones que dan como resultado `True` o `False`). Ejemplos de operaciones: ``` # == 3 == 3. # != 2.99999 != 3 # < 8 < 5 # > 8 > 5 # <= 4 <= 4 # >= 5 >= 8 ``` **Nota:** hay una diferencia enorme entre `==` e `=`. Con el primero estamos preguntando acerca del valor (`n==2`: ¿es `n` igual a `2`?), mientras que con el segundo asignamos un valor (`n=2`: `n` guarda el valor de `2`). Ejemplo: escribir una función que dado un número nos diga si es impar ``` # Función para encontrar números impares def odd(num_int): return (num_int % 2) != 0 def odd(num_int): if (num_int % 2) != 0: return True return False # Probar la función odd(5), odd(32) (5, 4, 3) == ((5, 4, 3)) ``` ### 3.1.1 Combinando valores booleanos Python también nos provee operadores básicos para operar con valores booleanos: `and`, `or`, y `not`. Por ejemplo, podemos definir una función para ver si vale la pena llegar a la taquería de la esquina: ``` # Función: ¿vale la pena ir a la taquería? distancia, clima, paraguas ... def vale_la_pena_ir_taqueria(distancia, clima, paraguas): return (distancia <= 100) and (clima != 'lluvioso' or paraguas == True) # Probar función vale_la_pena_ir_taqueria(distancia=50, clima="soleado", paraguas=False) vale_la_pena_ir_taqueria(distancia=50, clima="lluvioso", paraguas=False) ``` También podemos combinar más de dos valores: ¿cuál es el resultado de la siguiente expresión? ``` (True or True) and False ``` Uno puede tratar de memorizarse el orden de las operaciones lógicas, así como el de las aritméticas. Sin embargo, en línea con la filosofía de Python, el uso de paréntesis enriquece mucho la legibilidad y no quedan lugares a dudas. Los siguientes códigos son equivalentes, pero, ¿cuál se lee mejor? 
``` have_umbrella = True rain_level = 4 have_hood = True is_workday = False prepared_for_weather = have_umbrella or rain_level < 5 and have_hood or not rain_level > 0 and is_workday prepared_for_weather prepared_for_weather = have_umbrella or (rain_level < 5 and have_hood) or not (rain_level > 0 and is_workday) prepared_for_weather prepared_for_weather = have_umbrella or ((rain_level < 5) and have_hood) or (not (rain_level > 0 and is_workday)) prepared_for_weather prepared_for_weather = ( have_umbrella or ((rain_level < 5) and have_hood) or (not (rain_level > 0 and is_workday)) ) prepared_for_weather ``` ___ ## 3.2 Condicionales Aunque los booleanos son útiles en si, dan su verdadero salto a la fama cuando se combinan con cláusulas condicionales, usando las palabras clave `if`, `elif`, y `else`. Los condicionales nos permiten ejecutar ciertas partes de código dependiendo de alguna condición booleana: ``` # Función de inspección de un número def inspeccion(num): if num == 0: print('El numero', num, 'es cero') elif num > 0: print('El numero', num, 'es positivo') elif num < 0: print('El numero', num, 'es negativo') else: print('Nunca he visto un numero como', num) # Probar la función inspeccion(1), inspeccion(-1), inspeccion(0) ``` - `if` y `else` se utilizan justo como en otros lenguajes. - Por otra parte, la palabra clave `elif` es una contracción de "else if". - El uso de `elif` y de `else` son opcionales. - Adicionalmente, se pueden incluir tantos `elif` como se requieran. Como en las funciones, el bloque de código correspondiente al condicional empieza luego de los dos puntos (`:`), y lo que sigue está indentado 4 espacios (tabulador). Pertenece al condicional todo lo que esté indentado hasta que encontremos una línea sin indentación. 
Por ejemplo, analicemos la siguiente función: ``` def f(x): if x > 0: print("Only printed when x is positive; x =", x) print("Also only printed when x is positive; x =", x) print("Always printed, regardless of x's value; x =", x) f(-1) ``` ### 3.2.1 Conversión a booleanos Ya vimos que la función `int()` convierte sus argumentos en enteros, y `float()` los convierte en números de punto flotante. De manera similar `bool()` convierte sus argumentos en booleanos. ``` print(bool(1)) # Todos los números excepto el cero 0 se tratan como True print(bool(0)) print(bool("asf")) # Todos los strings excepto el string vacío "" se tratan como True print(bool("")) # No confundir el string vacío "" con un espacio " " bool(" ") ``` Por ejemplo, ¿qué imprime el siguiente código? ``` if 0: print(0) elif "tocino": print("tocino") ``` Las siguientes celdas son equivalentes. Sin embargo, por la legibilidad preferimos la primera: ``` x = 10 if x != 0: print('Estoy contento') else: print('No estoy tan contento') if x: print('Estoy contento') else: print('No estoy tan contento') ``` ### 3.2.2 Expresiones condicionales Es muy común que una variable pueda tener dos valores, dependiendo de alguna condición: ``` # Función para ver si pasó o no dependiendo de la nota def mensaje_calificacion(nota): """ Esta función imprime si pasaste o no de acuerdo a la nota obtenida. La minima nota aprobatoria es de 6. 
>>> mensaje_calificacion(9) Pasaste la materia, con una nota de 9 >>> mensaje_calificacion(5) Reprobaste la materia, con una nota de 5 """ if nota >= 6: print('Pasaste la materia, con una nota de', nota) else: print('Reprobaste la materia, con una nota de', nota) mensaje_calificacion(5) mensaje_calificacion(7) mensaje_calificacion(10) ``` Por otra parte, Python permite escribir este tipo de expresiones en una sola línea, lo que resulta muy últil y muy legible: ``` # Función para ver si pasó o no dependiendo de la nota def mensaje_calificacion(nota): """ Esta función imprime si pasaste o no de acuerdo a la nota obtenida. >>> mensaje_calificacion(9) Pasaste la materia, con una nota de 9 >>> mensaje_calificacion(5) Reprobaste la materia, con una nota de 5 """ resultado = 'Pasaste' if nota >= 6 else 'Reprobaste' print(resultado + ' la materia, con una nota de', nota) mensaje_calificacion(5) mensaje_calificacion(7) ``` ___ Hoy vimos: - La sintaxis básica de Python, los tipos de variable int, float y str, y algunas funciones básicas. - Cómo pedir ayuda de las funciones, y como construir nuestras propias funciones. - Variables Booleanas y condicionales. Para la próxima clase: - Tarea 1 para el miércoles (23:59). <script> $(document).ready(function(){ $('div.prompt').hide(); $('div.back-to-top').hide(); $('nav#menubar').hide(); $('.breadcrumb').hide(); $('.hidden-print').hide(); }); </script> <footer id="attribution" style="float:right; color:#808080; background:#fff;"> Created with Jupyter by jfraustro. </footer>
github_jupyter
``` import numpy as np import random import pandas as pd import sklearn from matplotlib import pyplot as plt plt.rcParams['figure.figsize'] = (10.0, 8.0) from sklearn.datasets import make_biclusters from sklearn.datasets import samples_generator as sg from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn import preprocessing # from sklearn.cluster.bicluster import SpectralCoclustering from sklearn.metrics import consensus_score from sklearn.metrics.cluster import normalized_mutual_info_score from sklearn.metrics.cluster import adjusted_rand_score from biclustering import DeltaBiclustering, MSR %pylab inline def generate_dataset(option, noise=1, noise_background=True, shuffle=False): """ This function generates syntetic datasets as described in the paper (http://cs-people.bu.edu/panagpap/Research/Bio/bicluster_survey.pdf) - Figure 4. Params option (str): bicluster structure ('a' to 'i') noise (int): value of the noise in the matrix noise_background (bool): positions where is not a bicluster should contain noise if this parameter is set to True shuffle (bool): shuffle lines and columns of the matrix if this parameter is set to True Returns data (array_like): matrix generated """ shape = (150,150) n,m = shape # values shouldn't be a lot far... 
centers = [20, 40, 60, 80, 100] y_row = np.zeros(150) y_col = np.zeros(150) if noise_background: data = np.random.rand(n, m)*100 else: data = np.zeros(n*m).reshape(shape) if option == 'a': data[60:110][:,70:140] = np.random.rand(50,70)*noise + centers[0] y_row[60:110] += 1 y_col[70:140] += 1 elif option == 'd': data[0:50][:,0:70] = np.random.rand(50,70)*noise + centers[0] y_row[0:50] += 1 y_col[0:70] += 1 data[50:100][:,50:100] = np.random.rand(50,50)*noise + centers[2] y_row[50:100] += 2 y_col[50:100] += 2 data[100:150][:,80:150] = np.random.rand(50,70)*noise + centers[1] y_row[100:150] += 3 y_col[80:150] += 3 elif option == 'e': data[0:70][:,0:50] = np.random.rand(70,50)*noise + centers[3] y_row[0:70] += 1 y_col[0:50] += 1 data[50:100][:,50:100] = np.random.rand(50,50)*noise + centers[1] y_row[50:100] += 2 y_col[50:100] += 2 data[80:150][:,100:150] = np.random.rand(70,50)*noise + centers[2] y_row[80:150] += 3 y_col[100:150] += 3 elif option == 'f': data[0:50][:,0:40] = np.random.rand(50,40)*noise + centers[4] y_row[0:50] += 1 y_col[0:40] += 1 data[50:150][:,0:40] = np.random.rand(100,40)*noise + centers[0] y_row[50:150] += 2 data[110:150][:,40:95] = np.random.rand(40,55)*noise + centers[2] y_row[110:150] += 3 y_col[40:95] += 2 data[110:150][:,95:150] = np.random.rand(40,55)*noise + centers[1] y_row[110:150] += 3 y_col[95:150] += 3 elif option == 'g': data[0:110][:,0:40] = np.random.rand(110,40)*noise + centers[0] data[110:150][:,0:110] = np.random.rand(40,110)*noise + centers[2] data[40:150][:,110:150] = np.random.rand(110,40)*noise + centers[1] data[0:40][:,40:150] = np.random.rand(40,110)*noise + centers[3] elif option == 'h': data[0:90][:,0:90] = np.random.rand(90,90)*noise + centers[0] data[35:55][:,35:55] = (np.random.rand(20,20)*noise + centers[1]) + data[35:55][:,35:55] data[110:140][:,35:90] = np.random.rand(30,55)*noise + centers[4] data[0:140][:,110:150] = np.random.rand(140,40)*noise + centers[2] data[0:55][:,130:150] = (np.random.rand(55,20)*noise + 
centers[3]) + data[0:55][:,130:150] elif option == 'i': data[20:70][:,20:70] = np.random.rand(50,50)*noise + centers[0] data[20:70][:,100:150] = np.random.rand(50,50)*noise + centers[1] data[50:110][:,50:120] = np.random.rand(60,70)*noise + centers[2] data[120:150][:,20:100] = np.random.rand(30,80)*noise + centers[3] if shuffle: np.random.shuffle(data) np.random.shuffle(data.T) return data, y_row, y_col from numba import jit @jit(nopython=True) def compute_U(S, V, m, k): V_tilde = np.dot(S, V.T) U_new = np.empty([m, k]) for i in xrange(m): errors = np.empty(k) for row_clust_ind in xrange(k): errors[row_clust_ind] = np.sum((X[i][:] - V_tilde[row_clust_ind][:])**2) ind = np.argmin(errors) U_new[i][ind] = 1 return U_new def fnmtf(X, k, l, num_iter=10, norm=True): m, n = X.shape U = np.random.rand(m,k) S = np.random.rand(k,l) V = np.random.rand(n,l) if norm: X = preprocessing.normalize(X) for i in xrange(num_iter): S = pinv(U.T.dot(U)).dot(U.T).dot(X).dot(V).dot(pinv(V.T.dot(V))) # solve subproblem to update V U_tilde = U.dot(S) V_new = np.zeros(n*l).reshape(n, l) for j in range(n): errors = np.zeros(l) for col_clust_ind in xrange(l): errors[col_clust_ind] = ((X[:][:, j] - U_tilde[:][:, col_clust_ind])**2).sum() ind = np.argmin(errors) V_new[j][ind] = 1 V = V_new # while np.linalg.det(V.T.dot(V)) <= 0: # erros = (X - U.dot(S).dot(V.T)) ** 2 # erros = np.sum(erros.dot(V), axis=0) / np.sum(V, axis=0) # erros[np.where(np.sum(V, axis=0) <= 1)] = -inf # quantidade = np.sum(V, axis=0) # indexMin = np.argmin(quantidade) # indexMax = np.argmax(erros) # indexes = np.nonzero(V[:, indexMax])[0] # for j in indexes: # if np.random.rand(1) > 0.5: # V[j, indexMax] = 0 # V[j, indexMin] = 1 # solve subproblem to update U U = compute_U(S, V, m, k) # while np.linalg.det(U.T.dot(U)) <= 0: # erros = (X - U.dot(V_tilde)) ** 2 # erros = np.sum(U.T.dot(erros), axis=1) / np.sum(U, axis=0) # erros[np.where(np.sum(U, axis=0) <= 1)] = -np.inf # quantidade = np.sum(U, axis=0) # indexMin = 
np.argmin(quantidade) # indexMax = np.argmax(erros) # indexes = np.nonzero(U[:, indexMax])[0] # end = len(indexes) # indexes_p = np.random.permutation(end) # U[indexes[indexes_p[0:np.floor(end/2.0)]], indexMax] = 0.0 # U[indexes[indexes_p[0:np.floor(end/2.0)]], indexMin] = 1.0 rows_ind = np.argmax(U, axis=1) cols_ind = np.argmax(V, axis=1) return U, S, V, rows_ind, cols_ind # m, n = (40, 35) # X = .01 * np.random.rand(m,n) # X[0:10][:, 0:10] = 1 + .01 * np.random.random() # X[30:40][:, 20:35] = 1 + .01 * np.random.random() # X[20:30][:, 20:35] = .6 + .01 * np.random.random() # X[30:40][:, 36:40] = 1 + .01 * np.random.random() # m, n = (6, 8) # X = .01 * np.random.rand(m,n) # X[0:2][:, 0:4] = 1 + .01 * np.random.random() # X[2:4][:, 4:8] = .6 + .01 * np.random.random() # X[4:6][:, 0:8] = .8 + .01 * np.random.random() plt.matshow(X, cmap=plt.cm.Blues) plt.title('Original data') plt.grid() plt.show() U, S, V, rows_ind, cols_ind = fnmtf(X, 3, 2, norm=False) def plot_factorization_result(U, S, V): fig = plt.figure() ax = fig.add_subplot(2, 2, 1) ax.matshow(U.dot(S).dot(V.T), cmap=plt.cm.Blues) ax.set_title('reconstruction') ax.grid() ax2 = fig.add_subplot(2, 2, 2) ax2.matshow(U, cmap=plt.cm.Blues) ax2.set_title('U*S') ax2.grid() ax3 = fig.add_subplot(2, 2, 3) ax3.matshow(S, cmap=plt.cm.Blues) ax3.set_title('S') ax3.grid() ax4 = fig.add_subplot(2, 2, 4) ax4.matshow(V.T, cmap=plt.cm.Blues) ax4.set_title('S*V\'') ax4.grid() plt.show() def scores(labels_true, labels_pred, row=True): if row: print 'Rows scores' else: print 'Cols scores' print 'Random score: %s' % adjusted_rand_score(labels_true, labels_pred) print 'Normalized mutual information score: %s' % normalized_mutual_info_score(labels_true, labels_pred) print '' plot_factorization_result(U, S, V) scores(rows_ind, [0, 0, 1, 1, 2, 2]) scores(cols_ind, [0, 0, 0, 0, 1, 1, 1, 1], row=False) X, x_labels, y_labels = generate_dataset('d', noise_background=False, shuffle=False) temp, _, _ = generate_dataset('d', 
noise_background=False) fig = plt.figure() ax1 = fig.add_subplot(1, 2, 1) ax1.matshow(temp, cmap=plt.cm.Blues) ax1.set_title('Original data') ax1.grid() ax2 = fig.add_subplot(1, 2, 2) ax2.matshow(X, cmap=plt.cm.Blues) ax2.set_title('Shuffled data') ax2.grid() plt.show() import time t1 = time.time() U, S, V, rows_ind, cols_ind = fnmtf(X, 3, 3, norm=False) t2 = time.time() print ('dt: {} secs'.format(t2-t1)) plot_factorization_result(U, S, V) scores(rows_ind, x_labels) scores(cols_ind, y_labels, row=False) %load_ext Cython %%cython import cython cimport cython import numpy as np cimport numpy as np @cython.boundscheck(False) @cython.wraparound(False) @cython.nonecheck(False) def fnmtf_improved(double[:, ::1] X, int k, int l, int num_iter=100, int norm=0): cdef int m = X.shape[0] cdef int n = X.shape[1] cdef unsigned int i = 0 cdef unsigned int j = 0 cdef unsigned int iter_index = 0 cdef unsigned int row_clust_ind = 0 cdef unsigned int col_clust_ind = 0 cdef unsigned int ind = 0 cdef double[:, ::1] U = np.random.rand(m, k).astype(np.float64) cdef double[:, ::1] U_best = np.random.rand(m, k).astype(np.float64) cdef double[:, ::1] S = np.random.rand(k, l).astype(np.float64) cdef double[:, ::1] S_best = np.random.rand(k, l).astype(np.float64) cdef double[:, ::1] V = np.random.rand(n, l).astype(np.float64) cdef double[:, ::1] V_best = np.random.rand(n, l).astype(np.float64) cdef double[:, ::1] U_tilde = np.empty((m, l), dtype=np.float64) cdef double[:, ::1] V_new = np.empty((n, l), dtype=np.float64) cdef double[:, ::1] V_tilde = np.empty((l, n), dtype=np.float64) cdef double[:, ::1] U_new = np.empty((m, k), dtype=np.float64) cdef double error_best = 10e9999 cdef double error = 10e9999 cdef double[:] errors_v = np.zeros(l, dtype=np.float64) cdef double[:] errors_u = np.zeros(k, dtype=np.float64) for iter_index in range(num_iter): S[:, :] = np.dot( np.dot(np.linalg.pinv(np.dot(U.T, U)), np.dot(np.dot(U.T, X), V)), np.linalg.pinv(np.dot(V.T, V)) ) # solve subproblem to 
update V U_tilde[:, :] = np.dot(U, S) V_new[:, :] = np.empty((n, l), dtype=np.int) for j in range(n): errors_v = np.zeros(l, dtype=np.float64) for col_clust_ind in range(l): errors_v[col_clust_ind] = np.sum(np.square(np.subtract(X[:, j], U_tilde[:, col_clust_ind]))) ind = np.argmin(errors_v) V_new[j, ind] = 1.0 V[:, :] = V_new # solve subproblem to update U V_tilde[:, :] = np.dot(S, V.T) U_new[:, :] = np.empty((m, k), dtype=np.int) for i in range(m): errors_u = np.zeros(k, dtype=np.float64) for row_clust_ind in range(k): errors_u[row_clust_ind] = np.sum(np.square(np.subtract(X[i, :], V_tilde[row_clust_ind, :]))) ind = np.argmin(errors_u) U_new[i, ind] = 1.0 U[:, :] = U_new error_ant = error error = np.sum(np.square(np.subtract(X, np.dot(np.dot(U, S), V.T)))) if error < error_best: U_best[:, :] = U S_best[:, :] = S V_best[:, :] = V error_best = error import time X, x_labels, y_labels = generate_dataset('d', noise_background=False, shuffle=False) t1 = time.time() U, S, V, rows_ind, cols_ind = fnmtf_improved(X, 3, 3) t2 = time.time() print ('dt: {} secs'.format(t2-t1)) plot_factorization_result(U, S, V) scores(rows_ind, x_labels) scores(cols_ind, y_labels, row=False) ```
github_jupyter
## Assigning gender based on first name A straightforward task in natural language processing is to assign gender based on first name. Social scientists are often interested in gender inequalities and may have a dataset that lists name but not gender, such as a list of journal articles with authors in a study of gendered citation practices. Assigning gender based on name is usually done by comparing a given name with the name's gender distribution on official records, such as the US Social Security baby name list. While this works for most names, some names, such as Gershun or Hunna, are too rare to have reliable estimates based on most available official records. Other names, such as Jian or Blake, are common among both men and women. A fourth category of names are those which are disproportionately one gender or another, but do have non-trivial numbers of a different gender, such as Cody or Kyle. For both these names and androgynous names, there are often generational differences in the gendered distribution. The most efficient way to gender names in Python is with the `gender_guesser` library, which is based on Jörg Michael's multinational list of more than 48,000 names. The first time you use the library, you may need to install it: `%pip install gender_guesser` The `gender_guesser` library is set up so that first you import the gender function and then create a detector. In my case, the detector is named `d` and one parameter is passed, which instructs the detector to ignore capitalization. ``` import gender_guesser.detector as gender d = gender.Detector(case_sensitive=False) ``` When passed a name, the detector's `get_gender` returns either 'male', 'female', 'mostly_male', 'mostly_female', 'andy' (for androgynous names), or 'unknown' (for names not in the dataset). 
``` d.get_gender("Barack") d.get_gender("Theresa") d.get_gender("JAMIE") d.get_gender("sidney") d.get_gender("Tal") ``` In almost all cases, you will want to analyze a large list of names, rather than a single name. For example, the University of North Carolina, Chapel Hill makes available salary information on employees. The dataset includes name, department, position, salary, and years of employment, but not gender. ``` import pandas as pd df = pd.read_csv("data/unc_salaries.csv") df.head(10) ``` A column with name-based gender assignment can be created by applying `d.get_gender` to the first name column. ``` df["Gender"] = df["First Name"].apply(d.get_gender) df["Gender"].value_counts(normalize=True) ``` For this dataset, the majority of the names can be gendered, while less than ten percent of names are not in the dataset. Selecting the rows in the dataframe where gender is unknown and then listing the values can be useful for inspecting cases and evaluating the gender-name assignment process. ``` cases = df["Gender"] == "unknown" df[cases]["First Name"].values ``` My quick interpretation of this list is that these are names that are certainly rare in the US, and some are likely transliterated using a non-common English spelling. The names with missing gender are not random, and the process of creating missingness is likely correlated with other variables of interest, such as salary. This might impact a full analysis of gender patterns, but I'll ignore that in the preliminary analysis. If you were conducting your analysis in another statistical package, you could export your dataframe with the new gender column. ``` df.to_csv("unc_salaries_gendered.csv") ``` You could also produce some summary statistics in your notebook. For example, the pandas `groupby` method can be used to estimate median salary by gender. ``` df.groupby("Gender")["Salary"].median() ``` Comparing the male and female-coded names, this shows evidence of a large salary gap based on gender. 
The "mostly" and unknown categories are in the middle, but interestingly, the androgynous names are associated with the lowest salaries. Grouping by gender and position may be useful in understanding the mechanisms that produce the gender gap. I also focus on just the individuals with names that are coded as male or female. ``` subset = df["Gender"].isin(["male", "female"]) df[subset].groupby(["Position", "Gender"])["Salary"].median() ``` This summary dataframe can also be plotted, which clearly shows that the median salary for male Assistant Professors is higher than the median salary of the higher ranked female Associate Professors. ``` %matplotlib inline df[subset].groupby(['Position','Gender'])['Salary'].median().plot(kind='barh'); ``` Sometimes the first name will not be its own field, but included as part of the name column that includes the full name. In that case, you will need to create a function that extracts the first name. In this dataframe, the `name` column is the last name, followed by a comma, and then the first name and possibly a middle name or initial. A brief function extracts the first name: ``` def gender_name(name): """ Extracts and genders first name when the original name is formatted "Last, First M". Assumes a gender.Detector named `d` is already declared. """ first_name = name.split(", ")[-1] # grab the slice after the comma first_name = first_name.split(" ")[0] # remove middle name/initial gender = d.get_gender(first_name) return gender ``` This function can now be applied to the full name column. ``` df["Gender"] = df["Full Name"].apply(gender_name) df["Gender"].value_counts() ``` The results are the same as the original gender column.
github_jupyter
version 1.0.3 #![Spark Logo](http://spark-mooc.github.io/web-assets/images/ta_Spark-logo-small.png) + ![Python Logo](http://spark-mooc.github.io/web-assets/images/python-logo-master-v3-TM-flattened_small.png) # **Text Analysis and Entity Resolution** ####Entity resolution is a common, yet difficult problem in data cleaning and integration. This lab will demonstrate how we can use Apache Spark to apply powerful and scalable text analysis techniques and perform entity resolution across two datasets of commercial products. #### Entity Resolution, or "[Record linkage][wiki]" is the term used by statisticians, epidemiologists, and historians, among others, to describe the process of joining records from one data source with another that describe the same entity. Our terms with the same meaning include, "entity disambiguation/linking", duplicate detection", "deduplication", "record matching", "(reference) reconciliation", "object identification", "data/information integration", and "conflation". #### Entity Resolution (ER) refers to the task of finding records in a dataset that refer to the same entity across different data sources (e.g., data files, books, websites, databases). ER is necessary when joining datasets based on entities that may or may not share a common identifier (e.g., database key, URI, National identification number), as may be the case due to differences in record shape, storage location, and/or curator style or preference. A dataset that has undergone ER may be referred to as being cross-linked. [wiki]: https://en.wikipedia.org/wiki/Record_linkage ### Code #### This assignment can be completed using basic Python, pySpark Transformations and actions, and the plotting library matplotlib. Other libraries are not allowed. 
### Files #### Data files for this assignment are from the [metric-learning](https://code.google.com/p/metric-learning/) project and can be found at: `cs100/lab3` #### The directory contains the following files: * **Google.csv**, the Google Products dataset * **Amazon.csv**, the Amazon dataset * **Google_small.csv**, 200 records sampled from the Google data * **Amazon_small.csv**, 200 records sampled from the Amazon data * **Amazon_Google_perfectMapping.csv**, the "gold standard" mapping * **stopwords.txt**, a list of common English words #### Besides the complete data files, there are "sample" data files for each dataset - we will use these for **Part 1**. In addition, there is a "gold standard" file that contains all of the true mappings between entities in the two datasets. Every row in the gold standard file has a pair of record IDs (one Google, one Amazon) that belong to two record that describe the same thing in the real world. We will use the gold standard to evaluate our algorithms. ### **Part 0: Preliminaries** #### We read in each of the files and create an RDD consisting of lines. #### For each of the data files ("Google.csv", "Amazon.csv", and the samples), we want to parse the IDs out of each record. The IDs are the first column of the file (they are URLs for Google, and alphanumeric strings for Amazon). Omitting the headers, we load these data files into pair RDDs where the *mapping ID* is the key, and the value is a string consisting of the name/title, description, and manufacturer from the record. 
#### The file format of an Amazon line is: `"id","title","description","manufacturer","price"` #### The file format of a Google line is: `"id","name","description","manufacturer","price"` ``` import re DATAFILE_PATTERN = '^(.+),"(.+)",(.*),(.*),(.*)' def removeQuotes(s): """ Remove quotation marks from an input string Args: s (str): input string that might have the quote "" characters Returns: str: a string without the quote characters """ return ''.join(i for i in s if i!='"') def parseDatafileLine(datafileLine): """ Parse a line of the data file using the specified regular expression pattern Args: datafileLine (str): input string that is a line from the data file Returns: str: a string parsed using the given regular expression and without the quote characters """ match = re.search(DATAFILE_PATTERN, datafileLine) if match is None: print 'Invalid datafile line: %s' % datafileLine return (datafileLine, -1) elif match.group(1) == '"id"': print 'Header datafile line: %s' % datafileLine return (datafileLine, 0) else: product = '%s %s %s' % (match.group(2), match.group(3), match.group(4)) return ((removeQuotes(match.group(1)), product), 1) import sys import os from test_helper import Test baseDir = os.path.join('data') inputPath = os.path.join('cs100', 'lab3') GOOGLE_PATH = 'Google.csv' GOOGLE_SMALL_PATH = 'Google_small.csv' AMAZON_PATH = 'Amazon.csv' AMAZON_SMALL_PATH = 'Amazon_small.csv' GOLD_STANDARD_PATH = 'Amazon_Google_perfectMapping.csv' STOPWORDS_PATH = 'stopwords.txt' def parseData(filename): """ Parse a data file Args: filename (str): input file name of the data file Returns: RDD: a RDD of parsed lines """ return (sc .textFile(filename, 4, 0) .map(parseDatafileLine) .cache()) def loadData(path): """ Load a data file Args: path (str): input file name of the data file Returns: RDD: a RDD of parsed valid lines """ filename = os.path.join(baseDir, inputPath, path) raw = parseData(filename).cache() failed = (raw .filter(lambda s: s[1] == -1) .map(lambda s: s[0])) 
for line in failed.take(10): print '%s - Invalid datafile line: %s' % (path, line) valid = (raw .filter(lambda s: s[1] == 1) .map(lambda s: s[0]) .cache()) print '%s - Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (path, raw.count(), valid.count(), failed.count()) assert failed.count() == 0 assert raw.count() == (valid.count() + 1) return valid googleSmall = loadData(GOOGLE_SMALL_PATH) google = loadData(GOOGLE_PATH) amazonSmall = loadData(AMAZON_SMALL_PATH) amazon = loadData(AMAZON_PATH) ``` #### Let's examine the lines that were just loaded in the two subset (small) files - one from Google and one from Amazon ``` for line in googleSmall.take(3): print 'google: %s: %s\n' % (line[0], line[1]) for line in amazonSmall.take(3): print 'amazon: %s: %s\n' % (line[0], line[1]) ``` ### **Part 1: ER as Text Similarity - Bags of Words** #### A simple approach to entity resolution is to treat all records as strings and compute their similarity with a string distance function. In this part, we will build some components for performing bag-of-words text-analysis, and then use them to compute record similarity. #### [Bag-of-words][bag-of-words] is a conceptually simple yet powerful approach to text analysis. #### The idea is to treat strings, a.k.a. **documents**, as *unordered collections* of words, or **tokens**, i.e., as bags of words. > #### **Note on terminology**: a "token" is the result of parsing the document down to the elements we consider "atomic" for the task at hand. Tokens can be things like words, numbers, acronyms, or other exotica like word-roots or fixed-length character strings. > #### Bag of words techniques all apply to any sort of token, so when we say "bag-of-words" we really mean "bag-of-tokens," strictly speaking. #### Tokens become the atomic unit of text comparison. If we want to compare two documents, we count how many tokens they share in common. 
If we want to search for documents with keyword queries (this is what Google does), then we turn the keywords into tokens and find documents that contain them. The power of this approach is that it makes string comparisons insensitive to small differences that probably do not affect meaning much, for example, punctuation and word order. [bag-of-words]: https://en.wikipedia.org/wiki/Bag-of-words_model ### **1(a) Tokenize a String** #### Implement the function `simpleTokenize(string)` that takes a string and returns a list of non-empty tokens in the string. `simpleTokenize` should split strings using the provided regular expression. Since we want to make token-matching case insensitive, make sure all tokens are turned lower-case. Give an interpretation, in natural language, of what the regular expression, `split_regex`, matches. #### If you need help with Regular Expressions, try the site [regex101](https://regex101.com/) where you can interactively explore the results of applying different regular expressions to strings. *Note that \W includes the "_" character*. You should use [re.split()](https://docs.python.org/2/library/re.html#re.split) to perform the string split. Also, make sure you remove any empty tokens. ``` # TODO: Replace <FILL IN> with appropriate code quickbrownfox = 'A quick brown fox jumps over the lazy dog.' split_regex = r'\W+' def simpleTokenize(string): """ A simple implementation of input string tokenization Args: string (str): input string Returns: list: a list of tokens """ return [item for item in re.split(split_regex, string.lower()) if item] print simpleTokenize(quickbrownfox) # Should give ['a', 'quick', 'brown', ... 
] # TEST Tokenize a String (1a) Test.assertEquals(simpleTokenize(quickbrownfox), ['a','quick','brown','fox','jumps','over','the','lazy','dog'], 'simpleTokenize should handle sample text') Test.assertEquals(simpleTokenize(' '), [], 'simpleTokenize should handle empty string') Test.assertEquals(simpleTokenize('!!!!123A/456_B/789C.123A'), ['123a','456_b','789c','123a'], 'simpleTokenize should handle puntuations and lowercase result') Test.assertEquals(simpleTokenize('fox fox'), ['fox', 'fox'], 'simpleTokenize should not remove duplicates') ``` ### **(1b) Removing stopwords** #### *[Stopwords][stopwords]* are common (English) words that do not contribute much to the content or meaning of a document (e.g., "the", "a", "is", "to", etc.). Stopwords add noise to bag-of-words comparisons, so they are usually excluded. #### Using the included file "stopwords.txt", implement `tokenize`, an improved tokenizer that does not emit stopwords. [stopwords]: https://en.wikipedia.org/wiki/Stop_words ``` # TODO: Replace <FILL IN> with appropriate code stopfile = os.path.join(baseDir, inputPath, STOPWORDS_PATH) stopwords = set(sc.textFile(stopfile).collect()) print 'These are the stopwords: %s' % stopwords def tokenize(string): """ An implementation of input string tokenization that excludes stopwords Args: string (str): input string Returns: list: a list of tokens without stopwords """ return [token for token in simpleTokenize(string) if token not in stopwords] print tokenize(quickbrownfox) # Should give ['quick', 'brown', ... ] # TEST Removing stopwords (1b) Test.assertEquals(tokenize("Why a the?"), [], 'tokenize should remove all stopwords') Test.assertEquals(tokenize("Being at the_?"), ['the_'], 'tokenize should handle non-stopwords') Test.assertEquals(tokenize(quickbrownfox), ['quick','brown','fox','jumps','lazy','dog'], 'tokenize should handle sample text') ``` ### **(1c) Tokenizing the small datasets** #### Now let's tokenize the two *small* datasets. 
For each ID in a dataset, `tokenize` the values, and then count the total number of tokens. #### How many tokens, total, are there in the two datasets? ``` # TODO: Replace <FILL IN> with appropriate code amazonRecToToken = amazonSmall.map(lambda x: (x[0], tokenize(x[1]))) googleRecToToken = googleSmall.map(lambda x: (x[0], tokenize(x[1]))) def countTokens(vendorRDD): """ Count and return the number of tokens Args: vendorRDD (RDD of (recordId, tokenizedValue)): Pair tuple of record ID to tokenized output Returns: count: count of all tokens """ return vendorRDD.map(lambda x: len(x[1])).sum() totalTokens = countTokens(amazonRecToToken) + countTokens(googleRecToToken) print 'There are %s tokens in the combined datasets' % totalTokens # TEST Tokenizing the small datasets (1c) Test.assertEquals(totalTokens, 22520, 'incorrect totalTokens') ``` ### **(1d) Amazon record with the most tokens** #### Which Amazon record has the biggest number of tokens? #### In other words, you want to sort the records and get the one with the largest count of tokens. 
``` # TODO: Replace <FILL IN> with appropriate code def findBiggestRecord(vendorRDD): """ Find and return the record with the largest number of tokens Args: vendorRDD (RDD of (recordId, tokens)): input Pair Tuple of record ID and tokens Returns: list: a list of 1 Pair Tuple of record ID and tokens """ return vendorRDD.takeOrdered(1, lambda x: -len(x[1])) biggestRecordAmazon = findBiggestRecord(amazonRecToToken) print 'The Amazon record with ID "%s" has the most tokens (%s)' % (biggestRecordAmazon[0][0], len(biggestRecordAmazon[0][1])) # TEST Amazon record with the most tokens (1d) Test.assertEquals(biggestRecordAmazon[0][0], 'b000o24l3q', 'incorrect biggestRecordAmazon') Test.assertEquals(len(biggestRecordAmazon[0][1]), 1547, 'incorrect len for biggestRecordAmazon') ``` ### **Part 2: ER as Text Similarity - Weighted Bag-of-Words using TF-IDF** #### Bag-of-words comparisons are not very good when all tokens are treated the same: some tokens are more important than others. Weights give us a way to specify which tokens to favor. With weights, when we compare documents, instead of counting common tokens, we sum up the weights of common tokens. A good heuristic for assigning weights is called "Term-Frequency/Inverse-Document-Frequency," or [TF-IDF][tfidf] for short. #### **TF** #### TF rewards tokens that appear many times in the same document. It is computed as the frequency of a token in a document, that is, if document *d* contains 100 tokens and token *t* appears in *d* 5 times, then the TF weight of *t* in *d* is *5/100 = 1/20*. The intuition for TF is that if a word occurs often in a document, then it is more important to the meaning of the document. #### **IDF** #### IDF rewards tokens that are rare overall in a dataset. The intuition is that it is more significant if two documents share a rare word than a common one. 
IDF weight for a token, *t*, in a set of documents, *U*, is computed as follows: * #### Let *N* be the total number of documents in *U* * #### Find *n(t)*, the number of documents in *U* that contain *t* * #### Then *IDF(t) = N/n(t)*. #### Note that *n(t)/N* is the frequency of *t* in *U*, and *N/n(t)* is the inverse frequency. > #### **Note on terminology**: Sometimes token weights depend on the document the token belongs to, that is, the same token may have a different weight when it's found in different documents. We call these weights *local* weights. TF is an example of a local weight, because it depends on the length of the source. On the other hand, some token weights only depend on the token, and are the same everywhere that token is found. We call these weights *global*, and IDF is one such weight. #### **TF-IDF** #### Finally, to bring it all together, the total TF-IDF weight for a token in a document is the product of its TF and IDF weights. [tfidf]: https://en.wikipedia.org/wiki/Tf%E2%80%93idf ### **(2a) Implement a TF function** #### Implement `tf(tokens)` that takes a list of tokens and returns a Python [dictionary](https://docs.python.org/2/tutorial/datastructures.html#dictionaries) mapping tokens to TF weights. 
#### The steps your function should perform are: * #### Create an empty Python dictionary * #### For each of the tokens in the input `tokens` list, count 1 for each occurance and add the token to the dictionary * #### For each of the tokens in the dictionary, divide the token's count by the total number of tokens in the input `tokens` list ``` # TODO: Replace <FILL IN> with appropriate code from collections import Counter def tf(tokens): """ Compute TF Args: tokens (list of str): input list of tokens from tokenize Returns: dictionary: a dictionary of tokens to its TF values """ count = len(tokens) word_freq = Counter(tokens) return {key: float(value)/count for key, value in word_freq.items()} print tf(tokenize(quickbrownfox)) # Should give { 'quick': 0.1666 ... } # TEST Implement a TF function (2a) tf_test = tf(tokenize(quickbrownfox)) Test.assertEquals(tf_test, {'brown': 0.16666666666666666, 'lazy': 0.16666666666666666, 'jumps': 0.16666666666666666, 'fox': 0.16666666666666666, 'dog': 0.16666666666666666, 'quick': 0.16666666666666666}, 'incorrect result for tf on sample text') tf_test2 = tf(tokenize('one_ one_ two!')) Test.assertEquals(tf_test2, {'one_': 0.6666666666666666, 'two': 0.3333333333333333}, 'incorrect result for tf test') ``` ### **(2b) Create a corpus** #### Create a pair RDD called `corpusRDD`, consisting of a combination of the two small datasets, `amazonRecToToken` and `googleRecToToken`. Each element of the `corpusRDD` should be a pair consisting of a key from one of the small datasets (ID or URL) and the value is the associated value for that key from the small datasets. ``` # TODO: Replace <FILL IN> with appropriate code corpusRDD = amazonRecToToken.union(googleRecToToken) # TEST Create a corpus (2b) Test.assertEquals(corpusRDD.count(), 400, 'incorrect corpusRDD.count()') ``` ### **(2c) Implement an IDFs function** #### Implement `idfs` that assigns an IDF weight to every unique token in an RDD called `corpus`. 
The function should return a pair RDD
% uniqueTokenCount # TEST Implement an IDFs function (2c) Test.assertEquals(uniqueTokenCount, 4772, 'incorrect uniqueTokenCount') tokenSmallestIdf = idfsSmall.takeOrdered(1, lambda s: s[1])[0] Test.assertEquals(tokenSmallestIdf[0], 'software', 'incorrect smallest IDF token') Test.assertTrue(abs(tokenSmallestIdf[1] - 4.25531914894) < 0.0000000001, 'incorrect smallest IDF value') ``` ### **(2d) Tokens with the smallest IDF** #### Print out the 11 tokens with the smallest IDF in the combined small dataset. ``` smallIDFTokens = idfsSmall.takeOrdered(11, lambda s: s[1]) print smallIDFTokens ``` ### **(2e) IDF Histogram** #### Plot a histogram of IDF values. Be sure to use appropriate scaling and bucketing for the data. #### First plot the histogram using `matplotlib` ``` import matplotlib.pyplot as plt small_idf_values = idfsSmall.map(lambda s: s[1]).collect() fig = plt.figure(figsize=(8,3)) plt.hist(small_idf_values, 50, log=True) pass ``` ### **(2f) Implement a TF-IDF function** #### Use your `tf` function to implement a `tfidf(tokens, idfs)` function that takes a list of tokens from a document and a Python dictionary of IDF weights and returns a Python dictionary mapping individual tokens to total TF-IDF weights. #### The steps your function should perform are: * #### Calculate the token frequencies (TF) for `tokens` * #### Create a Python dictionary where each token maps to the token's frequency times the token's IDF weight #### Use your `tfidf` function to compute the weights of Amazon product record 'b000hkgj8k'. To do this, we need to extract the record for the token from the tokenized small Amazon dataset and we need to convert the IDFs for the small dataset into a Python dictionary. We can do the first part, by using a `filter()` transformation to extract the matching record and a `collect()` action to return the value to the driver. 
For the second part, we use the [`collectAsMap()` action](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.collectAsMap) to return the IDFs to the driver as a Python dictionary. ``` # TODO: Replace <FILL IN> with appropriate code def tfidf(tokens, idfs): """ Compute TF-IDF Args: tokens (list of str): input list of tokens from tokenize idfs (dictionary): record to IDF value Returns: dictionary: a dictionary of records to TF-IDF values """ tfs = tf(tokens) tfIdfDict = dict((k, tfs[k] * idfs[k]) for k in tokens if k in idfs) return tfIdfDict recb000hkgj8k = amazonRecToToken.filter(lambda x: x[0] == 'b000hkgj8k').collect()[0][1] idfsSmallWeights = idfsSmall.collectAsMap() rec_b000hkgj8k_weights = tfidf(recb000hkgj8k, idfsSmallWeights) print 'Amazon record "b000hkgj8k" has tokens and weights:\n%s' % rec_b000hkgj8k_weights # TEST Implement a TF-IDF function (2f) Test.assertEquals(rec_b000hkgj8k_weights, {'autocad': 33.33333333333333, 'autodesk': 8.333333333333332, 'courseware': 66.66666666666666, 'psg': 33.33333333333333, '2007': 3.5087719298245617, 'customizing': 16.666666666666664, 'interface': 3.0303030303030303}, 'incorrect rec_b000hkgj8k_weights') ``` ### **Part 3: ER as Text Similarity - Cosine Similarity** #### Now we are ready to do text comparisons in a formal way. The metric of string distance we will use is called **[cosine similarity][cosine]**. We will treat each document as a vector in some high dimensional space. Then, to compare two documents we compute the cosine of the angle between their two document vectors. This is *much* easier than it sounds. #### The first question to answer is how do we represent documents as vectors? The answer is familiar: bag-of-words! We treat each unique token as a dimension, and treat token weights as magnitudes in their respective token dimensions. For example, suppose we use simple counts as weights, and we want to interpret the string "Hello, world! Goodbye, world!" as a vector. 
Then in the "hello" and "goodbye" dimensions the vector has value 1, in the "world" dimension it has value 2, and it is zero in all other dimensions. #### The next question is: given two vectors how do we find the cosine of the angle between them? Recall the formula for the dot product of two vectors: #### $$ a \cdot b = \| a \| \| b \| \cos \theta $$ #### Here $ a \cdot b = \sum a_i b_i $ is the ordinary dot product of two vectors, and $ \|a\| = \sqrt{ \sum a_i^2 } $ is the norm of $ a $. #### We can rearrange terms and solve for the cosine to find it is simply the normalized dot product of the vectors. With our vector model, the dot product and norm computations are simple functions of the bag-of-words document representations, so we now have a formal way to compute similarity: #### $$ similarity = \cos \theta = \frac{a \cdot b}{\|a\| \|b\|} = \frac{\sum a_i b_i}{\sqrt{\sum a_i^2} \sqrt{\sum b_i^2}} $$ #### Setting aside the algebra, the geometric interpretation is more intuitive. The angle between two document vectors is small if they share many tokens in common, because they are pointing in roughly the same direction. For that case, the cosine of the angle will be large. Otherwise, if the angle is large (and they have few words in common), the cosine is small. Therefore, cosine similarity scales proportionally with our intuitive sense of similarity. [cosine]: https://en.wikipedia.org/wiki/Cosine_similarity ### **(3a) Implement the components of a `cosineSimilarity` function** #### Implement the components of a `cosineSimilarity` function. #### Use the `tokenize` and `tfidf` functions, and the IDF weights from Part 2 for extracting tokens and assigning them weights. 
#### The steps you should perform are: * #### Define a function `dotprod` that takes two Python dictionaries and produces the dot product of them, where the dot product is defined as the sum of the product of values for tokens that appear in *both* dictionaries * #### Define a function `norm` that returns the square root of the dot product of a dictionary and itself * #### Define a function `cossim` that returns the dot product of two dictionaries divided by the norm of the first dictionary and then by the norm of the second dictionary ``` # TODO: Replace <FILL IN> with appropriate code import math def dotprod(a, b): """ Compute dot product Args: a (dictionary): first dictionary of record to value b (dictionary): second dictionary of record to value Returns: dotProd: result of the dot product with the two input dictionaries """ return sum(a[k] * b[k] for k in a.keys() if k in b.keys()) def norm(a): """ Compute square root of the dot product Args: a (dictionary): a dictionary of record to value Returns: norm: a dictionary of tokens to its TF values """ return math.sqrt(dotprod(a,a)) def cossim(a, b): """ Compute cosine similarity Args: a (dictionary): first dictionary of record to value b (dictionary): second dictionary of record to value Returns: cossim: dot product of two dictionaries divided by the norm of the first dictionary and then by the norm of the second dictionary """ return dotprod(a,b)/(norm(a) * norm(b)) testVec1 = {'foo': 2, 'bar': 3, 'baz': 5 } testVec2 = {'foo': 1, 'bar': 0, 'baz': 20 } dp = dotprod(testVec1, testVec2) nm = norm(testVec1) print dp, nm # TEST Implement the components of a cosineSimilarity function (3a) Test.assertEquals(dp, 102, 'incorrect dp') Test.assertTrue(abs(nm - 6.16441400297) < 0.0000001, 'incorrrect nm') ``` ### **(3b) Implement a `cosineSimilarity` function** #### Implement a `cosineSimilarity(string1, string2, idfsDictionary)` function that takes two strings and a dictionary of IDF weights, and computes their cosine 
similarity in the context of some global IDF weights. #### The steps you should perform are: * #### Apply your `tfidf` function to the tokenized first and second strings, using the dictionary of IDF weights * #### Compute and return your `cossim` function applied to the results of the two `tfidf` functions ``` # TODO: Replace <FILL IN> with appropriate code def cosineSimilarity(string1, string2, idfsDictionary): """ Compute cosine similarity between two strings Args: string1 (str): first string string2 (str): second string idfsDictionary (dictionary): a dictionary of IDF values Returns: cossim: cosine similarity value """ w1 = tfidf(tokenize(string1), idfsDictionary) w2 = tfidf(tokenize(string2), idfsDictionary) return cossim(w1, w2) cossimAdobe = cosineSimilarity('Adobe Photoshop', 'Adobe Illustrator', idfsSmallWeights) print cossimAdobe # TEST Implement a cosineSimilarity function (3b) Test.assertTrue(abs(cossimAdobe - 0.0577243382163) < 0.0000001, 'incorrect cossimAdobe') ``` ### **(3c) Perform Entity Resolution** #### Now we can finally do some entity resolution! #### For *every* product record in the small Google dataset, use your `cosineSimilarity` function to compute its similarity to every record in the small Amazon dataset. Then, build a dictionary mapping `(Google URL, Amazon ID)` tuples to similarity scores between 0 and 1. #### We'll do this computation two different ways, first we'll do it without a broadcast variable, and then we'll use a broadcast variable #### The steps you should perform are: * #### Create an RDD that is a combination of the small Google and small Amazon datasets that has as elements all pairs of elements (a, b) where a is in self and b is in other. The result will be an RDD of the form: `[ ((Google URL1, Google String1), (Amazon ID1, Amazon String1)), ((Google URL1, Google String1), (Amazon ID2, Amazon String2)), ((Google URL2, Google String2), (Amazon ID1, Amazon String1)), ... 
]` * #### Define a worker function that given an element from the combination RDD computes the cosineSimlarity for the two records in the element * #### Apply the worker function to every element in the RDD #### Now, compute the similarity between Amazon record `b000o24l3q` and Google record `http://www.google.com/base/feeds/snippets/17242822440574356561`. ``` # TODO: Replace <FILL IN> with appropriate code crossSmall = (googleSmall .cartesian(amazonSmall) .cache()) def computeSimilarity(record): """ Compute similarity on a combination record Args: record: a pair, (google record, amazon record) Returns: pair: a pair, (google URL, amazon ID, cosine similarity value) """ googleRec = record[0] amazonRec = record[1] googleURL = googleRec[0] amazonID = amazonRec[0] googleValue = googleRec[1] amazonValue = amazonRec[1] cs = cosineSimilarity(googleValue, amazonValue, idfsSmallWeights) return (googleURL, amazonID, cs) similarities = (crossSmall .map(computeSimilarity) .cache()) def similar(amazonID, googleURL): """ Return similarity value Args: amazonID: amazon ID googleURL: google URL Returns: similar: cosine similarity value """ return (similarities .filter(lambda record: (record[0] == googleURL and record[1] == amazonID)) .collect()[0][2]) similarityAmazonGoogle = similar('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561') print 'Requested similarity is %s.' % similarityAmazonGoogle # TEST Perform Entity Resolution (3c) Test.assertTrue(abs(similarityAmazonGoogle - 0.000303171940451) < 0.0000001, 'incorrect similarityAmazonGoogle') ``` ### **(3d) Perform Entity Resolution with Broadcast Variables** #### The solution in (3c) works well for small datasets, but it requires Spark to (automatically) send the `idfsSmallWeights` variable to all the workers. If we didn't `cache()` similarities, then it might have to be recreated if we run `similar()` multiple times. This would cause Spark to send `idfsSmallWeights` every time. 
#### Instead, we can use a broadcast variable - we define the broadcast variable in the driver and then we can refer to it in each worker. Spark saves the broadcast variable at each worker, so it is only sent once. #### The steps you should perform are: * #### Define a `computeSimilarityBroadcast` function that given an element from the combination RDD computes the cosine simlarity for the two records in the element. This will be the same as the worker function `computeSimilarity` in (3c) except that it uses a broadcast variable. * #### Apply the worker function to every element in the RDD #### Again, compute the similarity between Amazon record `b000o24l3q` and Google record `http://www.google.com/base/feeds/snippets/17242822440574356561`. ``` # TODO: Replace <FILL IN> with appropriate code def computeSimilarityBroadcast(record): """ Compute similarity on a combination record, using Broadcast variable Args: record: a pair, (google record, amazon record) Returns: pair: a pair, (google URL, amazon ID, cosine similarity value) """ googleRec = record[0] amazonRec = record[1] googleURL = googleRec[0] amazonID = amazonRec[0] googleValue = googleRec[1] amazonValue = amazonRec[1] cs = cosineSimilarity(googleValue, amazonValue, idfsSmallBroadcast.value) return (googleURL, amazonID, cs) idfsSmallBroadcast = sc.broadcast(idfsSmallWeights) similaritiesBroadcast = (crossSmall .map(computeSimilarity) .cache()) def similarBroadcast(amazonID, googleURL): """ Return similarity value, computed using Broadcast variable Args: amazonID: amazon ID googleURL: google URL Returns: similar: cosine similarity value """ return (similaritiesBroadcast .filter(lambda record: (record[0] == googleURL and record[1] == amazonID)) .collect()[0][2]) similarityAmazonGoogleBroadcast = similarBroadcast('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561') print 'Requested similarity is %s.' 
% similarityAmazonGoogleBroadcast # TEST Perform Entity Resolution with Broadcast Variables (3d) from pyspark import Broadcast Test.assertTrue(isinstance(idfsSmallBroadcast, Broadcast), 'incorrect idfsSmallBroadcast') Test.assertEquals(len(idfsSmallBroadcast.value), 4772, 'incorrect idfsSmallBroadcast value') Test.assertTrue(abs(similarityAmazonGoogleBroadcast - 0.000303171940451) < 0.0000001, 'incorrect similarityAmazonGoogle') ``` ### **(3e) Perform a Gold Standard evaluation** #### First, we'll load the "gold standard" data and use it to answer several questions. We read and parse the Gold Standard data, where the format of each line is "Amazon Product ID","Google URL". The resulting RDD has elements of the form ("AmazonID GoogleURL", 'gold') ``` GOLDFILE_PATTERN = '^(.+),(.+)' # Parse each line of a data file useing the specified regular expression pattern def parse_goldfile_line(goldfile_line): """ Parse a line from the 'golden standard' data file Args: goldfile_line: a line of data Returns: pair: ((key, 'gold', 1 if successful or else 0)) """ match = re.search(GOLDFILE_PATTERN, goldfile_line) if match is None: print 'Invalid goldfile line: %s' % goldfile_line return (goldfile_line, -1) elif match.group(1) == '"idAmazon"': print 'Header datafile line: %s' % goldfile_line return (goldfile_line, 0) else: key = '%s %s' % (removeQuotes(match.group(1)), removeQuotes(match.group(2))) return ((key, 'gold'), 1) goldfile = os.path.join(baseDir, inputPath, GOLD_STANDARD_PATH) gsRaw = (sc .textFile(goldfile) .map(parse_goldfile_line) .cache()) gsFailed = (gsRaw .filter(lambda s: s[1] == -1) .map(lambda s: s[0])) for line in gsFailed.take(10): print 'Invalid goldfile line: %s' % line goldStandard = (gsRaw .filter(lambda s: s[1] == 1) .map(lambda s: s[0]) .cache()) print 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (gsRaw.count(), goldStandard.count(), gsFailed.count()) assert (gsFailed.count() == 0) assert (gsRaw.count() == 
(goldStandard.count() + 1)) ``` ### Using the "gold standard" data we can answer the following questions: * #### How many true duplicate pairs are there in the small datasets? * #### What is the average similarity score for true duplicates? * #### What about for non-duplicates? #### The steps you should perform are: * #### Create a new `sims` RDD from the `similaritiesBroadcast` RDD, where each element consists of a pair of the form ("AmazonID GoogleURL", cosineSimilarityScore). An example entry from `sims` is: ('b000bi7uqs http://www.google.com/base/feeds/snippets/18403148885652932189', 0.40202896125621296) * #### Combine the `sims` RDD with the `goldStandard` RDD by creating a new `trueDupsRDD` RDD that has just the cosine similarity scores for those "AmazonID GoogleURL" pairs that appear in both the `sims` RDD and `goldStandard` RDD. Hint: you can do this using the join() transformation. * #### Count the number of true duplicate pairs in the `trueDupsRDD` dataset * #### Compute the average similarity score for true duplicates in the `trueDupsRDD` dataset. Remember to use `float` for calculation * #### Create a new `nonDupsRDD` RDD that has just the cosine similarity scores for those "AmazonID GoogleURL" pairs from the `similaritiesBroadcast` RDD that **do not** appear in both the *sims* RDD and gold standard RDD. * #### Compute the average similarity score for non-duplicates in the `nonDupsRDD` dataset. 
Remember to use `float` for calculation ``` # TODO: Replace <FILL IN> with appropriate code sims = similaritiesBroadcast.map(lambda x: (x[1] + " " + x[0], x[2])) trueDupsRDD = (sims .join(goldStandard).map(lambda x: (x[0], x[1][0]))) trueDupsCount = trueDupsRDD.count() avgSimDups = trueDupsRDD.map(lambda x: x[1]).sum()/float(trueDupsCount) nonDupsRDD = (sims .leftOuterJoin(goldStandard).filter(lambda x: x[1][1] == None).map(lambda x: (x[0], x[1][0]))) avgSimNon = nonDupsRDD.map(lambda x: x[1]).sum()/float(nonDupsRDD.count()) print 'There are %s true duplicates.' % trueDupsCount print 'The average similarity of true duplicates is %s.' % avgSimDups print 'And for non duplicates, it is %s.' % avgSimNon # TEST Perform a Gold Standard evaluation (3e) Test.assertEquals(trueDupsCount, 146, 'incorrect trueDupsCount') Test.assertTrue(abs(avgSimDups - 0.264332573435) < 0.0000001, 'incorrect avgSimDups') Test.assertTrue(abs(avgSimNon - 0.00123476304656) < 0.0000001, 'incorrect avgSimNon') ``` ### **Part 4: Scalable ER** #### In the previous parts, we built a text similarity function and used it for small scale entity resolution. Our implementation is limited by its quadratic run time complexity, and is not practical for even modestly sized datasets. In this part, we will implement a more scalable algorithm and use it to do entity resolution on the full dataset. ### Inverted Indices #### To improve our ER algorithm from the earlier parts, we should begin by analyzing its running time. In particular, the algorithm above is quadratic in two ways. First, we did a lot of redundant computation of tokens and weights, since each record was reprocessed every time it was compared. Second, we made quadratically many token comparisons between records. #### The first source of quadratic overhead can be eliminated with precomputation and look-up tables, but the second source is a little more tricky. 
In the worst case, every token in every record in one dataset exists in every record in the other dataset, and therefore every token makes a non-zero contribution to the cosine similarity. In this case, token comparison is unavoidably quadratic. #### But in reality most records have nothing (or very little) in common. Moreover, it is typical for a record in one dataset to have at most one duplicate record in the other dataset (this is the case assuming each dataset has been de-duplicated against itself). In this case, the output is linear in the size of the input and we can hope to achieve linear running time. #### An [**inverted index**](https://en.wikipedia.org/wiki/Inverted_index) is a data structure that will allow us to avoid making quadratically many token comparisons. It maps each token in the dataset to the list of documents that contain the token. So, instead of comparing, record by record, each token to every other token to see if they match, we will use inverted indices to *look up* records that match on a particular token. > #### **Note on terminology**: In text search, a *forward* index maps documents in a dataset to the tokens they contain. An *inverted* index supports the inverse mapping. > #### **Note**: For this section, use the complete Google and Amazon datasets, not the samples ### **(4a) Tokenize the full dataset** #### Tokenize each of the two full datasets for Google and Amazon. 
``` # TODO: Replace <FILL IN> with appropriate code amazonFullRecToToken = amazon.map(lambda x: (x[0], tokenize(x[1]))) googleFullRecToToken = google.map(lambda x: (x[0], tokenize(x[1]))) print 'Amazon full dataset is %s products, Google full dataset is %s products' % (amazonFullRecToToken.count(), googleFullRecToToken.count()) # TEST Tokenize the full dataset (4a) Test.assertEquals(amazonFullRecToToken.count(), 1363, 'incorrect amazonFullRecToToken.count()') Test.assertEquals(googleFullRecToToken.count(), 3226, 'incorrect googleFullRecToToken.count()') ``` ### **(4b) Compute IDFs and TF-IDFs for the full datasets** #### We will reuse your code from above to compute IDF weights for the complete combined datasets. #### The steps you should perform are: * #### Create a new `fullCorpusRDD` that contains the tokens from the full Amazon and Google datasets. * #### Apply your `idfs` function to the `fullCorpusRDD` * #### Create a broadcast variable containing a dictionary of the IDF weights for the full dataset. * #### For each of the Amazon and Google full datasets, create weight RDDs that map IDs/URLs to TF-IDF weighted token vectors. ``` # TODO: Replace <FILL IN> with appropriate code fullCorpusRDD = amazonFullRecToToken.union(googleFullRecToToken) idfsFull = idfs(fullCorpusRDD) idfsFullCount = idfsFull.count() print 'There are %s unique tokens in the full datasets.' % idfsFullCount # Recompute IDFs for full dataset idfsFullWeights = idfsFull.collectAsMap() idfsFullBroadcast = sc.broadcast(idfsFullWeights) # Pre-compute TF-IDF weights. Build mappings from record ID weight vector. amazonWeightsRDD = amazonFullRecToToken.map(lambda x: (x[0], tfidf(x[1],idfsFullBroadcast.value))) googleWeightsRDD = googleFullRecToToken.map(lambda x: (x[0], tfidf(x[1],idfsFullBroadcast.value))) print 'There are %s Amazon weights and %s Google weights.' 
% (amazonWeightsRDD.count(), googleWeightsRDD.count()) # TEST Compute IDFs and TF-IDFs for the full datasets (4b) Test.assertEquals(idfsFullCount, 17078, 'incorrect idfsFullCount') Test.assertEquals(amazonWeightsRDD.count(), 1363, 'incorrect amazonWeightsRDD.count()') Test.assertEquals(googleWeightsRDD.count(), 3226, 'incorrect googleWeightsRDD.count()') ``` ### **(4c) Compute Norms for the weights from the full datasets** #### We will reuse your code from above to compute norms of the IDF weights for the complete combined dataset. #### The steps you should perform are: * #### Create two collections, one for each of the full Amazon and Google datasets, where IDs/URLs map to the norm of the associated TF-IDF weighted token vectors. * #### Convert each collection into a broadcast variable, containing a dictionary of the norm of IDF weights for the full dataset ``` # TODO: Replace <FILL IN> with appropriate code amazonNorms = amazonWeightsRDD.map(lambda x: (x[0], norm(x[1]))) amazonNormsBroadcast = sc.broadcast(amazonNorms.collectAsMap()) googleNorms = googleWeightsRDD.map(lambda x: (x[0], norm(x[1]))) googleNormsBroadcast = sc.broadcast(googleNorms.collectAsMap()) # TEST Compute Norms for the weights from the full datasets (4c) Test.assertTrue(isinstance(amazonNormsBroadcast, Broadcast), 'incorrect amazonNormsBroadcast') Test.assertEquals(len(amazonNormsBroadcast.value), 1363, 'incorrect amazonNormsBroadcast.value') Test.assertTrue(isinstance(googleNormsBroadcast, Broadcast), 'incorrect googleNormsBroadcast') Test.assertEquals(len(googleNormsBroadcast.value), 3226, 'incorrect googleNormsBroadcast.value') ``` ### **(4d) Create inverted indicies from the full datasets** #### Build inverted indices of both data sources. #### The steps you should perform are: * #### Create an invert function that given a pair of (ID/URL, TF-IDF weighted token vector), returns a list of pairs of (token, ID/URL). 
Recall that the TF-IDF weighted token vector is a Python dictionary with keys that are tokens and values that are weights. * #### Use your invert function to convert the full Amazon and Google TF-IDF weighted token vector datasets into two RDDs where each element is a pair of a token and an ID/URL that contain that token. These are inverted indicies. ``` # TODO: Replace <FILL IN> with appropriate code def invert(record): """ Invert (ID, tokens) to a list of (token, ID) Args: record: a pair, (ID, token vector) Returns: pairs: a list of pairs of token to ID """ pairs = [(token, record[0]) for token in record[1]] return pairs amazonInvPairsRDD = (amazonWeightsRDD .flatMap(invert) .cache()) googleInvPairsRDD = (googleWeightsRDD .flatMap(invert) .cache()) print 'There are %s Amazon inverted pairs and %s Google inverted pairs.' % (amazonInvPairsRDD.count(), googleInvPairsRDD.count()) # TEST Create inverted indicies from the full datasets (4d) invertedPair = invert((1, {'foo': 2})) Test.assertEquals(invertedPair[0][1], 1, 'incorrect invert result') Test.assertEquals(amazonInvPairsRDD.count(), 111387, 'incorrect amazonInvPairsRDD.count()') Test.assertEquals(googleInvPairsRDD.count(), 77678, 'incorrect googleInvPairsRDD.count()') ``` ### **(4e) Identify common tokens from the full dataset** #### We are now in position to efficiently perform ER on the full datasets. Implement the following algorithm to build an RDD that maps a pair of (ID, URL) to a list of tokens they share in common: * #### Using the two inverted indicies (RDDs where each element is a pair of a token and an ID or URL that contains that token), create a new RDD that contains only tokens that appear in both datasets. This will yield an RDD of pairs of (token, iterable(ID, URL)). * #### We need a mapping from (ID, URL) to token, so create a function that will swap the elements of the RDD you just created to create this new RDD consisting of ((ID, URL), token) pairs. 
* #### Finally, create an RDD consisting of pairs mapping (ID, URL) to all the tokens the pair shares in common ``` # TODO: Replace <FILL IN> with appropriate code def swap(record): """ Swap (token, (ID, URL)) to ((ID, URL), token) Args: record: a pair, (token, (ID, URL)) Returns: pair: ((ID, URL), token) """ token = record[0] keys = record[1] return (keys, token) commonTokens = (amazonInvPairsRDD .join(googleInvPairsRDD).map(swap).groupByKey() .cache()) print 'Found %d common tokens' % commonTokens.count() # TEST Identify common tokens from the full dataset (4e) Test.assertEquals(commonTokens.count(), 2441100, 'incorrect commonTokens.count()') ``` ### **(4f) Identify common tokens from the full dataset** #### Use the data structures from parts **(4a)** and **(4e)** to build a dictionary to map record pairs to cosine similarity scores. #### The steps you should perform are: * #### Create two broadcast dictionaries from the amazonWeights and googleWeights RDDs * #### Create a `fastCosinesSimilarity` function that takes in a record consisting of the pair ((Amazon ID, Google URL), tokens list) and computes the sum for each of the tokens in the token list of the products of the Amazon weight for the token times the Google weight for the token. The sum should then be divided by the norm for the Google URL and then divided by the norm for the Amazon ID. The function should return this value in a pair with the key being the (Amazon ID, Google URL). 
*Make sure you use broadcast variables you created for both the weights and norms* * #### Apply your `fastCosinesSimilarity` function to the common tokens from the full dataset ``` # TODO: Replace <FILL IN> with appropriate code amazonWeightsBroadcast = sc.broadcast(amazonWeightsRDD.collectAsMap()) googleWeightsBroadcast = sc.broadcast(googleWeightsRDD.collectAsMap()) def fastCosineSimilarity(record): """ Compute Cosine Similarity using Broadcast variables Args: record: ((ID, URL), token) Returns: pair: ((ID, URL), cosine similarity value) """ amazonRec = record[0][0] googleRec = record[0][1] tokens = record[1] s = sum(amazonWeightsBroadcast.value[amazonRec][i] * googleWeightsBroadcast.value[googleRec][i] for i in tokens) value = s/(amazonNormsBroadcast.value[amazonRec] * googleNormsBroadcast.value[googleRec]) key = (amazonRec, googleRec) return (key, value) similaritiesFullRDD = (commonTokens .map(fastCosineSimilarity) .cache()) print similaritiesFullRDD.count() # TEST Identify common tokens from the full dataset (4f) similarityTest = similaritiesFullRDD.filter(lambda ((aID, gURL), cs): aID == 'b00005lzly' and gURL == 'http://www.google.com/base/feeds/snippets/13823221823254120257').collect() Test.assertEquals(len(similarityTest), 1, 'incorrect len(similarityTest)') Test.assertTrue(abs(similarityTest[0][1] - 4.286548414e-06) < 0.000000000001, 'incorrect similarityTest fastCosineSimilarity') Test.assertEquals(similaritiesFullRDD.count(), 2441100, 'incorrect similaritiesFullRDD.count()') ``` ### **Part 5: Analysis** #### Now we have an authoritative list of record-pair similarities, but we need a way to use those similarities to decide if two records are duplicates or not. The simplest approach is to pick a **threshold**. Pairs whose similarity is above the threshold are declared duplicates, and pairs below the threshold are declared distinct. #### To decide where to set the threshold we need to understand what kind of errors result at different levels. 
If we set the threshold too low, we get more **false positives**, that is, record-pairs we say are duplicates that in reality are not. If we set the threshold too high, we get more **false negatives**, that is, record-pairs that really are duplicates but that we miss. #### ER algorithms are evaluated by the common metrics of information retrieval and search called **precision** and **recall**. Precision asks of all the record-pairs marked duplicates, what fraction are true duplicates? Recall asks of all the true duplicates in the data, what fraction did we successfully find? As with false positives and false negatives, there is a trade-off between precision and recall. A third metric, called **F-measure**, takes the harmonic mean of precision and recall to measure overall goodness in a single value: #### $$ Fmeasure = 2 \frac{precision * recall}{precision + recall} $$ > #### **Note**: In this part, we use the "gold standard" mapping from the included file to look up true duplicates, and the results of Part 4. > #### **Note**: In this part, you will not be writing any code. We've written all of the code for you. Run each cell and then answer the quiz questions on Studio. 
### **(5a) Counting True Positives, False Positives, and False Negatives** #### We need functions that count True Positives (true duplicates above the threshold), and False Positives and False Negatives: * #### We start with creating the `simsFullRDD` from our `similaritiesFullRDD` that consists of a pair of ((Amazon ID, Google URL), simlarity score) * #### From this RDD, we create an RDD consisting of only the similarity scores * #### To look up the similarity scores for true duplicates, we perform a left outer join using the `goldStandard` RDD and `simsFullRDD` and extract the ``` # Create an RDD of ((Amazon ID, Google URL), similarity score) simsFullRDD = similaritiesFullRDD.map(lambda x: ("%s %s" % (x[0][0], x[0][1]), x[1])) assert (simsFullRDD.count() == 2441100) # Create an RDD of just the similarity scores simsFullValuesRDD = (simsFullRDD .map(lambda x: x[1]) .cache()) assert (simsFullValuesRDD.count() == 2441100) # Look up all similarity scores for true duplicates # This helper function will return the similarity score for records that are in the gold standard and the simsFullRDD (True positives), and will return 0 for records that are in the gold standard but not in simsFullRDD (False Negatives). def gs_value(record): if (record[1][1] is None): return 0 else: return record[1][1] # Join the gold standard and simsFullRDD, and then extract the similarities scores using the helper function trueDupSimsRDD = (goldStandard .leftOuterJoin(simsFullRDD) .map(gs_value) .cache()) print 'There are %s true duplicates.' % trueDupSimsRDD.count() assert(trueDupSimsRDD.count() == 1300) ``` #### The next step is to pick a threshold between 0 and 1 for the count of True Positives (true duplicates above the threshold). However, we would like to explore many different thresholds. To do this, we divide the space of thresholds into 100 bins, and take the following actions: * #### We use Spark Accumulators to implement our counting function. 
We define a custom accumulator type, `VectorAccumulatorParam`, along with functions to initialize the accumulator's vector to zero, and to add two vectors. Note that we have to use the += operator because you can only add to an accumulator. * #### We create a helper function to create a list with one entry (bit) set to a value and all others set to 0. * #### We create 101 bins for the 100 threshold values between 0 and 1. * #### Now, for each similarity score, we can compute the false positives. We do this by adding each similarity score to the appropriate bin of the vector. Then we remove true positives from the vector by using the gold standard data. * #### We define functions for computing false positive and negative and true positives, for a given threshold. ``` from pyspark.accumulators import AccumulatorParam class VectorAccumulatorParam(AccumulatorParam): # Initialize the VectorAccumulator to 0 def zero(self, value): return [0] * len(value) # Add two VectorAccumulator variables def addInPlace(self, val1, val2): for i in xrange(len(val1)): val1[i] += val2[i] return val1 # Return a list with entry x set to value and all other entries set to 0 def set_bit(x, value, length): bits = [] for y in xrange(length): if (x == y): bits.append(value) else: bits.append(0) return bits # Pre-bin counts of false positives for different threshold ranges BINS = 101 nthresholds = 100 def bin(similarity): return int(similarity * nthresholds) # fpCounts[i] = number of entries (possible false positives) where bin(similarity) == i zeros = [0] * BINS fpCounts = sc.accumulator(zeros, VectorAccumulatorParam()) def add_element(score): global fpCounts b = bin(score) fpCounts += set_bit(b, 1, BINS) simsFullValuesRDD.foreach(add_element) # Remove true positives from FP counts def sub_element(score): global fpCounts b = bin(score) fpCounts += set_bit(b, -1, BINS) trueDupSimsRDD.foreach(sub_element) def falsepos(threshold): fpList = fpCounts.value return sum([fpList[b] for b in range(0, 
BINS) if float(b) / nthresholds >= threshold]) def falseneg(threshold): return trueDupSimsRDD.filter(lambda x: x < threshold).count() def truepos(threshold): return trueDupSimsRDD.count() - falsenegDict[threshold] ``` ### **(5b) Precision, Recall, and F-measures** #### We define functions so that we can compute the [Precision][precision-recall], [Recall][precision-recall], and [F-measure][f-measure] as a function of threshold value: * #### Precision = true-positives / (true-positives + false-positives) * #### Recall = true-positives / (true-positives + false-negatives) * #### F-measure = 2 x Recall x Precision / (Recall + Precision) [precision-recall]: https://en.wikipedia.org/wiki/Precision_and_recall [f-measure]: https://en.wikipedia.org/wiki/Precision_and_recall#F-measure ``` # Precision = true-positives / (true-positives + false-positives) # Recall = true-positives / (true-positives + false-negatives) # F-measure = 2 x Recall x Precision / (Recall + Precision) def precision(threshold): tp = trueposDict[threshold] return float(tp) / (tp + falseposDict[threshold]) def recall(threshold): tp = trueposDict[threshold] return float(tp) / (tp + falsenegDict[threshold]) def fmeasure(threshold): r = recall(threshold) p = precision(threshold) return 2 * r * p / (r + p) ``` ### **(5c) Line Plots** #### We can make line plots of precision, recall, and F-measure as a function of threshold value, for thresholds between 0.0 and 1.0. You can change `nthresholds` (above in part **(5a)**) to change the threshold values to plot. 
``` thresholds = [float(n) / nthresholds for n in range(0, nthresholds)] falseposDict = dict([(t, falsepos(t)) for t in thresholds]) falsenegDict = dict([(t, falseneg(t)) for t in thresholds]) trueposDict = dict([(t, truepos(t)) for t in thresholds]) precisions = [precision(t) for t in thresholds] recalls = [recall(t) for t in thresholds] fmeasures = [fmeasure(t) for t in thresholds] print precisions[0], fmeasures[0] assert (abs(precisions[0] - 0.000532546802671) < 0.0000001) assert (abs(fmeasures[0] - 0.00106452669505) < 0.0000001) fig = plt.figure() plt.plot(thresholds, precisions) plt.plot(thresholds, recalls) plt.plot(thresholds, fmeasures) plt.legend(['Precision', 'Recall', 'F-measure']) pass ``` ### Discussion #### State-of-the-art tools can get an F-measure of about 60% on this dataset. In this lab exercise, our best F-measure is closer to 40%. Look at some examples of errors (both False Positives and False Negatives) and think about what went wrong. ### There are several ways we might improve our simple classifier, including: #### * Using additional attributes #### * Performing better featurization of our textual data (e.g., stemming, n-grams, etc.) #### * Using different similarity functions
github_jupyter
# Representación y visualización de datos El aprendizaje automático trata de ajustar modelos a los datos; por esta razón, empezaremos discutiendo como los datos pueden ser representados para ser accesibles por el ordenador. Además de esto, nos basaremos en los ejemplos de matplotlib de la sección anterior para usarlos para representar datos. ## Datos en scikit-learn Los datos en scikit-learn, salvo algunas excepciones, suelen estar almacenados en **arrays de 2 dimensiones**, con forma `[n_samples, n_features]`. Muchos algoritmos aceptan también matrices ``scipy.sparse`` con la misma forma. - **n_samples:** este es el número de ejemplos. Cada ejemplo es un item a procesar (por ejemplo, clasificar). Un ejemplo puede ser un documento, una imagen, un sonido, un vídeo, un objeto astronómico, una fila de una base de datos o de un fichero CSV, o cualquier cosa que se pueda describir usando un conjunto prefijado de trazas cuantitativas. - **n_features:** este es el número de características descriptoras que se utilizan para describir cada item de forma cuantitativa. Las características son, generalmente, valores reales, aunque pueden ser categóricas o valores discretos. El número de características debe ser fijado de antemano. Sin embargo, puede ser extremadamente alto (por ejemplo, millones de características), siendo cero en la mayoría de casos. En este tipo de datos, es buena idea usar matrices `scipy.sparse` que manejan mucho mejor la memoria. Como ya comentamos en la sección anterior, representamos los ejemplos (puntos o instancias) como filas en el array de datos y almacenamos las características correspondientes, las "dimensiones", como columnas. ### Un ejemplo simple: el dataset Iris Como ejemplo de un dataset simple, vamos a echar un vistazo al conjunto iris almacenado en scikit-learn. 
Los datos consisten en medidas de tres especies de flores iris distintas: Iris Setosa <img src="figures/iris_setosa.jpg" width="50%"> Iris Versicolor <img src="figures/iris_versicolor.jpg" width="50%"> Iris Virginica <img src="figures/iris_virginica.jpg" width="50%"> ### Pregunta rápida: **Asumamos que estamos interesados en categorizar nuevos ejemplos; queremos predecir si una flor nueva va a ser Iris-Setosa, Iris-Versicolor, o Iris-Virginica. Basándonos en lo discutido en secciones anteriores, ¿cómo construiríamos este dataset?** Recuerda: necesitamos un array 2D con forma (*shape*) `[n_samples x n_features]`. - ¿Qué sería `n_samples`? - ¿Qué podría ser `n_features`? Recuerda que debe haber un número **fijo** de características por cada ejemplo, y cada característica *j* debe ser el mismo tipo de cantidad para cada ejemplo. ### Cargando el dataset Iris desde scikit-learn Para futuros experimentos con algoritmos de aprendizaje automático, te recomendamos que añadas a favoritos el [Repositorio UCI](http://archive.ics.uci.edu/ml/), que aloja muchos de los datasets que se utilizan para probar los algoritmos de aprendizaje automático. Además, algunos de estos datasets ya están incluidos en scikit-learn, pudiendo así evitar tener que descargar, leer, convertir y limpiar los ficheros de texto o CSV. El listado de datasets ya disponibles en scikit learn puede consultarse [aquí](http://scikit-learn.org/stable/datasets/#toy-datasets). Por ejemplo, scikit-learn contiene el dataset iris. Los datos consisten en: - Características: 1. Longitud de sépalo en cm 2. Ancho de sépalo en cm 3. Longitud de pétalo en cm 4. Ancho de pétalo en cm - Etiquetas a predecir: 1. Iris Setosa 2. Iris Versicolour 3. Iris Virginica <img src="figures/petal_sepal.jpg" alt="Sepal" style="width: 50%;"/> (Image: "Petal-sepal". 
Licensed under CC BY-SA 3.0 via Wikimedia Commons - https://commons.wikimedia.org/wiki/File:Petal-sepal.jpg#/media/File:Petal-sepal.jpg) ``scikit-learn`` incluye una copia del archivo CSV de iris junto con una función que lo lee a arrays de numpy: ``` from sklearn.datasets import load_iris iris = load_iris() ``` El dataset es un objeto ``Bunch``. Puedes ver que contiene utilizando el método ``keys()``: ``` iris.keys() ``` Las características de cada flor se encuentra en el atributo ``data`` del dataset: ``` n_samples, n_features = iris.data.shape print('Número de ejemplos:', n_samples) print('Número de características:', n_features) # sepal length, sepal width, petal length y petal width del primer ejemplo (primera flor) print(iris.data[0]) ``` La información sobre la clase de cada ejemplo se encuentra en el atributo ``target`` del dataset: ``` print(iris.data.shape) print(iris.target.shape) print(iris.target) import numpy as np np.bincount(iris.target) ``` La función de numpy llamada `bincount` (arriba) nos permite ver que las clases se distribuyen de forma uniforme en este conjunto de datos (50 flores de cada especie), donde: - clase 0: Iris-Setosa - clase 1: Iris-Versicolor - clase 2: Iris-Virginica Los nombres de las clases se almacenan en ``target_names``: ``` print(iris.target_names) ``` Estos datos tienen cuatro dimensiones, pero podemos visualizar una o dos de las dimensiones usando un histograma o un scatter. 
Primero, activamos el *matplotlib inline mode*: ``` %matplotlib inline import matplotlib.pyplot as plt x_index = 3 colors = ['blue', 'red', 'green'] for label, color in zip(range(len(iris.target_names)), colors): plt.hist(iris.data[iris.target==label, x_index], label=iris.target_names[label], color=color) plt.xlabel(iris.feature_names[x_index]) plt.legend(loc='upper right') plt.show() x_index = 3 y_index = 0 colors = ['blue', 'red', 'green'] for label, color in zip(range(len(iris.target_names)), colors): plt.scatter(iris.data[iris.target==label, x_index], iris.data[iris.target==label, y_index], label=iris.target_names[label], c=color) plt.xlabel(iris.feature_names[x_index]) plt.ylabel(iris.feature_names[y_index]) plt.legend(loc='upper left') plt.show() ``` <div class="alert alert-success"> <b>Ejercicio</b>: <ul> <li> **Cambia** `x_index` **e** `y_index` ** en el script anterior y encuentra una combinación de los dos parámetros que separe de la mejor forma posible las tres clases.** </li> <li> Este ejercicio es un adelanto a lo que se denomina **reducción de dimensionalidad**, que veremos después. </li> </ul> </div> ### Matrices scatterplot En lugar de realizar los plots por separado, una herramienta común que utilizan los analistas son las **matrices scatterplot**. Estas matrices muestran los scatter plots entre todas las características del dataset, así como los histogramas para ver la distribución de cada característica. ``` import pandas as pd iris_df = pd.DataFrame(iris.data, columns=iris.feature_names) pd.plotting.scatter_matrix(iris_df, c=iris.target, figsize=(8, 8)); ``` ## Otros datasets disponibles [Scikit-learn pone a disposición de la comunidad una gran cantidad de datasets](http://scikit-learn.org/stable/datasets/#dataset-loading-utilities). 
Vienen en tres modos: - **Packaged Data:** pequeños datasets ya disponibles en la distribución de scikit-learn, a los que se puede acceder mediante ``sklearn.datasets.load_*`` - **Downloadable Data:** estos datasets son más grandes y pueden descargarse mediante herramientas que scikit-learn ya incluye. Estas herramientas están en ``sklearn.datasets.fetch_*`` - **Generated Data:** estos datasets se generan mediante modelos basados en semillas aleatorias (datasets sintéticos). Están disponibles en ``sklearn.datasets.make_*`` Puedes explorar las herramientas de datasets de scikit-learn usando la funcionalidad de autocompletado que tiene IPython. Tras importar el paquete ``datasets`` de ``sklearn``, teclea datasets.load_<TAB> o datasets.fetch_<TAB> o datasets.make_<TAB> para ver una lista de las funciones disponibles ``` from sklearn import datasets ``` Advertencia: muchos de estos datasets son bastante grandes y puede llevar bastante tiempo descargarlos. Si comienzas una descarga con un libro de IPython y luego quieres detenerla, puedes utilizar la opción "kernel interrupt" accesible por el menú o con ``Ctrl-m i``. Puedes presionar ``Ctrl-m h`` para una lista de todos los atajos ``ipython``. ## Cargando los datos de dígitos Ahora vamos a ver otro dataset, donde podemos estudiar mejor como representar los datos. Podemos explorar los datos de la siguiente forma: ``` from sklearn.datasets import load_digits digits = load_digits() digits.keys() n_samples, n_features = digits.data.shape print((n_samples, n_features)) print(digits.data[0]) print(digits.data[-1]) print(digits.target) ``` Aquí la etiqueta es directamente el dígito que representa cada ejemplo. Los datos consisten en un array de longitud 64... pero, ¿qué significan estos datos? Una pista viene dada por el hecho de que tenemos dos versiones de los datos: ``data`` y ``images``. 
Vamos a echar un vistazo a ambas: ``` print(digits.data.shape) print(digits.images.shape) ``` Podemos ver que son lo mismo, mediante un simple *reshaping*: ``` import numpy as np print(np.all(digits.images.reshape((1797, 64)) == digits.data)) ``` Vamos a visualizar los datos. Es un poco más complejo que el scatter plot que hicimos anteriormente. ``` # Configurar la figura fig = plt.figure(figsize=(6, 6)) # tamaño en pulgadas fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05) # mostrar algunos dígitos: cada imagen es de 8x8 for i in range(64): ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[]) ax.imshow(digits.images[i], cmap=plt.cm.binary, interpolation='nearest') # Etiquetar la imagen con el valor objetivo ax.text(0, 7, str(digits.target[i])) ``` Ahora podemos saber que significan las características. Cada característica es una cantidad real que representa la oscuridad de un píxel en una imagen 8x8 de un dígito manuscrito. Aunque cada ejemplo tiene datos que son inherentemente de dos dimensiones, la matriz de datos incluye estos datos 2D en un **solo vector**, contenido en cada **fila** de la misma. <div class="alert alert-success"> <b>Ejercicio: trabajando con un dataset de reconocimiento facial</b>: <ul> <li> Vamos a pararnos a explorar el dataset de reconocimiento facial de Olivetti. Descarga los datos (sobre 1.4MB), y visualiza las caras. Puedes copiar el código utilizado para visualizar los dígitos, modificándolo convenientemente. </li> </ul> </div> ``` from sklearn.datasets import fetch_olivetti_faces # descarga el dataset faces # Utiliza el script anterior para representar las caras # Pista: plt.cm.bone es un buen colormap para este dataset ```
github_jupyter
# Day 1 ``` from sklearn.datasets import load_iris import pandas as pd import numpy as np iris = load_iris() df = pd.DataFrame(np.c_[iris['data'], iris['target']], columns = iris['feature_names'] + ['species']) df['species'] = df['species'].replace([0,1,2], iris.target_names) df.head() import numpy as np import matplotlib.pyplot as plt rng = np.random.RandomState(42) x = 10 * rng.rand(50) y = 2 * x - 1 + rng.randn(50) x plt.scatter(x, y) plt.show() # 1 from sklearn.linear_model import LinearRegression # 2 LinearRegression? model_lr = LinearRegression(fit_intercept=True) # 3 # x = data feature # y = data target x.shape x_matriks = x[:, np.newaxis] x_matriks.shape # 4 # model_lr.fit(input_data, output_data) model_lr.fit(x_matriks, y) # Testing x_test = np.linspace(10, 12, 15) x_test = x_test[:, np.newaxis] x_test # 5 y_test = model_lr.predict(x_test) y_test y_train = model_lr.predict(x_matriks) plt.scatter(x, y, color='r') plt.plot(x, y_train, label="Model Training") plt.plot(x_test, y_test, label="Test Result/hasil Prediksi") plt.legend() plt.show() ``` # Day 2 ``` from sklearn.datasets import load_iris import pandas as pd import numpy as np iris = load_iris() df = pd.DataFrame(np.c_[iris['data'], iris['target']], columns = iris['feature_names'] + ['species']) df.head() iris from scipy import stats z = stats.zscore(df) z print(np.where(z>3)) # import class model from sklearn.neighbors import KNeighborsClassifier z[15][1] # Membuat objek model dan memilih hyperparameter # KNeighborsClassifier? 
model_knn = KNeighborsClassifier(n_neighbors=6, weights='distance') # Memisahkan data feature dan target X = df.drop('species', axis=1) y = df['species'] X # Perintahkan model untuk mempelajari data dengan menggunakan method .fit() model_knn.fit(X, y) # predict x_new = np.array([ [2.5, 4, 3, 0.1], [1, 3.5, 1.7, 0.4], [4, 1, 3, 0.3] ]) y_new = model_knn.predict(x_new) y_new # 0 = sentosa # 1 = versicolor # 2 = virginica import numpy as np import matplotlib.pyplot as plt rng = np.random.RandomState(1) x = 10*rng.rand(50) y = 5*x + 10 + rng.rand(50) plt.scatter(x, y) plt.show() from sklearn.linear_model import LinearRegression model_lr = LinearRegression(fit_intercept=True) model_lr.fit(x[:, np.newaxis], y) y_predict = model_lr.predict(x[:, np.newaxis]) plt.plot(x, y_predict, color='r', label='Model Predicted Data') plt.scatter(x, y, label='Actual Data') plt.legend() plt.show() model_lr.coef_ model_lr.intercept_ # y = 5*x + 10 + rng.rand(50) x = rng.rand(50, 3) y = np.dot(x, [4, 2, 7]) + 20 # sama dengan x*4 + x*2 + x*7 + 20 x.shape y model_lr2 = LinearRegression(fit_intercept=True) model_lr2.fit(x, y) y_predict = model_lr2.predict(x) model_lr2.coef_ model_lr2.intercept_ ``` # Day 3 ``` from sklearn.neighbors import KNeighborsClassifier model_knn = KNeighborsClassifier(n_neighbors=2) x_train = df.drop('species', axis=1) y_train = df['species'] model_knn.fit(x_train, y_train) # cara salah dalam mengevaluasi model y_prediksi = model_knn.predict(x_train) from sklearn.metrics import accuracy_score score = accuracy_score(y_train, y_prediksi) score # cara yang benar x = df.drop('species', axis=1) y = df['species'] y.value_counts() from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=21, stratify=y) # x -> x_train, x_test -0.3-0.2 # y -> y_train, y_test -0.3-0.2 # valuenya sama karena stratify y_train.value_counts() print(x_train.shape) print(x_test.shape) model_knn = 
KNeighborsClassifier(n_neighbors=2) model_knn.fit(x_train, y_train) y_predik = model_knn.predict(x_test) from sklearn.metrics import accuracy_score score = accuracy_score(y_test, y_predik) score from sklearn.model_selection import cross_val_score model_knn = KNeighborsClassifier(n_neighbors=2) cv_result = cross_val_score(model_knn, x, y, cv=10) cv_result.mean() import pandas as pd import numpy as np colnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class'] df = pd.read_csv('pima-indians-diabetes.csv', names=colnames) df.head() df['class'].value_counts() from sklearn.linear_model import LogisticRegression from sklearn.model_selection import GridSearchCV from sklearn.model_selection import train_test_split from sklearn.preprocessing import scale X = df.drop('class', axis=1) Xs = scale(X) y = df['class'] X_train, X_test, y_train, y_test = train_test_split(Xs, y, random_state=21, stratify=y, test_size=0.2) model_lr = LogisticRegression(random_state=21) params_grid = { 'C':np.arange(0.1, 1, 0.1), 'class_weight':[{0:x, 1:1-x} for x in np.arange(0.1, 0.9, 0.1)] } gscv = GridSearchCV(model_lr, params_grid, cv=10, scoring='f1') gscv.fit(X_train, y_train) X_test y_pred = gscv.predict(X_test) y_pred from sklearn.metrics import confusion_matrix, classification_report confusion_matrix(y_test, y_pred, labels=[1, 0]) TP = 39 FN = 15 FP = 25 TN = 75 print(classification_report(y_test, y_pred)) # menghitung nilai precisi, recall, f-1 score dari model kita dalam memprediksi data yang positif precision = TP/(TP+FP) recall = TP/(TP+FN) f1score = 2 * precision * recall / (precision + recall) print(precision) print(recall) print(f1score) # menghitung nilai precisi, recall, f-1 score dari model kita dalam memprediksi data yang negatif precision = TN/(TN+FN) recall = TN/(TN+FP) f1score = (precision * recall * 2) / (precision + recall) print(precision) print(recall) print(f1score) ``` # Day 4 ``` from sklearn.datasets import load_iris import pandas as pd import 
numpy as np colnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class'] df = pd.read_csv('pima-indians-diabetes.csv', names=colnames) df.head() from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import cross_validate, cross_val_score X = df.drop('class', axis=1) y = df['class'] model = KNeighborsClassifier(n_neighbors=5) cv_score1 = cross_validate(model, X, y, cv=10, return_train_score=True) cv_score2 = cross_val_score(model, X, y, cv=10) cv_score1 cv_score2 cv_score1['test_score'].mean() cv_score2.mean() def knn_predict(k): model = KNeighborsClassifier(n_neighbors=k) score = cross_validate(model, X, y, cv=10, return_train_score=True) train_score = score['train_score'].mean() test_score = score['test_score'].mean() return train_score, test_score train_scores = [] test_scores = [] for k in range(2, 100): # lakukan fitting # kemudian scoring train_score, test_score = knn_predict(k) train_scores.append(train_score) test_scores.append(test_score) train_scores import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(14, 8)) ax.plot(range(2, 100), train_scores, marker='x', color='b', label='Train Scores') ax.plot(range(2, 100), test_scores, marker='o', color='g', label='Test Scores') ax.set_xlabel('Nilai K') ax.set_ylabel('Score') fig.legend() plt.show() from sklearn.model_selection import GridSearchCV, RandomizedSearchCV model = KNeighborsClassifier() param_grid = {'n_neighbors':np.arange(5, 50), 'weights':['distance', 'uniform']} gscv = GridSearchCV(model, param_grid=param_grid, scoring='accuracy', cv=5) gscv.fit(X, y) gscv.best_params_ gscv.best_score_ rscv = RandomizedSearchCV(model, param_grid, n_iter=15, scoring='accuracy', cv=5) rscv.fit(X, y) rscv.best_params_ rscv.best_score_ ``` # Day 5 ``` data = { 'pendidikan_terakhir' : ['SD', 'SMP', 'SMA', 'SMP', 'SMP'], 'tempat_tinggal' : ['Bandung', 'Garut', 'Bandung', 'Cirebon', 'Jakarta'], 'status' : ['Menikah', 'Jomblo', 'Janda', 'Jomblo', 'Duda'], 
'tingkat_ekonomi' : ['Kurang Mampu', 'Berkecukupan', 'Mampu', 'Sangat Mampu', 'Mampu'], 'jumlah_anak' : [1, 4, 2, 0, 3] } import pandas as pd df = pd.DataFrame(data) df.head() df = pd.get_dummies(df, columns=['tempat_tinggal', 'status']) df obj_dict = { 'Kurang Mampu' : 0, 'Berkecukupan' : 1, 'Mampu' : 2, 'Sangat Mampu' : 3 } df['tingkat_ekonomi'] = df['tingkat_ekonomi'].replace(obj_dict) df['tingkat_ekonomi'] import numpy as np data = { 'pendidikan_terakhir' : [np.nan, 'SMP', 'SD', 'SMP', 'SMP', 'SD', 'SMP', 'SMA', 'SD'], 'tingkat_ekonomi' : [0, 1, 2, 3, 2, 2, 1, 1, 3], # 'jumlah_anak' : [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 1, 2] 'jumlah_anak' : [1, np.nan, np.nan, 1, 1, 1, 3, 1, 2] } data_ts = { 'Hari' : [1, 2, 3, 4, 5], 'Jumlah' : [12, 23, np.nan, 12, 20] } df = pd.DataFrame(data) df_ts = pd.DataFrame(data_ts) df ``` 5 Cara dalam menghandle missing value: 1. Drop missing value : Jumlah missing value data banyak 2. Filling with mean/median : berlaku untuk data yang bertipe numerik 3. Filling with modus : berlaku untuk data yang bertipe kategori 4. Filling with bfill atau ffill 5. KNN ``` 1. # drop berdasarkan row df.dropna(axis=0) # 1. 
drop berdasarkan column df.drop(['jumlah_anak'], axis=1) # 2 kelemahannya kurang akurat df['jumlah_anak'] = df['jumlah_anak'].fillna(df['jumlah_anak'].mean()) df['jumlah_anak'] df['jumlah_anak'] = df['jumlah_anak'].astype(int) df['jumlah_anak'] df # 3 df['pendidikan_terakhir'].value_counts() df['pendidikan_terakhir'] = df['pendidikan_terakhir'].fillna('SMP') df # 4 bfill nan diisi dengan nilai setelahnya (backward fill) df_ts.fillna(method='bfill') # 4 ffill nan diisi dengan nilai sebelumnya (forward fill) df_ts.fillna(method='ffill') df from sklearn.impute import KNNImputer imp = KNNImputer(n_neighbors=5) # imp.fit_transform(df['jumlah_anak'][:, np.newaxis]) imp.fit_transform(df[['jumlah_anak', 'tingkat_ekonomi']]) import pandas as pd colnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class'] df = pd.read_csv('pima-indians-diabetes.csv', names=colnames) df.head() df.describe() X = df.drop('class', axis=1) X.head() from sklearn.preprocessing import StandardScaler stdscalar = StandardScaler() datascale = stdscalar.fit_transform(X) colnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age'] dfscale = pd.DataFrame(datascale, columns=colnames) dfscale dfscale.describe() from sklearn.preprocessing import Normalizer normscaler = Normalizer() datanorm = normscaler.fit_transform(X) colnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age'] dfnorm = pd.DataFrame(datanorm, columns=colnames) dfnorm dfnorm.describe() ``` 1. Normalization digunakan ketika kita tidak memiliki asumsi bahwa data kita berdistribusi normal, dan kita memakai algoritma ML yang tidak mengasumsikan bentuk distribusi dari data... contohnya KNN, neural network, dll 2. Standardization apabila data kita diasumsikan memiliki distribusi normal
github_jupyter
``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline import warnings warnings.simplefilter(action='ignore', category=FutureWarning) df = pd.read_csv('credit.csv') df.head() df.shape df.info() df.isnull().sum() df.duplicated().sum() df.corr() df['default'].value_counts() ``` # How many defaulter in the data? ``` df['default'].value_counts().plot(kind='pie',autopct=lambda p:'{:.2f}%\n({:.0f})'.format(p,(p/100)*(df['default'].value_counts().sum()))) ``` # How many students in this data? ``` df['student'].value_counts().plot(kind='pie',autopct=lambda p:'{:.2f}%\n({:.0f})'.format(p,(p/100)*(df['student'].value_counts().sum()))) ``` # How balance column looks like? ``` sns.distplot(df['balance']) ``` We can say this is right skewed data. majority of the people have balance between 500-1000 # How income column looks like? ``` sns.distplot(df['income']) df.sort_values(by='student') def Value_Countplot(data,hue=None,x_pos=0.25,rotation=None): ''' data: data hue: hue data x_pos: int/float - to position the value ''' ax = sns.countplot(data,hue=hue) for i in ax.patches: height = i.get_height() # get height of bar (value of y axis) x = i.get_x() # get x_axis value ax.text(x=x+x_pos,y=height-(height/10),s=height,fontsize=10,fontweight='bold',rotation=rotation) ``` # Students vs Defaulter on Bar Plot ``` plt.figure(figsize=(15,5)) # plt.subplot(311) Value_Countplot(df['default'],hue=df['student'],x_pos=0.15) plt.title('Bar Plot: Students vs Defaulter',fontdict={'fontsize':20,'color':'red'}) plt.figure(figsize=(15,8)) sns.scatterplot(df['balance'],df['income'],hue=df['default'],style=df['student']) plt.title('Scatter Plot: Income vs Balance vs Student vs Default',fontdict={'fontsize':20,'color':'red'}) sns.clustermap(pd.crosstab(df['student'],df['default'])) plt.title('Cluster Map: Students vs Defaulter',fontdict={'fontsize':20,'color':'red'}) sns.pairplot(df,hue='default',kind='kde') # plt.title('Pair Plot: Income vs 
Balance vs Default',fontdict={'fontsize':20,'color':'red'}) plt.figure(figsize=(15,5)) sns.boxplot(df['balance'],df['default']) plt.title('Box Plot: Balance vs Default',fontdict={'fontsize':20,'color':'red'}) plt.figure(figsize=(15,5)) sns.boxplot(df['balance'],df['student']) plt.title('Box Plot: Balance vs Student',fontdict={'fontsize':20,'color':'red'}) plt.figure(figsize=(15,5)) sns.boxplot(df['income'],df['student']) plt.title('Box Plot: Income vs Student',fontdict={'fontsize':20,'color':'red'}) plt.figure(figsize=(15,5)) sns.boxplot(df['income'],df['default']) plt.title('Box Plot: Income vs Default',fontdict={'fontsize':20,'color':'red'}) ```
github_jupyter
# Table of Contents <p><div class="lev1 toc-item"><a href="#Simulated-annealing-in-Python" data-toc-modified-id="Simulated-annealing-in-Python-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Simulated annealing in Python</a></div><div class="lev2 toc-item"><a href="#References" data-toc-modified-id="References-11"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>References</a></div><div class="lev2 toc-item"><a href="#See-also" data-toc-modified-id="See-also-12"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>See also</a></div><div class="lev2 toc-item"><a href="#About" data-toc-modified-id="About-13"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>About</a></div><div class="lev2 toc-item"><a href="#Algorithm" data-toc-modified-id="Algorithm-14"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Algorithm</a></div><div class="lev2 toc-item"><a href="#Basic-but-generic-Python-code" data-toc-modified-id="Basic-but-generic-Python-code-15"><span class="toc-item-num">1.5&nbsp;&nbsp;</span>Basic but generic Python code</a></div><div class="lev2 toc-item"><a href="#Basic-example" data-toc-modified-id="Basic-example-16"><span class="toc-item-num">1.6&nbsp;&nbsp;</span>Basic example</a></div><div class="lev2 toc-item"><a href="#Visualizing-the-steps" data-toc-modified-id="Visualizing-the-steps-17"><span class="toc-item-num">1.7&nbsp;&nbsp;</span>Visualizing the steps</a></div><div class="lev2 toc-item"><a href="#More-visualizations" data-toc-modified-id="More-visualizations-18"><span class="toc-item-num">1.8&nbsp;&nbsp;</span>More visualizations</a></div> # Simulated annealing in Python This small notebook implements, in [Python 3](https://docs.python.org/3/), the [simulated annealing](https://en.wikipedia.org/wiki/Simulated_annealing) algorithm for numerical optimization. ## References - The Wikipedia page: [simulated annealing](https://en.wikipedia.org/wiki/Simulated_annealing). 
- It was implemented in `scipy.optimize` before version 0.14: [`scipy.optimize.anneal`](https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.optimize.anneal.html). - [This blog post](http://apmonitor.com/me575/index.php/Main/SimulatedAnnealing). - These Stack Overflow questions: [15853513](https://stackoverflow.com/questions/15853513/) and [19757551](https://stackoverflow.com/questions/19757551/). ## See also - For a real-world use of simulated annealing, this Python module seems useful: [perrygeo/simanneal on GitHub](https://github.com/perrygeo/simanneal). ## About - *Date:* 20/07/2017. - *Author:* [Lilian Besson](https://GitHub.com/Naereen), (C) 2017. - *Licence:* [MIT Licence](http://lbesson.mit-license.org). ---- > This notebook should be compatible with both Python versions, [2](https://docs.python.org/2/) and [3](https://docs.python.org/3/). ``` from __future__ import print_function, division # Python 2 compatibility if needed import numpy as np import numpy.random as rn import matplotlib.pyplot as plt # to plot import matplotlib as mpl from scipy import optimize # to compare import seaborn as sns sns.set(context="talk", style="darkgrid", palette="hls", font="sans-serif", font_scale=1.05) FIGSIZE = (19, 8) #: Figure size, in inches! mpl.rcParams['figure.figsize'] = FIGSIZE ``` ---- ## Algorithm The following pseudocode presents the simulated annealing heuristic. - It starts from a state $s_0$ and continues to either a maximum of $k_{\max}$ steps or until a state with an energy of $e_{\min}$ or less is found. - In the process, the call $\mathrm{neighbour}(s)$ should generate a randomly chosen neighbour of a given state $s$. - The annealing schedule is defined by the call $\mathrm{temperature}(r)$, which should yield the temperature to use, given the fraction $r$ of the time budget that has been expended so far. 
> **Simulated Annealing**: > > - Let $s$ = $s_0$ > - For $k = 0$ through $k_{\max}$ (exclusive): > + $T := \mathrm{temperature}(k ∕ k_{\max})$ > + Pick a random neighbour, $s_{\mathrm{new}} := \mathrm{neighbour}(s)$ > + If $P(E(s), E(s_{\mathrm{new}}), T) \geq \mathrm{random}(0, 1)$: > * $s := s_{\mathrm{new}}$ > - Output: the final state $s$ ---- ## Basic but generic Python code Let us start with a very generic implementation: ``` def annealing(random_start, cost_function, random_neighbour, acceptance, temperature, maxsteps=1000, debug=True): """ Optimize the black-box function 'cost_function' with the simulated annealing algorithm.""" state = random_start() cost = cost_function(state) states, costs = [state], [cost] for step in range(maxsteps): fraction = step / float(maxsteps) T = temperature(fraction) new_state = random_neighbour(state, fraction) new_cost = cost_function(new_state) if debug: print("Step #{:>2}/{:>2} : T = {:>4.3g}, state = {:>4.3g}, cost = {:>4.3g}, new_state = {:>4.3g}, new_cost = {:>4.3g} ...".format(step, maxsteps, T, state, cost, new_state, new_cost)) if acceptance_probability(cost, new_cost, T) > rn.random(): state, cost = new_state, new_cost states.append(state) costs.append(cost) # print(" ==> Accept it!") # else: # print(" ==> Reject it...") return state, cost_function(state), states, costs ``` ---- ## Basic example We will use this to find the global minimum of the function $x \mapsto x^2$ on $[-10, 10]$. ``` interval = (-10, 10) def f(x): """ Function to minimize.""" return x ** 2 def clip(x): """ Force x to be in the interval.""" a, b = interval return max(min(x, b), a) def random_start(): """ Random point in the interval.""" a, b = interval return a + (b - a) * rn.random_sample() def cost_function(x): """ Cost of x = f(x).""" return f(x) def random_neighbour(x, fraction=1): """Move a little bit x, from the left or the right.""" amplitude = (max(interval) - min(interval)) * fraction / 10 delta = (-amplitude/2.) 
+ amplitude * rn.random_sample() return clip(x + delta) def acceptance_probability(cost, new_cost, temperature): if new_cost < cost: # print(" - Acceptance probabilty = 1 as new_cost = {} < cost = {}...".format(new_cost, cost)) return 1 else: p = np.exp(- (new_cost - cost) / temperature) # print(" - Acceptance probabilty = {:.3g}...".format(p)) return p def temperature(fraction): """ Example of temperature dicreasing as the process goes on.""" return max(0.01, min(1, 1 - fraction)) ``` Let's try! ``` annealing(random_start, cost_function, random_neighbour, acceptance_probability, temperature, maxsteps=30, debug=True); ``` Now with more steps: ``` state, c, states, costs = annealing(random_start, cost_function, random_neighbour, acceptance_probability, temperature, maxsteps=1000, debug=False) state c ``` ---- ## Visualizing the steps ``` def see_annealing(states, costs): plt.figure() plt.suptitle("Evolution of states and costs of the simulated annealing") plt.subplot(121) plt.plot(states, 'r') plt.title("States") plt.subplot(122) plt.plot(costs, 'b') plt.title("Costs") plt.show() see_annealing(states, costs) ``` ---- ## More visualizations ``` def visualize_annealing(cost_function): state, c, states, costs = annealing(random_start, cost_function, random_neighbour, acceptance_probability, temperature, maxsteps=1000, debug=False) see_annealing(states, costs) return state, c visualize_annealing(lambda x: x**3) visualize_annealing(lambda x: x**2) visualize_annealing(np.abs) visualize_annealing(np.cos) visualize_annealing(lambda x: np.sin(x) + np.cos(x)) ``` In all these examples, the simulated annealing converges to a global minimum. It can be non-unique, but it is found. ---- > That's it for today, folks! More notebooks can be found on [my GitHub page](https://GitHub.com/Naereen/notebooks).
github_jupyter
``` import numpy as np import pandas as pd # stats from scipy import stats # Plotting import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns import math %matplotlib inline _df4 = pd.read_csv('winequality-red.csv',sep=";") _df4 # _df4.head() ``` # Basics of MatPlotLib # Pylab interface, where we use plt. # Stateful interface where we use ax = plt.axes() # Object Oriented one where we use ax = plt.subplots() to create an array of axes ## 1st is pylab ``` # linspace(start,end,step) gives value equally spaced, not a + step, a1 + step, but a divided in equal step x = np.linspace(-np.pi,2*np.pi,256,endpoint=True) c,s = np.cos(x), np.sin(x) plt.figure(figsize=(12,6), dpi=80,facecolor ="cyan") #figure() gives control over frame,dpi,edgecolor,facecolor,linewidth plt.subplot(2,1,1)#subplot(number_rows,number_columns,#plot_number) plt.xlim(-4.0,4.0) plt.xticks(np.linspace(-4,4,9,endpoint=True)) plt.yticks(np.linspace(-1,1,5,endpoint=True)) plt.plot(x,c,color='green', linestyle="-.",label="cos(x)")##### width x height in figsize # ':' gives ..... # '-.' gives -.-.-. 
# '--' gives - - - - # '-' gives - # Setting x & y limits # plt.xlim(start,end) same for ylim() # set y ticks plt.legend() plt.subplot(2,1,2) plt.plot(x,s,':c',label="sin(x)") #plot(x,y) # we can save figureby savefig("../path/file_name.png", dpi=72) ''' many file formats are available so plase check ''' # plt.show() # Should be used when running from script, or else from ipython it's not important, should be used only once # to update grapgh we use plt.draw() plt.legend() ``` # Above Interface was stateful based, But we will go for object oriented interface ``` # Creating Above figure in oop x = np.linspace(-2*np.pi,2*np.pi,256) plt.style.use('seaborn-whitegrid') # could also be classical fig, ax = plt.subplots(2) ### Creating ax[] array of axes ax[0].plot(x,np.sin(x),':c',label="sin(x)") ax[0].set(xlabel="x",ylabel="sin(x)",title="sin(x)") # set(xlim(),ylim(),xlabel="",ylabel="",title="") ax[0].legend() ax[1].plot(x,np.cos(x)) fig = plt.figure() ax = plt.axes() x = np.linspace(0,10,2000) ax.plot(x,np.sin(x),'--c')# c first letter of color only for ax.plot(x,np.cos(x),':r') # rgbcmyk Cyan, Magneta,Yello,blacK ``` # plt.axis([xmin, xmax, ymin, ymax],'tight') to set limit in a single call, It also allows to tighten bounds. 
## Above dig is not bound tight # Labeling Plots ## plt.title("xxxxxx") plt.xlabel("xxx") plt.ylabel("xxxx") ``` x = np.linspace(0,10,30) ax = plt.axes() ax.plot(x,np.sin(x),'o',color="black") plt.figure(figsize=(12,12),dpi=80) rng = np.random.RandomState(0) for marker in ['o','.',',','x','+','v','^','<','>','s','d']: plt.plot(rng.rand(5),rng.rand(5),marker,label="marker = {}".format(marker),color="red") plt.legend(numpoints=1) ``` ## This markers can also be combined with '-' line like '-o' ## full coustomization of markers be like ### plt.plot(x,np.sin(x),'>c',markersize=15,linewidth=1,markerfacecolor='white',markeredgecolor="red",markeredgewidth=2) ``` plt.plot(x,np.sin(x),'-pc',markersize=15,linewidth=1,markerfacecolor='white',markeredgecolor="red",markeredgewidth=2,label="line") plt.legend() y = np.random.randint(0,100,50) x = np.random.randint(0,50,50) plt.scatter(x,y,c=y,s=y,alpha=0.3,cmap='viridis') plt.colorbar() _d2 = _df4.pivot_table(values='pH',index="alcohol",columns="quality") _d2 # scatter graph can also be used for plotting 4 max function, two extra in c and size plt.style.use('seaborn-whitegrid') a = _df4['pH'].value_counts() plt.figure(figsize=(12,12),dpi=80) plt.scatter(_df4['quality'],_df4['alcohol'],s=100,c=_df4['pH'],cmap=plt.cm.PuOr) #alpha for opaquness plt.colorbar() ``` ## plt.scatter(f(x),f(y),s=f(z),c=f(w),cmap=plt.cm.PuOr,alpha=n) ### s and c can take numbers as well as function, alpha is used for transparency n-(0,1) # color-map i.e cmap is too important to choose which colormap we would follow ## we can refer for different color-map on below given link # https://chrisalbon.com/python/set_the_color_of_a_matplotlib.html ``` plt.figure(figsize=(12,12),dpi=80) plt.plot(_df4['quality'],_df4['alcohol'],'o',markersize=15,linewidth=1,markerfacecolor='white',markeredgecolor="red",markeredgewidth=2) ``` # Plot Error Bars plt.errorbar(x,f(x), yerr=dy,fmt='o',color=" ",ecolor=" ",elinewidth=3,capsize=0) x axis error (xerr) # I have skipped 
continuous error bars; please go through the PDF # We will start the object-oriented approach
github_jupyter
## "FAQ-Style QA": Utilizing existing FAQs for Question Answering While *extractive Question Answering* works on pure texts and is therefore more generalizable, there's also a common alternative that utilizes existing FAQ data. Pros: - Very fast at inference time - Utilize existing FAQ data - Quite good control over answers Cons: - Generalizability: We can only answer questions that are similar to existing ones in FAQ In some use cases, a combination of extractive QA and FAQ-style can also be an interesting option. *Use this [link](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial4_FAQ_style_QA.ipynb) to open the notebook in Google Colab.* ``` #TODO ! pip install git+git://github.com/deepset-ai/haystack.git@319e238f4652a05a95f02fa4cd19ef406440a789 #! pip install farm-haystack from haystack import Finder from haystack.database.elasticsearch import ElasticsearchDocumentStore from haystack.retriever.elasticsearch import EmbeddingRetriever from haystack.utils import print_answers import pandas as pd import requests ``` ### Start an Elasticsearch server You can start Elasticsearch on your local machine instance using Docker. If Docker is not readily available in your environment (e.g., in Colab notebooks), then you can manually download and execute Elasticsearch from source. ``` # Recommended: Start Elasticsearch using Docker # ! docker run -d -p 9200:9200 -e "discovery.type=single-node" elasticsearch:7.6.2 # In Colab / No Docker environments: Start Elasticsearch from source ! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.6.2-linux-x86_64.tar.gz -q ! tar -xzf elasticsearch-7.6.2-linux-x86_64.tar.gz ! chown -R daemon:daemon elasticsearch-7.6.2 import os from subprocess import Popen, PIPE, STDOUT es_server = Popen(['elasticsearch-7.6.2/bin/elasticsearch'], stdout=PIPE, stderr=STDOUT, preexec_fn=lambda: os.setuid(1) # as daemon ) # wait until ES has started ! 
sleep 30 ``` ### Init the DocumentStore In contrast to Tutorial 1 (extractive QA), we: * specify the name of our `text_field` in Elasticsearch that we want to return as an answer * specify the name of our `embedding_field` in Elasticsearch where we'll store the embedding of our question and that is used later for calculating our similarity to the incoming user question * set `excluded_meta_data=["question_emb"]` so that we don't return the huge embedding vectors in our search results ``` from haystack.database.elasticsearch import ElasticsearchDocumentStore document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document", text_field="answer", embedding_field="question_emb", embedding_dim=768, excluded_meta_data=["question_emb"]) ``` ### Create a Retriever using embeddings Instead of retrieving via Elasticsearch's plain BM25, we want to use vector similarity of the questions (user question vs. FAQ ones). We can use the `EmbeddingRetriever` for this purpose and specify a model that we use for the embeddings. ``` retriever = EmbeddingRetriever(document_store=document_store, embedding_model="deepset/sentence_bert", gpu=False) ``` ### Prepare & Index FAQ data We create a pandas dataframe containing some FAQ data (i.e curated pairs of question + answer) and index those in elasticsearch. 
Here: We download some question-answer pairs related to COVID-19 ``` # Download temp = requests.get("https://raw.githubusercontent.com/deepset-ai/COVID-QA/master/data/faqs/faq_covidbert.csv") open('small_faq_covid.csv', 'wb').write(temp.content) # Get dataframe with columns "question", "answer" and some custom metadata df = pd.read_csv("small_faq_covid.csv") # Minimal cleaning df.fillna(value="", inplace=True) df["question"] = df["question"].apply(lambda x: x.strip()) print(df.head()) # Get embeddings for our questions from the FAQs questions = list(df["question"].values) df["question_emb"] = retriever.create_embedding(texts=questions) # Convert Dataframe to list of dicts and index them in our DocumentStore docs_to_index = df.to_dict(orient="records") document_store.write_documents(docs_to_index) ``` ### Ask questions Initialize a Finder (this time without a reader) and ask questions ``` finder = Finder(reader=None, retriever=retriever) prediction = finder.get_answers_via_similar_questions(question="How is the virus spreading?", top_k_retriever=10) print_answers(prediction, details="all") ```
github_jupyter
## **Variables and Data Types** **Topics Covered** > Creating Variable > DataTypes > None Keyword > Multi Line statement and Multi Comment ----- ### Creating a Variable * Variables are used to store values. In Python you don't have to declare a varaible. * Variable is created the moment you assign a value to it. * *3 Rules* > 1. It can be only one word. > 2. It can use only letters, numbers, and the underscore ( _ ) character. > 3. It can’t begin with a number. ---- ``` # One word. No space allowed my Wallet = 10 # No special character @wallet = 10 @wallet # Should begin either with alphabet or _ 10wallet = 10 # Type string i = "hello" i # Type integer variable_1 = 10 variable_1 x = 10 # An integer variable # assigning value 10 wallet = 10 wallet # Updating the value wallet = 20 wallet ``` 1. Variables do not need to be declared with any particular type and can even change type after they have been set. ``` # Previously x stored an integer value x = "Hello" # A string ``` 2. Assigning values to multiple variables ``` x, y, z = "Red", "Black", "White" print(x) print(y) print(z) ``` 3. Assign the same value to multiple variables in one line ``` x = y = z = "Red" print(x) print(y) print(z) ``` ------------- ### Data Types * Data types are the classification of objects. * The basic types build into Python include float, int, str and bool. ![image.png](attachment:b3eb87ca-5e37-4b56-ad80-7f2f7fdd7d0f.png) ``` x = "Hello" type(x) x = 'Hello' type(x) x = "100" type(x) x = 10 type(x) x = 10.5 type(x) x = "True" type(x) x = True y = False type(x) false = 10 y = false print(y) # print 10 y = False print(y) # Print False y = false ``` > The above threw an error because python is case sensitive. We need to use `False` as built in value. Else it treats it as a variable. > `type()` is used to check data type of a given object. **Q**. 
An example for Case sensitive variable ``` x = False false = 10 print(x, false) ``` **** ### Multiline statement We can make a statement extend over multiple lines with the line continuation character(`\`). * Explicit Continuation : When you right away use the line continuation character (`\`) to split a statement into multiple lines. * Implicit line continuation is when you split a statement using either of parentheses ( ), brackets [ ] and braces { }. ``` # Explicit line continuation a = 1 + 2 + 3 + \ 4 + 5 + 6 + \ 7 + 8 + 9 a # Implicit a = (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9) a ``` **** ### Multiline Comment Triple quotes (`'''` or `"""`) are generally used for multi-line strings. But they can be used as a multi-line comment as well. ``` """This is also a perfect example of multi-line comments""" """This is also a perfect example of multi-line comments""" a = b = c = "Hello" ``` *** ### None Keyword > The **`None`** keyword is used to define a `null` value, or no value at all. > `None` is not the same as 0, False, or an empty string. > Comparing `None` to anything will always return False except None itself ``` X = None "Value of X : {0} , Type of X : {1}".format(X,type(X)) # None is always false bool(None) ``` --------
github_jupyter
# Writing a Device driver ### Basic structure Here is a simple (but complete and functional) code block that implements a VISA driver for a power sensor: ``` import labbench as lb import pandas as pd # Specific driver definitions are implemented by subclassing classes like lb.VISADevice class PowerSensor(lb.VISADevice): initiate_continuous = lb.property.bool(key='INIT:CONT') output_trigger = lb.property.bool(key='OUTP:TRIG') trigger_source = lb.property.str(key='TRIG:SOUR', only=['IMM','INT','EXT','BUS','INT1']) trigger_count = lb.property.int(key='TRIG:COUN', min=1,max=200,step=1) measurement_rate = lb.property.str(key='SENS:MRAT', only=['NORM','DOUB','FAST']) sweep_aperture = lb.property.float(key='SWE:APER', min=20e-6, max=200e-3,help='time (in s)') frequency = lb.property.float(key='SENS:FREQ', min=10e6, max=18e9,help='center frequency (Hz)') def preset (self): """ Apply the instrument's preset state. """ self.write('SYST:PRES') def fetch (self): """ Get already-acquired data from the instrument. Returns: The data trace packaged as a pd.DataFrame """ response = self.query('FETC?').split(',') if len(response)==1: return float(response[0]) else: return pd.to_numeric(pd.Series(response)) ``` Let's work through what this does. ### 1. Every `labbench` driver is a subclass of a labbench Device class, such as lb.VISADevice: This is the definition of the PowerSensor: ```python class PowerSensor(lb.VISADevice): # ... ``` This single line gives our power sensor driver all of the general capabilities of a VISA driver this driver class (known as "subclassing" "inheriting" in software engineering). This means that in this one line, the PowerSensor driver has adopted _all of the same member and attribute features as a "plain" VISADevice_. 
The `VISADevice` class helps streamline use of the `pyvisa` with features like * managing connection and disconnection, given a VISA resource string; * shortcuts for accessing simple instrument states, implemented entirely based on definitions (discussed below); and * wrapper methods (i.e., member functions) for pyvisa resource `write` and `query` methods. A more complete listing of everything that comes with `lb.VISADevice` is in the [programming reference](http://ssm.ipages.nist.gov/labbench/labbench.html#labbench.backends.VISADevice). This power sensor driver definition is just that - a definition. To _use_ the driver and connect to the instrument in the lab, instantiate it and connect to the device. This is the simplest recommended way to instantiate, connect, and then disconnect in a script: ```python # Here is the `with` block with PowerSensor('TCPIP::10.0.0.1::::INSTR') as sensor: pass # The sensor is connected in this "with" block. Afterward, it disconnects, even # if there is an exception. Automation code that uses the sensor would go here. # Now the `with` block is done and we're disconnected print('Disconnected, all done') ``` It's nice to leave the sensor connected sometimes, like for interactive play on a python prompt. In that case, you can manually connect and disconnect: ```python sensor = PowerSensor('TCPIP::10.0.0.1::::INSTR') sensor.connect() # The sensor is connected now. Automation code that uses the sensor would go here. sensor.disconnect() # We have to manually disconnect when we don't use a with block. print('Disconnected, all done') ``` There are two key pieces here: * The instantiation, `PowerSensor('TCPIP::10.0.0.1::::INSTR')`, is where we create a power sensor object that we can interact with. All `VISADevice` drivers use this standard resource string formatting; other types of drivers have different formats. 
* The `with` block (talked about under the name _context management_ in python language documents) serves two functions for any labbench driver (not just VISADevice): 1. The instrument is connected at the start of the with block 2. guarantees that the instrument will be disconnected after the with end of the with block, _even if there is an error inside the block!_ ### 2. Getting and setting simple parameters in the device the `state` object ##### Reading the definition Each driver has an attribute called `state`. It is an optional way to give your users shortcuts to get and set simple instrument settings. This is the definition from the example above: ```python initiate_continuous = lb.Bool (key='INIT:CONT') output_trigger = lb.Bool (key='OUTP:TRIG') trigger_source = lb.EnumBytes (key='TRIG:SOUR', values=['IMM','INT','EXT','BUS','INT1']) trigger_count = lb.Int (key='TRIG:COUN', min=1,max=200,step=1) measurement_rate = lb.EnumBytes (key='SENS:MRAT', values=['NORM','DOUB','FAST']) sweep_aperture = lb.Float (key='SWE:APER', min=20e-6, max=200e-3,help='time (in s)') frequency = lb.Float (key='SENS:FREQ', min=10e6, max=18e9,help='input center frequency (in Hz)') ``` The `VISADevice` driver uses the metadata given for each descriptor above to determine how to communicate with the remote instrument on assignment. Behind the scenes, the `state` object has extra features that can monitor changes to these states to automatically record the changes we make to these states to a database, or (in the future) automatically generate a GUI front-panel. *Every* labbench driver has a state object, including at least the boolean state called `connected` (indicating whether the host computer is connected with the remote device is connected or not). --- ##### Using state attributes Making an instance of PowerSensor - in the example, this was `PowerSensor('TCPIP::10.0.0.1::::INSTR')` - causes the `state` object to become interactive. 
Assignment causes the setting to be applied to the instrument.
# Behind the scenes, each time we fetch the value, magic in # lb.VISADevice retrieves the current value from the instrument # with the SCPI query 'SENS:FREQ?', and then converts it to a floating point # number. print('The sensor frequency is {} GHz'.format(sensor.state.frequency/1e9)) print(sensor.state.isopen) # Prints False - we're disconnected ``` Simply put: assigning to or from with the attribute in the driver state instance causes remote set or get operations. The python data type matches the definition in the `state` class. ##### Discovering and navigating states Inheriting from `VISADevice` means that `PowerSensor.state` includes the seven states defined here, plus all others listed provided by VISADevice.state. Since these aren't listed here, it can get confusing tracking what has been inherited (like in other object-oriented libraries). Fortunately, there are many ways to explore the entire list of states that have been inherited from the parent state class: 1. Look it up [in the API reference manual](http://ssm.ipages.nist.gov/labbench/labbench.html#labbench.visa.VISADevice.state) 2. When working with an instantiated driver object in an ipython or jupyter notebook command prompt, type `lb.VISADevice.state.` and press tab to autocomplete a list of valid options. You'll also see some functions to do esoteric things with these states. 3. When working in an editor like pycharm or spyder, you can ctrl+click on the right side of `VISADevice.state` to skip directly to looking at the definition of `VISADevice.state` in the `labbench` library 4. When working in any kind of python prompt, you can use the `help` function ```python help(PowerSensor.state) ``` 5. When working in an ipython or jupyter prompt, a nicer option than 4. is the ipython help magick: ```python PowerSensor.state? ``` ##### Writing state attributes The way we code this is a little unusual outside of python packages for web development. 
When we write a driver class, we add attributes defined with helper information such as - the python type that should represent the parameter - bounds for acceptable values of the parameter - descriptive "help" information for the user These attributes are a kind of python type state class is a _descriptor_. We call them "traits" because following an underlying library that we extend, [traitlets](https://github.com/ipython/traitlets) under the hood. The example includes seven state traits. After instantiating with `PowerSensor()`, we can start interacting with `sensor.state`. Each one is now a live object we can assign to and use like any other python object. The difference is, each time we get the value, it is queried from the instrument, and each time we assign to it (the normal `=` operator), a set command goes to the instrument to set it. The definition above includes metadata that dictates the python data type handled for this assignment operation, and how it should be converted: | **Descriptor metadata type** | **Uses in `PowerSensor` example** | **Behavior depends on the Device implementation** | |--------------------------------- |------------------------------------|----------------------------------- | | Python data type for assignment | `lb.Float`, `lb.EnumBytes`, etc. | No | | Data validation settings | `min`,`max`,`step` (for numbers) | No | | | `values` (for enumerated types) | No | | Documentation strings | `help` | No | | Associated backend command | `command` | Yes | Some types of drivers ignore `command` keyword, as discussed in [how to write a labbench device driver](how to write a device driver). ### 3. Device methods for commands and data acquisition The `state` class above is useful for remote assignment operations on simple scalar data types. Supporting a broader collection of operation types ("trigger a measurement," "fetch and return measurement data," etc.) need the flexibility of more general-purpose functions. 
In python, a member function of a class is called a method. Here are the methods defined in `PowerSensor`: ```python def preset (self): self.write('SYST:PRES') def fetch (self): response = self.query('FETC?').split(',') if len(response)==1: return float(response[0]) else: return pd.to_numeric(pd.Series(response)) ``` These are the methods that are specific to our power sensor device. * The `preset` function tells the device to revert to its default state. * The `fetch` method performs some text processing on the response from the device, and returns either a single scalar or a pandas Series if the result is a sequence of power values. The `labbench` convention is that the names of these methods are verbs (or sentence predicates, when single words are not specific enough). ##### Example data acquisition script Here is an example that presets the device, sets the center frequency to 2.45 GHz, and then collects 10 power samples: ``` with PowerSensor('TCPIP::10.0.0.1::::INSTR') as sensor: print('Connected to power sensor {}'.format(sensor.state.identity)) sensor.preset() sensor.wait() # VISADevice includes in the standard VISA wait method, which sends SCPI '*WAI' sensor.state.frequency = 2.45e9 # Set the power sensor center frequency to 2.45e9 GHz power_levels = pd.Series([sensor.fetch() for i in range(10)]) print('All done! Got these power levels: ') print(power_levels) ``` ##### Discovering and navigating device driver methods Inheritance has similar implications as it does for the `VISADevice.state` class. Inheriting from `VISADevice` means that `PowerSensor` includes the `preset` and `fetch` methods, plus many more from `lb.VISADevice` (some of which it inherited from `lb.Device`). Since these aren't listed in the example definition above, it can get confusing tracking what methods are available through inheritance (like in other object-oriented libraries). Sometimes, informally, this confusion is called "abstraction halitosis." 
Fortunately, there are many ways to identify the available objects and methods: 1. Look it up [in the API reference manual](http://ssm.ipages.nist.gov/labbench/labbench.html#labbench.visa.VISADevice) 2. When working with an instantiated driver object in an ipython or jupyter notebook command prompt, type `lb.VISADevice.` and press tab to autocomplete a list of valid options. You'll also see some functions to do esoteric things with these states. 3. When working in an editor like pycharm or spyder, you can ctrl+click on the right side of `VISADevice` to skip directly to looking at the definition of `VISADevice` in the `labbench` library 4. When working in any kind of python prompt, you can use the `help` function ```python help(PowerSensor) ``` 5. When working in an ipython or jupyter prompt, a nicely formatted version of 4. is the ipython help magick: ```python PowerSensor? ``` ## Miscellaneous extras ##### Connecting to multiple devices The best way to connect to multiple devices is to use a single `with` block. For example, a 10-sample acquisition with two power sensors might look like this: ``` with PowerSensor('TCPIP::10.0.0.1::::INSTR') as sensor1,\ PowerSensor('TCPIP::10.0.0.2::::INSTR') as sensor2: print('Connected to power sensors') for sensor in sensor1, sensor2: sensor.preset() sensor.wait() # VISADevice includes in the standard VISA wait method, which sends SCPI '*WAI' sensor.state.frequency = 2.45e9 # Set the power sensor center frequency to 2.45e9 GHz power_levels = pd.DataFrame([[sensor1.fetch(),sensor2.fetch()] for i in range(10)]) print('All done! Got these power levels: ') print(power_levels) ``` ##### Execute a function on state changes Database management and user interface tools make extensive use of callbacks, which gives an opportunity for you to execute custom code any time an assignment causes a state to change. 
A state change can occur in a couple of ways: * This triggers a callback if 2.45e9 is different than the last observed frequency: ```python sensor.state.frequency = 2.45e9 ``` * This triggers a callback if the instrument returns a frequency that is is different than the last observed frequency ```python current_freq = sensor.state.frequency ``` Configure a function call on an observed change with the `observe` method in `sensor.state`: ``` def callback(change): """ the callback function is given a single argument. change is a dictionary containing the descriptor ('frequency'), the state instance that contains frequency, and both the old and new values. """ # insert GUI update here? # commit updated state to a database here? print(change) with PowerSensor('TCPIP::10.0.0.1::::INSTR') as sensor: sensor.state.observe(callback) sensor.preset() sensor.wait() # VISADevice includes in the standard VISA wait method, which sends SCPI '*WAI' sensor.state.frequency = 2.45e9 # Set the power sensor center frequency to 2.45e9 GHz print('All done! Got these power levels: ') print(power_levels) ``` Use of callbacks can help separate the actual measurement loop (the contents of the `with` block) from other functions for debugging, GUI, and database management. The result can be code that is more clear.
github_jupyter
# Computation on Arrays: Broadcasting We saw in the previous section how NumPy's universal functions can be used to *vectorize* operations and thereby remove slow Python loops. Another means of vectorizing operations is to use NumPy's *broadcasting* functionality. Broadcasting is simply a set of rules for applying binary ufuncs (e.g., addition, subtraction, multiplication, etc.) on arrays of different sizes. ## Introducing Broadcasting Recall that for arrays of the same size, binary operations are performed on an element-by-element basis: ``` import numpy as np a = np.array([0, 1, 2]) b = np.array([5, 5, 5]) a + b ``` Broadcasting allows these types of binary operations to be performed on arrays of different sizes–for example, we can just as easily add a scalar (think of it as a zero-dimensional array) to an array: ``` a + 5 ``` We can think of this as an operation that stretches or duplicates the value ``5`` into the array ``[5, 5, 5]``, and adds the results. The advantage of NumPy's broadcasting is that this duplication of values does not actually take place, but it is a useful mental model as we think about broadcasting. We can similarly extend this to arrays of higher dimension. Observe the result when we add a one-dimensional array to a two-dimensional array: ``` M = np.ones((3, 3)) M M + a ``` Here the one-dimensional array ``a`` is stretched, or broadcast across the second dimension in order to match the shape of ``M``. While these examples are relatively easy to understand, more complicated cases can involve broadcasting of both arrays. Consider the following example: ``` a = np.arange(3) b = np.arange(3)[:, np.newaxis] print(a) print(b) a + b ``` Just as before we stretched or broadcasted one value to match the shape of the other, here we've stretched *both* ``a`` and ``b`` to match a common shape, and the result is a two-dimensional array! The geometry of these examples is visualized in the following figure. 
![Broadcasting Visual](figures/broadcasting.png) The light boxes represent the broadcasted values: again, this extra memory is not actually allocated in the course of the operation, but it can be useful conceptually to imagine that it is. ## Rules of Broadcasting Broadcasting in NumPy follows a strict set of rules to determine the interaction between the two arrays: - Rule 1: If the two arrays differ in their number of dimensions, the shape of the one with fewer dimensions is *padded* with ones on its leading (left) side. - Rule 2: If the shape of the two arrays does not match in any dimension, the array with shape equal to 1 in that dimension is stretched to match the other shape. - Rule 3: If in any dimension the sizes disagree and neither is equal to 1, an error is raised. To make these rules clear, let's consider a few examples in detail. ### Broadcasting example 1 Let's look at adding a two-dimensional array to a one-dimensional array: ``` M = np.ones((2, 3)) a = np.arange(3) ``` Let's consider an operation on these two arrays. 
The shape of the arrays are - ``M.shape = (2, 3)`` - ``a.shape = (3,)`` We see by rule 1 that the array ``a`` has fewer dimensions, so we pad it on the left with ones: - ``M.shape -> (2, 3)`` - ``a.shape -> (1, 3)`` By rule 2, we now see that the first dimension disagrees, so we stretch this dimension to match: - ``M.shape -> (2, 3)`` - ``a.shape -> (2, 3)`` The shapes match, and we see that the final shape will be ``(2, 3)``: ``` M + a ``` ### Broadcasting example 2 Let's take a look at an example where both arrays need to be broadcast: ``` a = np.arange(3).reshape((3, 1)) b = np.arange(3) ``` Again, we'll start by writing out the shape of the arrays: - ``a.shape = (3, 1)`` - ``b.shape = (3,)`` Rule 1 says we must pad the shape of ``b`` with ones: - ``a.shape -> (3, 1)`` - ``b.shape -> (1, 3)`` And rule 2 tells us that we upgrade each of these ones to match the corresponding size of the other array: - ``a.shape -> (3, 3)`` - ``b.shape -> (3, 3)`` Because the result matches, these shapes are compatible. We can see this here: ``` a + b ``` ### Broadcasting example 3 Now let's take a look at an example in which the two arrays are not compatible: ``` M = np.ones((3, 2)) a = np.arange(3) ``` This is just a slightly different situation than in the first example: the matrix ``M`` is transposed. How does this affect the calculation? The shape of the arrays are - ``M.shape = (3, 2)`` - ``a.shape = (3,)`` Again, rule 1 tells us that we must pad the shape of ``a`` with ones: - ``M.shape -> (3, 2)`` - ``a.shape -> (1, 3)`` By rule 2, the first dimension of ``a`` is stretched to match that of ``M``: - ``M.shape -> (3, 2)`` - ``a.shape -> (3, 3)`` Now we hit rule 3–the final shapes do not match, so these two arrays are incompatible, as we can observe by attempting this operation: ``` M + a ``` Note the potential confusion here: you could imagine making ``a`` and ``M`` compatible by, say, padding ``a``'s shape with ones on the right rather than the left. 
But this is not how the broadcasting rules work! That sort of flexibility might be useful in some cases, but it would lead to potential areas of ambiguity. If right-side padding is what you'd like, you can do this explicitly by reshaping the array (we'll use the ``np.newaxis`` keyword introduced in The Basics of NumPy Arrays): ``` a[:, np.newaxis].shape M + a[:, np.newaxis] ``` Also note that while we've been focusing on the ``+`` operator here, these broadcasting rules apply to *any* binary ``ufunc``. For example, here is the ``logaddexp(a, b)`` function, which computes ``log(exp(a) + exp(b))`` with more precision than the naive approach: ``` np.logaddexp(M, a[:, np.newaxis]) ``` For more information on the many available universal functions, refer to Computation on NumPy Arrays: Universal Functions. ## Broadcasting in Practice Broadcasting operations form the core of many examples we'll see throughout this book. We'll now take a look at a couple simple examples of where they can be useful. ### Centering an array In the previous section, we saw that ufuncs allow a NumPy user to remove the need to explicitly write slow Python loops. Broadcasting extends this ability. One commonly seen example is when centering an array of data. Imagine you have an array of 10 observations, each of which consists of 3 values. Using the standard convention, we'll store this in a $10 \times 3$ array: ``` X = np.random.random((10, 3)) ``` We can compute the mean of each feature using the ``mean`` aggregate across the first dimension: ``` Xmean = X.mean(0) Xmean ``` And now we can center the ``X`` array by subtracting the mean (this is a broadcasting operation): ``` X_centered = X - Xmean ``` To double-check that we've done this correctly, we can check that the centered array has near zero mean: ``` X_centered.mean(0) ``` To within machine precision, the mean is now zero. 
### Plotting a two-dimensional function One place that broadcasting is very useful is in displaying images based on two-dimensional functions. If we want to define a function $z = f(x, y)$, broadcasting can be used to compute the function across the grid: ``` # x and y have 50 steps from 0 to 5 x = np.linspace(0, 5, 50) y = np.linspace(0, 5, 50)[:, np.newaxis] z = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x) ``` We'll use Matplotlib to plot this two-dimensional array (these tools will be discussed in full in Density and Contour Plots): ``` %matplotlib inline import matplotlib.pyplot as plt plt.imshow(z, origin='lower', extent=[0, 5, 0, 5], cmap='viridis') plt.colorbar(); ``` The result is a compelling visualization of the two-dimensional function.
github_jupyter
*Note: This is not yet ready, but shows the direction I'm leaning in for Fourth Edition Search.* # State-Space Search This notebook describes several state-space search algorithms, and how they can be used to solve a variety of problems. We start with a simple algorithm and a simple domain: finding a route from city to city. Later we will explore other algorithms and domains. ## The Route-Finding Domain Like all state-space search problems, in a route-finding problem you will be given: - A start state (for example, `'A'` for the city Arad). - A goal state (for example, `'B'` for the city Bucharest). - Actions that can change state (for example, driving from `'A'` to `'S'`). You will be asked to find: - A path from the start state, through intermediate states, to the goal state. We'll use this map: <img src="http://robotics.cs.tamu.edu/dshell/cs625/images/map.jpg" height="366" width="603"> A state-space search problem can be represented by a *graph*, where the vertexes of the graph are the states of the problem (in this case, cities) and the edges of the graph are the actions (in this case, driving along a road). We'll represent a city by its single initial letter. We'll represent the graph of connections as a `dict` that maps each city to a list of the neighboring cities (connected by a road). For now we don't explicitly represent the actions, nor the distances between cities. ``` romania = { 'A': ['Z', 'T', 'S'], 'B': ['F', 'P', 'G', 'U'], 'C': ['D', 'R', 'P'], 'D': ['M', 'C'], 'E': ['H'], 'F': ['S', 'B'], 'G': ['B'], 'H': ['U', 'E'], 'I': ['N', 'V'], 'L': ['T', 'M'], 'M': ['L', 'D'], 'N': ['I'], 'O': ['Z', 'S'], 'P': ['R', 'C', 'B'], 'R': ['S', 'C', 'P'], 'S': ['A', 'O', 'F', 'R'], 'T': ['A', 'L'], 'U': ['B', 'V', 'H'], 'V': ['U', 'I'], 'Z': ['O', 'A']} ``` Suppose we want to get from `A` to `B`. Where can we go from the start state, `A`? ``` romania['A'] ``` We see that from `A` we can get to any of the three cities `['Z', 'T', 'S']`. Which should we choose? 
*We don't know.* That's the whole point of *search*: we don't know which immediate action is best, so we'll have to explore, until we find a *path* that leads to the goal. How do we explore? We'll start with a simple algorithm that will get us from `A` to `B`. We'll keep a *frontier*&mdash;a collection of not-yet-explored states&mdash;and expand the frontier outward until it reaches the goal. To be more precise: - Initially, the only state in the frontier is the start state, `'A'`. - Until we reach the goal, or run out of states in the frontier to explore, do the following: - Remove the first state from the frontier. Call it `s`. - If `s` is the goal, we're done. Return the path to `s`. - Otherwise, consider all the neighboring states of `s`. For each one: - If we have not previously explored the state, add it to the end of the frontier. - Also keep track of the previous state that led to this new neighboring state; we'll need this to reconstruct the path to the goal, and to keep us from re-visiting previously explored states. # A Simple Search Algorithm: `breadth_first` The function `breadth_first` implements this strategy: ``` from collections import deque # Doubly-ended queue: pop from left, append to right. def breadth_first(start, goal, neighbors): "Find a shortest sequence of states from start to the goal." frontier = deque([start]) # A queue of states previous = {start: None} # start has no previous state; other states will while frontier: s = frontier.popleft() if s == goal: return path(previous, s) for s2 in neighbors[s]: if s2 not in previous: frontier.append(s2) previous[s2] = s def path(previous, s): "Return a list of states that lead to state s, according to the previous dict." return [] if (s is None) else path(previous, previous[s]) + [s] ``` A couple of things to note: 1. We always add new states to the end of the frontier queue. 
That means that all the states that are adjacent to the start state will come first in the queue, then all the states that are two steps away, then three steps, etc. That's what we mean by *breadth-first* search. 2. We recover the path to an `end` state by following the trail of `previous[end]` pointers, all the way back to `start`. The dict `previous` is a map of `{state: previous_state}`. 3. When we finally get an `s` that is the goal state, we know we have found a shortest path, because any other state in the queue must correspond to a path that is as long or longer. 3. Note that `previous` contains all the states that are currently in `frontier` as well as all the states that were in `frontier` in the past. 4. If no path to the goal is found, then `breadth_first` returns `None`. If a path is found, it returns the sequence of states on the path. Some examples: ``` breadth_first('A', 'B', romania) breadth_first('L', 'N', romania) breadth_first('N', 'L', romania) breadth_first('E', 'E', romania) ``` Now let's try a different kind of problem that can be solved with the same search function. ## Word Ladders Problem A *word ladder* problem is this: given a start word and a goal word, find the shortest way to transform the start word into the goal word by changing one letter at a time, such that each change results in a word. For example starting with `green` we can reach `grass` in 7 steps: `green` &rarr; `greed` &rarr; `treed` &rarr; `trees` &rarr; `tress` &rarr; `cress` &rarr; `crass` &rarr; `grass` We will need a dictionary of words. We'll use 5-letter words from the [Stanford GraphBase](http://www-cs-faculty.stanford.edu/~uno/sgb.html) project for this purpose. Let's get that file from aimadata. 
``` from search import * sgb_words = DataFile("EN-text/sgb-words.txt") ``` We can assign `WORDS` to be the set of all the words in this file: ``` WORDS = set(sgb_words.read().split()) len(WORDS) ``` And define `neighboring_words` to return the set of all words that are a one-letter change away from a given `word`: ``` def neighboring_words(word): "All words that are one letter away from this word." neighbors = {word[:i] + c + word[i+1:] for i in range(len(word)) for c in 'abcdefghijklmnopqrstuvwxyz' if c != word[i]} return neighbors & WORDS ``` For example: ``` neighboring_words('hello') neighboring_words('world') ``` Now we can create `word_neighbors` as a dict of `{word: {neighboring_word, ...}}`: ``` word_neighbors = {word: neighboring_words(word) for word in WORDS} ``` Now the `breadth_first` function can be used to solve a word ladder problem: ``` breadth_first('green', 'grass', word_neighbors) breadth_first('smart', 'brain', word_neighbors) breadth_first('frown', 'smile', word_neighbors) ``` # More General Search Algorithms Now we'll embelish the `breadth_first` algorithm to make a family of search algorithms with more capabilities: 1. We distinguish between an *action* and the *result* of an action. 3. We allow different measures of the cost of a solution (not just the number of steps in the sequence). 4. We search through the state space in an order that is more likely to lead to an optimal solution quickly. Here's how we do these things: 1. Instead of having a graph of neighboring states, we instead have an object of type *Problem*. A Problem has one method, `Problem.actions(state)` to return a collection of the actions that are allowed in a state, and another method, `Problem.result(state, action)` that says what happens when you take an action. 2. We keep a set, `explored` of states that have already been explored. We also have a class, `Frontier`, that makes it efficient to ask if a state is on the frontier. 3. 
Each action has a cost associated with it (in fact, the cost can vary with both the state and the action). 4. The `Frontier` class acts as a priority queue, allowing the "best" state to be explored next. We represent a sequence of actions and resulting states as a linked list of `Node` objects. The algorithm `breadth_first_search` is basically the same as `breadth_first`, but using our new conventions: ``` def breadth_first_search(problem): "Search for goal; paths with least number of steps first." if problem.is_goal(problem.initial): return Node(problem.initial) frontier = FrontierQ(Node(problem.initial), LIFO=False) explored = set() while frontier: node = frontier.pop() explored.add(node.state) for action in problem.actions(node.state): child = node.child(problem, action) if child.state not in explored and child.state not in frontier: if problem.is_goal(child.state): return child frontier.add(child) ``` Next is `uniform_cost_search`, in which each step can have a different cost, and we still consider first one os the states with minimum cost so far. ``` def uniform_cost_search(problem, costfn=lambda node: node.path_cost): frontier = FrontierPQ(Node(problem.initial), costfn) explored = set() while frontier: node = frontier.pop() if problem.is_goal(node.state): return node explored.add(node.state) for action in problem.actions(node.state): child = node.child(problem, action) if child.state not in explored and child not in frontier: frontier.add(child) elif child in frontier and frontier.cost[child] < child.path_cost: frontier.replace(child) ``` Finally, `astar_search` in which the cost includes an estimate of the distance to the goal as well as the distance travelled so far. 
``` def astar_search(problem, heuristic): costfn = lambda node: node.path_cost + heuristic(node.state) return uniform_cost_search(problem, costfn) ``` # Search Tree Nodes The solution to a search problem is now a linked list of `Node`s, where each `Node` includes a `state` and the `path_cost` of getting to the state. In addition, for every `Node` except for the first (root) `Node`, there is a previous `Node` (indicating the state that lead to this `Node`) and an `action` (indicating the action taken to get here). ``` class Node(object): """A node in a search tree. A search tree is spanning tree over states. A Node contains a state, the previous node in the tree, the action that takes us from the previous state to this state, and the path cost to get to this state. If a state is arrived at by two paths, then there are two nodes with the same state.""" def __init__(self, state, previous=None, action=None, step_cost=1): "Create a search tree Node, derived from a previous Node by an action." self.state = state self.previous = previous self.action = action self.path_cost = 0 if previous is None else (previous.path_cost + step_cost) def __repr__(self): return "<Node {}: {}>".format(self.state, self.path_cost) def __lt__(self, other): return self.path_cost < other.path_cost def child(self, problem, action): "The Node you get by taking an action from this Node." result = problem.result(self.state, action) return Node(result, self, action, problem.step_cost(self.state, action, result)) ``` # Frontiers A frontier is a collection of Nodes that acts like both a Queue and a Set. A frontier, `f`, supports these operations: * `f.add(node)`: Add a node to the Frontier. * `f.pop()`: Remove and return the "best" node from the frontier. * `f.replace(node)`: add this node and remove a previous node with the same state. * `state in f`: Test if some node in the frontier has arrived at state. * `f[state]`: returns the node corresponding to this state in frontier. 
* `len(f)`: The number of Nodes in the frontier. When the frontier is empty, `f` is *false*. We provide two kinds of frontiers: One for "regular" queues, either first-in-first-out (for breadth-first search) or last-in-first-out (for depth-first search), and one for priority queues, where you can specify what cost function on nodes you are trying to minimize. ``` from collections import OrderedDict import heapq class FrontierQ(OrderedDict): "A Frontier that supports FIFO or LIFO Queue ordering." def __init__(self, initial, LIFO=False): """Initialize Frontier with an initial Node. If LIFO is True, pop from the end first; otherwise from front first.""" self.LIFO = LIFO self.add(initial) def add(self, node): "Add a node to the frontier." self[node.state] = node def pop(self): "Remove and return the next Node in the frontier." (state, node) = self.popitem(self.LIFO) return node def replace(self, node): "Make this node replace the nold node with the same state." del self[node.state] self.add(node) class FrontierPQ: "A Frontier ordered by a cost function; a Priority Queue." def __init__(self, initial, costfn=lambda node: node.path_cost): "Initialize Frontier with an initial Node, and specify a cost function." self.heap = [] self.states = {} self.costfn = costfn self.add(initial) def add(self, node): "Add node to the frontier." cost = self.costfn(node) heapq.heappush(self.heap, (cost, node)) self.states[node.state] = node def pop(self): "Remove and return the Node with minimum cost." (cost, node) = heapq.heappop(self.heap) self.states.pop(node.state, None) # remove state return node def replace(self, node): "Make this node replace a previous node with the same state." 
if node.state not in self: raise ValueError('{} not there to replace'.format(node.state)) for (i, (cost, old_node)) in enumerate(self.heap): if old_node.state == node.state: self.heap[i] = (self.costfn(node), node) heapq._siftdown(self.heap, 0, i) return def __contains__(self, state): return state in self.states def __len__(self): return len(self.heap) ``` # Search Problems `Problem` is the abstract class for all search problems. You can define your own class of problems as a subclass of `Problem`. You will need to override the `actions` and `result` method to describe how your problem works. You will also have to either override `is_goal` or pass a collection of goal states to the initialization method. If actions have different costs, you should override the `step_cost` method. ``` class Problem(object): """The abstract class for a search problem.""" def __init__(self, initial=None, goals=(), **additional_keywords): """Provide an initial state and optional goal states. A subclass can have additional keyword arguments.""" self.initial = initial # The initial state of the problem. self.goals = goals # A collection of possibe goal states. self.__dict__.update(**additional_keywords) def actions(self, state): "Return a list of actions executable in this state." raise NotImplementedError # Override this! def result(self, state, action): "The state that results from executing this action in this state." raise NotImplementedError # Override this! def is_goal(self, state): "True if the state is a goal." return state in self.goals # Optionally override this! def step_cost(self, state, action, result=None): "The cost of taking this action from this state." return 1 # Override this if actions have different costs def action_sequence(node): "The sequence of actions to get to this node." actions = [] while node.previous: actions.append(node.action) node = node.previous return actions[::-1] def state_sequence(node): "The sequence of states to get to this node." 
states = [node.state] while node.previous: node = node.previous states.append(node.state) return states[::-1] ``` # Two Location Vacuum World ``` dirt = '*' clean = ' ' class TwoLocationVacuumProblem(Problem): """A Vacuum in a world with two locations, and dirt. Each state is a tuple of (location, dirt_in_W, dirt_in_E).""" def actions(self, state): return ('W', 'E', 'Suck') def is_goal(self, state): return dirt not in state def result(self, state, action): "The state that results from executing this action in this state." (loc, dirtW, dirtE) = state if action == 'W': return ('W', dirtW, dirtE) elif action == 'E': return ('E', dirtW, dirtE) elif action == 'Suck' and loc == 'W': return (loc, clean, dirtE) elif action == 'Suck' and loc == 'E': return (loc, dirtW, clean) else: raise ValueError('unknown action: ' + action) problem = TwoLocationVacuumProblem(initial=('W', dirt, dirt)) result = uniform_cost_search(problem) result action_sequence(result) state_sequence(result) problem = TwoLocationVacuumProblem(initial=('E', clean, dirt)) result = uniform_cost_search(problem) action_sequence(result) ``` # Water Pouring Problem Here is another problem domain, to show you how to define one. The idea is that we have a number of water jugs and a water tap and the goal is to measure out a specific amount of water (in, say, ounces or liters). You can completely fill or empty a jug, but because the jugs don't have markings on them, you can't partially fill them with a specific amount. You can, however, pour one jug into another, stopping when the second is full or the first is empty. ``` class PourProblem(Problem): """Problem about pouring water between jugs to achieve some water level. Each state is a tuples of levels. In the initialization, provide a tuple of capacities, e.g.
PourProblem(capacities=(8, 16, 32), initial=(2, 4, 3), goals={7}), which means three jugs of capacity 8, 16, 32, currently filled with 2, 4, 3 units of water, respectively, and the goal is to get a level of 7 in any one of the jugs.""" def actions(self, state): """The actions executable in this state.""" jugs = range(len(state)) return ([('Fill', i) for i in jugs if state[i] != self.capacities[i]] + [('Dump', i) for i in jugs if state[i] != 0] + [('Pour', i, j) for i in jugs for j in jugs if i != j]) def result(self, state, action): """The state that results from executing this action in this state.""" result = list(state) act, i, j = action[0], action[1], action[-1] if act == 'Fill': # Fill i to capacity result[i] = self.capacities[i] elif act == 'Dump': # Empty i result[i] = 0 elif act == 'Pour': a, b = state[i], state[j] result[i], result[j] = ((0, a + b) if (a + b <= self.capacities[j]) else (a + b - self.capacities[j], self.capacities[j])) else: raise ValueError('unknown action', action) return tuple(result) def is_goal(self, state): """True if any of the jugs has a level equal to one of the goal levels.""" return any(level in self.goals for level in state) p7 = PourProblem(initial=(2, 0), capacities=(5, 13), goals={7}) p7.result((2, 0), ('Fill', 1)) result = uniform_cost_search(p7) action_sequence(result) ``` # Visualization Output ``` def showpath(searcher, problem): "Show what happens when searcvher solves problem." problem = Instrumented(problem) print('\n{}:'.format(searcher.__name__)) result = searcher(problem) if result: actions = action_sequence(result) state = problem.initial path_cost = 0 for steps, action in enumerate(actions, 1): path_cost += problem.step_cost(state, action, 0) result = problem.result(state, action) print(' {} =={}==> {}; cost {} after {} steps' .format(state, action, result, path_cost, steps, '; GOAL!' 
if problem.is_goal(result) else '')) state = result msg = 'GOAL FOUND' if result else 'no solution' print('{} after {} results and {} goal checks' .format(msg, problem._counter['result'], problem._counter['is_goal'])) from collections import Counter class Instrumented: "Instrument an object to count all the attribute accesses in _counter." def __init__(self, obj): self._object = obj self._counter = Counter() def __getattr__(self, attr): self._counter[attr] += 1 return getattr(self._object, attr) showpath(uniform_cost_search, p7) p = PourProblem(initial=(0, 0), capacities=(7, 13), goals={2}) showpath(uniform_cost_search, p) class GreenPourProblem(PourProblem): def step_cost(self, state, action, result=None): "The cost is the amount of water used in a fill." if action[0] == 'Fill': i = action[1] return self.capacities[i] - state[i] return 0 p = GreenPourProblem(initial=(0, 0), capacities=(7, 13), goals={2}) showpath(uniform_cost_search, p) def compare_searchers(problem, searchers=None): "Apply each of the search algorithms to the problem, and show results" if searchers is None: searchers = (breadth_first_search, uniform_cost_search) for searcher in searchers: showpath(searcher, problem) compare_searchers(p) ``` # Random Grid An environment where you can move in any of 4 directions, unless there is an obstacle there. 
``` import random N, S, E, W = DIRECTIONS = [(0, 1), (0, -1), (1, 0), (-1, 0)] def Grid(width, height, obstacles=0.1): """A 2-D grid, width x height, with obstacles that are either a collection of points, or a fraction between 0 and 1 indicating the density of obstacles, chosen at random.""" grid = {(x, y) for x in range(width) for y in range(height)} if isinstance(obstacles, (float, int)): obstacles = random.sample(grid, int(width * height * obstacles)) def neighbors(x, y): for (dx, dy) in DIRECTIONS: (nx, ny) = (x + dx, y + dy) if (nx, ny) not in obstacles and 0 <= nx < width and 0 <= ny < height: yield (nx, ny) return {(x, y): list(neighbors(x, y)) for x in range(width) for y in range(height)} Grid(5, 5) class GridProblem(Problem): "Create with a call like GridProblem(grid=Grid(10, 10), initial=(0, 0), goal=(9, 9))" def actions(self, state): return DIRECTIONS def result(self, state, action): #print('ask for result of', state, action) (x, y) = state (dx, dy) = action r = (x + dx, y + dy) return r if r in self.grid[state] else state gp = GridProblem(grid=Grid(5, 5, 0.3), initial=(0, 0), goals={(4, 4)}) showpath(uniform_cost_search, gp) ``` # Finding a hard PourProblem What solvable two-jug PourProblem requires the most steps? We can define the hardness as the number of steps, and then iterate over all PourProblems with capacities up to size M, keeping the hardest one. 
``` def hardness(problem): L = breadth_first_search(problem) #print('hardness', problem.initial, problem.capacities, problem.goals, L) return len(action_sequence(L)) if (L is not None) else 0 hardness(p7) action_sequence(breadth_first_search(p7)) C = 9 # Maximum capacity to consider phard = max((PourProblem(initial=(a, b), capacities=(A, B), goals={goal}) for A in range(C+1) for B in range(C+1) for a in range(A) for b in range(B) for goal in range(max(A, B))), key=hardness) phard.initial, phard.capacities, phard.goals showpath(breadth_first_search, PourProblem(initial=(0, 0), capacities=(7, 9), goals={8})) showpath(uniform_cost_search, phard) class GridProblem(Problem): """A Grid.""" def actions(self, state): return ['N', 'S', 'E', 'W'] def result(self, state, action): """The state that results from executing this action in this state.""" (W, H) = self.size if action == 'N' and state > W: return state - W if action == 'S' and state + W < W * W: return state + W if action == 'E' and (state + 1) % W !=0: return state + 1 if action == 'W' and state % W != 0: return state - 1 return state compare_searchers(GridProblem(initial=0, goals={44}, size=(10, 10))) def test_frontier(): #### Breadth-first search with FIFO Q f = FrontierQ(Node(1), LIFO=False) assert 1 in f and len(f) == 1 f.add(Node(2)) f.add(Node(3)) assert 1 in f and 2 in f and 3 in f and len(f) == 3 assert f.pop().state == 1 assert 1 not in f and 2 in f and 3 in f and len(f) == 2 assert f assert f.pop().state == 2 assert f.pop().state == 3 assert not f #### Depth-first search with LIFO Q f = FrontierQ(Node('a'), LIFO=True) for s in 'bcdef': f.add(Node(s)) assert len(f) == 6 and 'a' in f and 'c' in f and 'f' in f for s in 'fedcba': assert f.pop().state == s assert not f #### Best-first search with Priority Q f = FrontierPQ(Node(''), lambda node: len(node.state)) assert '' in f and len(f) == 1 and f for s in ['book', 'boo', 'bookie', 'bookies', 'cook', 'look', 'b']: assert s not in f f.add(Node(s)) assert s in f 
assert f.pop().state == '' assert f.pop().state == 'b' assert f.pop().state == 'boo' assert {f.pop().state for _ in '123'} == {'book', 'cook', 'look'} assert f.pop().state == 'bookie' #### Romania: Two paths to Bucharest; cheapest one found first S = Node('S') SF = Node('F', S, 'S->F', 99) SFB = Node('B', SF, 'F->B', 211) SR = Node('R', S, 'S->R', 80) SRP = Node('P', SR, 'R->P', 97) SRPB = Node('B', SRP, 'P->B', 101) f = FrontierPQ(S) f.add(SF); f.add(SR), f.add(SRP), f.add(SRPB); f.add(SFB) def cs(n): return (n.path_cost, n.state) # cs: cost and state assert cs(f.pop()) == (0, 'S') assert cs(f.pop()) == (80, 'R') assert cs(f.pop()) == (99, 'F') assert cs(f.pop()) == (177, 'P') assert cs(f.pop()) == (278, 'B') return 'test_frontier ok' test_frontier() %matplotlib inline import matplotlib.pyplot as plt p = plt.plot([i**2 for i in range(10)]) plt.savefig('destination_path.eps', format='eps', dpi=1200) import itertools import random # http://stackoverflow.com/questions/10194482/custom-matplotlib-plot-chess-board-like-table-with-colored-cells from matplotlib.table import Table def main(): grid_table(8, 8) plt.axis('scaled') plt.show() def grid_table(nrows, ncols): fig, ax = plt.subplots() ax.set_axis_off() colors = ['white', 'lightgrey', 'dimgrey'] tb = Table(ax, bbox=[0,0,2,2]) for i,j in itertools.product(range(ncols), range(nrows)): tb.add_cell(i, j, 2./ncols, 2./nrows, text='{:0.2f}'.format(0.1234), loc='center', facecolor=random.choice(colors), edgecolor='grey') # facecolors= ax.add_table(tb) #ax.plot([0, .3], [.2, .2]) #ax.add_line(plt.Line2D([0.3, 0.5], [0.7, 0.7], linewidth=2, color='blue')) return fig main() import collections class defaultkeydict(collections.defaultdict): """Like defaultdict, but the default_factory is a function of the key. >>> d = defaultkeydict(abs); d[-42] 42 """ def __missing__(self, key): self[key] = self.default_factory(key) return self[key] ```
github_jupyter
# Hyper parameters The goal here is to demonstrate how to optimise hyper-parameters of various models The kernel is a short version of https://www.kaggle.com/mlisovyi/featureengineering-basic-model ``` max_events = None import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # needed for 3D scatter plots %matplotlib inline import seaborn as sns import gc import warnings warnings.filterwarnings("ignore") PATH='../input/' import os print(os.listdir(PATH)) ``` Read in data ``` train = pd.read_csv('{}/train.csv'.format(PATH), nrows=max_events) test = pd.read_csv('{}/test.csv'.format(PATH), nrows=max_events) y = train['Cover_Type'] train.drop('Cover_Type', axis=1, inplace=True) train.drop('Id', axis=1, inplace=True) test.drop('Id', axis=1, inplace=True) print('Train shape: {}'.format(train.shape)) print('Test shape: {}'.format(test.shape)) train.info(verbose=False) ``` ## OHE into LE Helper function to transfer One-Hot Encoding (OHE) into a Label Encoding (LE). It was taken from https://www.kaggle.com/mlisovyi/lighgbm-hyperoptimisation-with-f1-macro The reason to convert OHE into LE is that we plan to use a tree-based model and such models are dealing well with simple interger-label encoding. Note, that this way we introduce an ordering between categories, which is not there in reality, but in practice in most use cases GBMs handle it well anyway. ``` def convert_OHE2LE(df): tmp_df = df.copy(deep=True) for s_ in ['Soil_Type', 'Wilderness_Area']: cols_s_ = [f_ for f_ in df.columns if f_.startswith(s_)] sum_ohe = tmp_df[cols_s_].sum(axis=1).unique() #deal with those OHE, where there is a sum over columns == 0 if 0 in sum_ohe: print('The OHE in {} is incomplete. 
A new column will be added before label encoding' .format(s_)) # dummy colmn name to be added col_dummy = s_+'_dummy' # add the column to the dataframe tmp_df[col_dummy] = (tmp_df[cols_s_].sum(axis=1) == 0).astype(np.int8) # add the name to the list of columns to be label-encoded cols_s_.append(col_dummy) # proof-check, that now the category is complete sum_ohe = tmp_df[cols_s_].sum(axis=1).unique() if 0 in sum_ohe: print("The category completion did not work") tmp_df[s_ + '_LE'] = tmp_df[cols_s_].idxmax(axis=1).str.replace(s_,'').astype(np.uint16) tmp_df.drop(cols_s_, axis=1, inplace=True) return tmp_df def train_test_apply_func(train_, test_, func_): xx = pd.concat([train_, test_]) xx_func = func_(xx) train_ = xx_func.iloc[:train_.shape[0], :] test_ = xx_func.iloc[train_.shape[0]:, :] del xx, xx_func return train_, test_ train_x, test_x = train_test_apply_func(train, test, convert_OHE2LE) ``` One little caveat: looking through the OHE, `Soil_Type 7, 15`, are present in the test, but not in the training data The head of the training dataset ``` train_x.head() ``` # Let's do some feature engineering ``` def preprocess(df_): df_['fe_E_Min_02HDtH'] = (df_['Elevation']- df_['Horizontal_Distance_To_Hydrology']*0.2).astype(np.float32) df_['fe_Distance_To_Hydrology'] = np.sqrt(df_['Horizontal_Distance_To_Hydrology']**2 + df_['Vertical_Distance_To_Hydrology']**2).astype(np.float32) feats_sub = [('Elevation_Min_VDtH', 'Elevation', 'Vertical_Distance_To_Hydrology'), ('HD_Hydrology_Min_Roadways', 'Horizontal_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways'), ('HD_Hydrology_Min_Fire', 'Horizontal_Distance_To_Hydrology', 'Horizontal_Distance_To_Fire_Points')] feats_add = [('Elevation_Add_VDtH', 'Elevation', 'Vertical_Distance_To_Hydrology')] for f_new, f1, f2 in feats_sub: df_['fe_' + f_new] = (df_[f1] - df_[f2]).astype(np.float32) for f_new, f1, f2 in feats_add: df_['fe_' + f_new] = (df_[f1] + df_[f2]).astype(np.float32) # The feature is advertised in 
https://douglas-fraser.com/forest_cover_management.pdf df_['fe_Shade9_Mul_VDtH'] = (df_['Hillshade_9am'] * df_['Vertical_Distance_To_Hydrology']).astype(np.float32) # this mapping comes from https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info climatic_zone = {} geologic_zone = {} for i in range(1,41): if i <= 6: climatic_zone[i] = 2 geologic_zone[i] = 7 elif i <= 8: climatic_zone[i] = 3 geologic_zone[i] = 5 elif i == 9: climatic_zone[i] = 4 geologic_zone[i] = 2 elif i <= 13: climatic_zone[i] = 4 geologic_zone[i] = 7 elif i <= 15: climatic_zone[i] = 5 geologic_zone[i] = 1 elif i <= 17: climatic_zone[i] = 6 geologic_zone[i] = 1 elif i == 18: climatic_zone[i] = 6 geologic_zone[i] = 7 elif i <= 21: climatic_zone[i] = 7 geologic_zone[i] = 1 elif i <= 23: climatic_zone[i] = 7 geologic_zone[i] = 2 elif i <= 34: climatic_zone[i] = 7 geologic_zone[i] = 7 else: climatic_zone[i] = 8 geologic_zone[i] = 7 df_['Climatic_zone_LE'] = df_['Soil_Type_LE'].map(climatic_zone).astype(np.uint8) df_['Geologic_zone_LE'] = df_['Soil_Type_LE'].map(geologic_zone).astype(np.uint8) return df_ train_x = preprocess(train_x) test_x = preprocess(test_x) ``` # Optimise various classifiers ``` from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier from sklearn.metrics import accuracy_score, confusion_matrix, classification_report from sklearn.model_selection import train_test_split from sklearn.model_selection import RandomizedSearchCV, GridSearchCV from sklearn.linear_model import LogisticRegression import lightgbm as lgb ``` We subtract 1 to have the labels starting with 0, which is required for LightGBM ``` y = y-1 X_train, X_test, y_train, y_test = train_test_split(train_x, y, test_size=0.15, random_state=315, stratify=y) ``` Parameters to be used in optimisation for various models ``` def learning_rate_decay_power_0995(current_iter): base_learning_rate = 0.15 lr = base_learning_rate * np.power(.995, current_iter) return lr if lr > 1e-2 else 1e-2 
clfs = {'rf': (RandomForestClassifier(n_estimators=200, max_depth=1, random_state=314, n_jobs=4), {'max_depth': [20,25,30,35,40,45,50]}, {}), 'xt': (ExtraTreesClassifier(n_estimators=200, max_depth=1, max_features='auto',random_state=314, n_jobs=4), {'max_depth': [20,25,30,35,40,45,50]}, {}), 'lgbm': (lgb.LGBMClassifier(max_depth=-1, min_child_samples=400, random_state=314, silent=True, metric='None', n_jobs=4, n_estimators=5000, learning_rate=0.1), {'colsample_bytree': [0.75], 'min_child_weight': [0.1,1,10], 'num_leaves': [18, 20,22], 'subsample': [0.75]}, {'eval_set': [(X_test, y_test)], 'eval_metric': 'multi_error', 'verbose':500, 'early_stopping_rounds':100, 'callbacks':[lgb.reset_parameter(learning_rate=learning_rate_decay_power_0995)]} ) } gss = {} for name, (clf, clf_pars, fit_pars) in clfs.items(): print('--------------- {} -----------'.format(name)) gs = GridSearchCV(clf, param_grid=clf_pars, scoring='accuracy', cv=5, n_jobs=1, refit=True, verbose=True) gs = gs.fit(X_train, y_train, **fit_pars) print('{}: train = {:.4f}, test = {:.4f}+-{:.4f} with best params {}'.format(name, gs.cv_results_['mean_train_score'][gs.best_index_], gs.cv_results_['mean_test_score'][gs.best_index_], gs.cv_results_['std_test_score'][gs.best_index_], gs.best_params_ )) print("Valid+-Std Train : Parameters") for i in np.argsort(gs.cv_results_['mean_test_score'])[-5:]: print('{1:.3f}+-{3:.3f} {2:.3f} : {0}'.format(gs.cv_results_['params'][i], gs.cv_results_['mean_test_score'][i], gs.cv_results_['mean_train_score'][i], gs.cv_results_['std_test_score'][i])) gss[name] = gs # gss = {} # for name, (clf, clf_pars, fit_pars) in clfs.items(): # if name == 'lgbm': # continue # print('--------------- {} -----------'.format(name)) # gs = GridSearchCV(clf, param_grid=clf_pars, # scoring='accuracy', # cv=5, # n_jobs=1, # refit=True, # verbose=True) # gs = gs.fit(X_train, y_train, **fit_pars) # print('{}: train = {:.4f}, test = {:.4f}+-{:.4f} with best params {}'.format(name, # 
gs.cv_results_['mean_train_score'][gs.best_index_], # gs.cv_results_['mean_test_score'][gs.best_index_], # gs.cv_results_['std_test_score'][gs.best_index_], # gs.best_params_ # )) # print("Valid+-Std Train : Parameters") # for i in np.argsort(gs.cv_results_['mean_test_score'])[-5:]: # print('{1:.3f}+-{3:.3f} {2:.3f} : {0}'.format(gs.cv_results_['params'][i], # gs.cv_results_['mean_test_score'][i], # gs.cv_results_['mean_train_score'][i], # gs.cv_results_['std_test_score'][i])) # gss[name] = gs ```
github_jupyter
``` #hide #default_exp clean from nbdev.showdoc import show_doc #export import io,sys,json,glob,re from fastcore.script import call_parse,Param,bool_arg from fastcore.utils import ifnone from nbdev.imports import Config from nbdev.export import nbglob from pathlib import Path #hide #For tests only from nbdev.imports import * ``` # Clean notebooks > Strip notebooks from superfluous metadata To avoid pointless conflicts while working with jupyter notebooks (with different execution counts or cell metadata), it is recommended to clean the notebooks before committing anything (done automatically if you install the git hooks with `nbdev_install_git_hooks`). The following functions are used to do that. ## Utils ``` #export def rm_execution_count(o): "Remove execution count in `o`" if 'execution_count' in o: o['execution_count'] = None #export colab_json = "application/vnd.google.colaboratory.intrinsic+json" def clean_output_data_vnd(o): "Remove `application/vnd.google.colaboratory.intrinsic+json` in data entries" if 'data' in o: data = o['data'] if colab_json in data: new_data = {k:v for k,v in data.items() if k != colab_json} o['data'] = new_data #export def clean_cell_output(cell): "Remove execution count in `cell`" if 'outputs' in cell: for o in cell['outputs']: rm_execution_count(o) clean_output_data_vnd(o) o.get('metadata', o).pop('tags', None) #export cell_metadata_keep = ["hide_input"] nb_metadata_keep = ["kernelspec", "jekyll", "jupytext", "doc"] #export def clean_cell(cell, clear_all=False): "Clean `cell` by removing superfluous metadata or everything except the input if `clear_all`" rm_execution_count(cell) if 'outputs' in cell: if clear_all: cell['outputs'] = [] else: clean_cell_output(cell) if cell['source'] == ['']: cell['source'] = [] cell['metadata'] = {} if clear_all else {k:v for k,v in cell['metadata'].items() if k in cell_metadata_keep} tst = {'cell_type': 'code', 'execution_count': 26, 'metadata': {'hide_input': True, 'meta': 23}, 'outputs': 
[{'execution_count': 2, 'data': { 'application/vnd.google.colaboratory.intrinsic+json': { 'type': 'string'}, 'plain/text': ['sample output',] }, 'output': 'super'}], 'source': 'awesome_code'} tst1 = tst.copy() clean_cell(tst) test_eq(tst, {'cell_type': 'code', 'execution_count': None, 'metadata': {'hide_input': True}, 'outputs': [{'execution_count': None, 'data': {'plain/text': ['sample output',]}, 'output': 'super'}], 'source': 'awesome_code'}) clean_cell(tst1, clear_all=True) test_eq(tst1, {'cell_type': 'code', 'execution_count': None, 'metadata': {}, 'outputs': [], 'source': 'awesome_code'}) tst2 = { 'metadata': {'tags':[]}, 'outputs': [{ 'metadata': { 'tags':[] }}], "source": [ "" ]} clean_cell(tst2, clear_all=False) test_eq(tst2, { 'metadata': {}, 'outputs': [{ 'metadata':{}}], 'source': []}) #export def clean_nb(nb, clear_all=False): "Clean `nb` from superfluous metadata, passing `clear_all` to `clean_cell`" for c in nb['cells']: clean_cell(c, clear_all=clear_all) nb['metadata'] = {k:v for k,v in nb['metadata'].items() if k in nb_metadata_keep } tst = {'cell_type': 'code', 'execution_count': 26, 'metadata': {'hide_input': True, 'meta': 23}, 'outputs': [{'execution_count': 2, 'data': { 'application/vnd.google.colaboratory.intrinsic+json': { 'type': 'string'}, 'plain/text': ['sample output',] }, 'output': 'super'}], 'source': 'awesome_code'} nb = {'metadata': {'kernelspec': 'some_spec', 'jekyll': 'some_meta', 'meta': 37}, 'cells': [tst]} clean_nb(nb) test_eq(nb['cells'][0], {'cell_type': 'code', 'execution_count': None, 'metadata': {'hide_input': True}, 'outputs': [{'execution_count': None, 'data': { 'plain/text': ['sample output',]}, 'output': 'super'}], 'source': 'awesome_code'}) test_eq(nb['metadata'], {'kernelspec': 'some_spec', 'jekyll': 'some_meta'}) #export def _print_output(nb): "Print `nb` in stdout for git things" _output_stream = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') x = json.dumps(nb, sort_keys=True, indent=1, ensure_ascii=False) 
_output_stream.write(x) _output_stream.write("\n") _output_stream.flush() ``` ## Main function ``` #export @call_parse def nbdev_clean_nbs(fname:Param("A notebook name or glob to convert", str)=None, clear_all:Param("Clean all metadata and outputs", bool_arg)=False, disp:Param("Print the cleaned outputs", bool_arg)=False, read_input_stream:Param("Read input stram and not nb folder")=False): "Clean all notebooks in `fname` to avoid merge conflicts" #Git hooks will pass the notebooks in the stdin if read_input_stream and sys.stdin: input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8') nb = json.load(input_stream) clean_nb(nb, clear_all=clear_all) _print_output(nb) return path = None if fname is None: try: path = get_config().path("nbs_path") except Exception as e: path = Path.cwd() files = nbglob(fname=ifnone(fname,path)) for f in files: if not str(f).endswith('.ipynb'): continue nb = json.loads(open(f, 'r', encoding='utf-8').read()) clean_nb(nb, clear_all=clear_all) if disp: _print_output(nb) else: x = json.dumps(nb, sort_keys=True, indent=1, ensure_ascii=False) with io.open(f, 'w', encoding='utf-8') as f: f.write(x) f.write("\n") ``` By default (`fname` left to `None`), the all the notebooks in `lib_folder` are cleaned. You can opt in to fully clean the notebook by removing every bit of metadata and the cell outputs by passing `clear_all=True`. `disp` is only used for internal use with git hooks and will print the clean notebook instead of saving it. Same for `read_input_stream` that will read the notebook from the input stream instead of the file names. ## Export - ``` #hide from nbdev.export import notebook2script notebook2script() ```
github_jupyter
# openCV Configure for Raspberry PI What is openCV? * Collection of computer vision tools in one place * Computational photography to object detection Where is openCV? * http://opencv.org/ What resources did I use? * http://www.pyimagesearch.com/2016/04/18/install-guide-raspberry-pi-3-raspbian-jessie-opencv-3/ * http://www.pyimagesearch.com/2016/11/21/raspbian-opencv-pre-configured-and-pre-installed/ The step by step of getting it going. 1. Make sure we have enough room. * ```df -h``` * expand the file system with * ```sudo raspi-config``` 1. Make room with removing the wolfram image * ```sudo apt-get purge wolfram-engine``` ## Install the tools 1. Dependencies ``` sudo apt-get update sudo apt-get upgrade ``` Make sure all the dev depencies for Python are installed ``` sudo apt-get install python3-dev sudo apt install python3-matplotlib ``` ``` sudo pip3 opencv-contrib-python ``` Scripts Initial ``` sudo apt-get install build-essential cmake pkg-config sudo apt-get install libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev sudo apt-get install libavcodec-dev libavformat-dev libswscale-dev libv4l-dev sudo apt-get install libxvidcore-dev libx264-dev sudo apt-get install libgtk2.0-dev libgtk-3-dev sudo apt-get install libatlas-base-dev gfortran ``` Extras just in case for camera and qt ``` sudo apt-get install libqtgui4 sudo modprobe bcm2835-v4l2 sudo apt-get install libqt4-test ``` Required but not clearly needed until runtime ``` sudo apt-get install libhdf5-dev sudo apt-get install libhdf5-serial-dev ``` ### Old origninal ----- CMake is needed ``` sudo apt-get install build-essential cmake pkg-config ``` Image file support ``` sudo apt-get install libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev ``` Video I/O packages ``` sudo apt-get install libavcodec-dev libavformat-dev libswscale-dev libv4l-dev sudo apt-get install libxvidcore-dev libx264-dev ``` highGUI gto depedencies ``` sudo apt-get install libgtk2.0-dev ``` FORTRAN optimation matrix ``` sudo apt-get 
install libatlas-base-dev gfortran ``` ``` ## Get the source code openCV 3.2 Create a directory ``` cd ~ mkdir opencv ``` ``` wget -O opencv.zip https://github.com/Itseez/opencv/archive/3.2.0.zip unzip opencv.zip ``` ``` wget -O opencv_contrib.zip https://github.com/Itseez/opencv_contrib/archive/3.2.0.zip unzip opencv_contrib.zip ``` ``` # setup virtualenv ``` sudo pip3 install virtualenv virtualenvwrapper sudo rm -rf ~/.cache/pip ``` Add this to your .profile ``` # virtualenv and virtualenvwrapper export WORKON_HOME=$HOME/.virtualenvs source /usr/local/bin/virtualenvwrapper.sh ``` Create the virtualenv for opencv for python3 ``` mkvirtualenv cv -p python3 ``` Update the environment ``` source ~/.profile workon cv ``` Now you are ready to start compiling. #Set up python in the virtualenv * Good place to start running tmux Make sure you see the prompt: ``` (cv) pi@cvpi:~/opencv $ ``` Install numpy ``` pip3 install numpy ``` #compile and install opencv * get tmux going ``` workon cv cd ~/opencv/opencv-3.2.0/ $ mkdir build $ cd build $ cmake -D CMAKE_BUILD_TYPE=RELEASE \ -D CMAKE_INSTALL_PREFIX=/usr/local \ -D INSTALL_PYTHON_EXAMPLES=ON \ -D OPENCV_EXTRA_MODULES_PATH=~/opencv/opencv_contrib-3.2.0/modules \ -D BUILD_EXAMPLES=ON .. ``` finally make it ``` make -j4 ``` ``` sudo make install sudo ldconfig ``` ``` (cv) pi@cvpi:~/opencv/opencv-3.2.0/build $ ls -l /usr/local/lib/python3.4/site-packages/ total 3212 -rw-r--r-- 1 root staff 3287708 Feb 12 04:35 cv2.cpython-34m.so ``` ``` Do you really want to exit ([y]/n)? 
y [11/1984] (cv) pi@cvpi:~/opencv/opencv-3.2.0/build $ cd /usr/local/lib/python3.4/site-pack ages/ (cv) pi@cvpi:/usr/local/lib/python3.4/site-packages $ sudo mv cv2.cpython-34m.so cv2.so (cv) pi@cvpi:/usr/local/lib/python3.4/site-packages $ cd ~/.virtualenvs/cv/lib/p ython3.4/site-packages/ (cv) pi@cvpi:~/.virtualenvs/cv/lib/python3.4/site-packages $ ln -s /usr/local/li b/python3.4/site-packages/cv2.so cv2.so (cv) pi@cvpi:~/.virtualenvs/cv/lib/python3.4/site-packages $ source ~/.profile pi@cvpi:~/.virtualenvs/cv/lib/python3.4/site-packages $ cd pi@cvpi:~ $ workon cv ``` ``` bject? -> Details about 'object', use 'object??' for extra details. In [1]: import cv2 In [2]: cv2.__version__ Out[2]: '3.2.0' ```
github_jupyter
``` %tensorflow_version 1.x import numpy as np import pandas as pd import sklearn import sklearn.metrics from sklearn import tree from matplotlib import pyplot as plt %load_ext autoreload %autoreload 2 import torch from torch.autograd import Variable as V import torchvision.models as models from torchvision import transforms as trn from torch.nn import functional as F import torch.nn as nn import os from PIL import Image import matplotlib.pyplot as plt from google.colab import drive drive.mount('/content/drive') file_name = 'categories_places365.txt' if not os.access(file_name, os.W_OK): synset_url = 'https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt' os.system('wget ' + synset_url) classes = list() with open(file_name) as class_file: for line in class_file: classes.append(line.strip().split(' ')[0][3:]) classes = np.array(classes) arch = 'resnet50' model_file = f'{arch}_places365.pth.tar' if not os.access(model_file, os.W_OK): weight_url = 'http://places2.csail.mit.edu/models_places365/' + model_file os.system('wget ' + weight_url) model = models.__dict__[arch](num_classes=365) checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage) state_dict = {str.replace(k,'module.',''): v for k,v in checkpoint['state_dict'].items()} model.load_state_dict(state_dict) model.eval() for param in model.parameters(): param.requires_grad = False 'convert model to evaluation mode with no grad' device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") !cp drive/'My Drive'/'Colab Notebooks'/'Automate local TCAV'/'final notebooks'/resized_imgs.pkl /content/ !cp drive/'My Drive'/'Colab Notebooks'/'Automate local TCAV'/'final notebooks'/masks.pkl /content/ !cp drive/'My Drive'/'Colab Notebooks'/'Automate local TCAV'/'final notebooks'/classes.pkl /content/ import pickle with open('masks.pkl', 'rb') as f: masks = pickle.load(f) with open('resized_imgs.pkl', 'rb') as f: imgs = pickle.load(f) with open('classes.pkl', 
'rb') as f: labels = np.array(pickle.load(f)) def get_segments(img, mask, threshold = 0.05): ade_classes = pd.read_csv('https://raw.githubusercontent.com/CSAILVision/sceneparsing/master/objectInfo150.csv') segs = np.unique(mask) segments = [] total = mask.shape[0]*mask.shape[1] segments_classes = [] for seg in segs: idxs = mask==seg sz = np.sum(idxs) if sz < threshold*total: continue segment = img*idxs[..., None] w, h, _ = np.nonzero(segment) segment = segment[np.min(w):np.max(w),np.min(h):np.max(h),:] segments.append(segment) segments_classes.append(ade_classes['Name'].loc[ade_classes['Idx']==seg].iloc[0]) return segments, segments_classes img_segments = [] img_segments_classes = [] for img, msk in zip(imgs, masks): segss, seg_class = get_segments(np.array(img), msk, threshold = 0.005) img_segments_classes.append(seg_class) img_segments.append(segss) centre_crop = trn.Compose([ trn.Resize((256,256)), trn.CenterCrop(224), trn.ToTensor(), # trn.Normalize([0, 0, 0], [255, 255, 225]), trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) outputs = [] def hook(module, input, output): x = nn.AdaptiveAvgPool2d(1)(output) outputs.append(x.cpu().numpy().squeeze()) full_model = model handle = full_model.layer4[2].register_forward_hook(hook) full_model.to(device) 'finished the full model with hook attached' chosen_classes = ['street', 'bedroom', 'living_room', 'bathroom', 'kitchen', 'skyscraper', 'highway', 'conference_room', 'mountain_snowy', 'office', 'corridor', 'airport_terminal', 'attic', 'mountain', 'park', 'coast', 'alley','beach', 'childs_room', 'art_gallery','castle', 'dorm_room', 'nursery', 'lobby', 'reception', 'bar', 'house', 'bridge', 'classroom'] num_classes = len(chosen_classes) idxs = [] c = 0 for ccls in chosen_classes: idx = np.argwhere(classes == ccls) if len(idx) == 0: print(f'class {ccls} is not found in places365, so we will use places365 alternate') c+=1 else: idxs.append(idx[0][0]) idxs = [] c = 0 for ccls in chosen_classes: idx = 
np.argwhere(classes == ccls) if len(idx) == 0: print(f'class {ccls} is not found in places365, so we will use places365 alternate') c+=1 else: idxs.append(idx[0][0]) del outputs outputs = [] y_model = [] for img in imgs: input_img = V(centre_crop(img).unsqueeze(0)) input_img = input_img.to(device) pred = full_model.forward(input_img) y_model.append(classes[idxs][np.argmax((pred.cpu().detach().numpy()[:,idxs]))]) outputs = np.array(outputs) img_vectors = np.copy(outputs) del outputs outputs = [] img_segmentsid = [] segments_classes = [] for i, img_seg in enumerate(img_segments): img_segmentsid.append((img_segmentsid[-1] if i>0 else 0) + len(img_seg)) for seg in img_seg: img = Image.fromarray(seg, 'RGB') input_img = V(centre_crop(img).unsqueeze(0)) input_img = input_img.to(device) pred = full_model.forward(input_img) segments_classes.append(classes[idxs][np.argmax((pred.cpu().detach().numpy()[:,idxs]))]) outputs = np.array(outputs) all_vectors = np.copy(outputs) feature_vectors = [] for i in range(len(img_segmentsid)): feature_vectors.append(all_vectors[(img_segmentsid[i-1] if i>0 else 0):img_segmentsid[i]]) segment_img = {} c = 0 for j, fvec in enumerate(feature_vectors): c_old = c c += len(fvec) if len(fvec) != 1024 else 1 for i in range(c_old, c): segment_img[i] = j import sklearn.metrics from sklearn.cluster import KMeans from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.decomposition import PCA from sklearn.pipeline import Pipeline from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier def cluster_top_k(cluster_id, k = 40): instances_ids = np.argwhere(clusters_labels==cluster_id).squeeze() instances = X[instances_ids] scores = [] for inst in instances: scores.append(-kmeans.score([inst])) final_ids = np.argsort(scores)[:k] return instances_ids[final_ids] cluster_instances_id = lambda cid: np.argwhere(clusters_labels==cid).squeeze() def 
is_single_img_clusters(cls_id): instances_ids = cluster_instances_id(cls_id) # instances_ids = cluster_representatives[cls_id] source_imgs = [segment_img[inst_id] for inst_id in instances_ids] unique_imgs = len(np.unique(source_imgs)) return 0 if unique_imgs > 1 else 1 img_distances = sklearn.metrics.pairwise.euclidean_distances(img_vectors) print('clustering the dataset ................') X = np.array(all_vectors) num_segs = X.shape[0] k = int(num_segs**0.5) kmeans = KMeans(n_clusters=k, random_state=0).fit(X) clusters_labels = kmeans.labels_ print('\ndone') cluster_names, cluster_counts = np.unique(clusters_labels, return_counts=1) print('removing small clusters ................') count_threshold = int(k*0.4) ids = cluster_counts > count_threshold good_clusters = cluster_names[ids] cluster_counts = cluster_counts[ids] print(f'\ntotal number of clusters {len(cluster_names)}') print(f'total number of good clusters is {len(good_clusters)}') print('\ndone') print('removing single image clusters ............') final_clusters = [] for j, cluster_id in enumerate(good_clusters): if is_single_img_clusters(cluster_id): continue final_clusters.append(cluster_id) final_clusters = np.array(final_clusters) print('\ndone') print("getting clusters' representatives ............") cluster_representatives = {} for cluster_id in final_clusters: cluster_representatives[cluster_id] = cluster_top_k(cluster_id, k=40) print('\ndone') print('building linear models for each concept/cluster ............') linear_models = {} model_score = {} for cluster in final_clusters: positive_instances = cluster_instances_id(cluster) negative_instances = np.argwhere(clusters_labels!=cluster).squeeze() num_samples = min(len(positive_instances), len(negative_instances)) selected_pos = np.random.choice(positive_instances, num_samples, replace=False) selected_neg = np.random.choice(negative_instances, num_samples, replace=False) train_x = np.append(X[selected_pos], X[selected_neg], axis=0) train_y = 
np.array([1] * num_samples + [0] * num_samples) train_x, val_x, train_y, val_y = train_test_split(train_x, train_y) n_components = min(256, len(train_x)) pca_model = PCA(n_components=n_components) lr_model = LogisticRegression() pca_lr_model = Pipeline(steps = [('pca', pca_model), ('lr', lr_model)]) pca_lr_model.fit(train_x, train_y) linear_models[cluster] = pca_lr_model model_score[cluster] = pca_lr_model.score(val_x, val_y) print('\ndone') print('removing low scoring clusters based on holdout accuracy ...............\n') c = 0 for k, v in list(model_score.items()): if v < 0.75: c+=1 print(f'removed cluster {k}') linear_models.pop(k, 'None') model_score.pop(k, 'None') print(f'\ndone\nremoved {c} clusters\n') print('converting feature vectors to binary concept vectors .................') for i, lm in enumerate(linear_models.keys()): if i == 0: concept_vecs = linear_models[lm].predict(img_vectors)[:, None] else: concept_vecs = np.concatenate((concept_vecs, linear_models[lm].predict(img_vectors)[:, None]), axis = -1) print(f'\nconcept vector dimension is\t {concept_vecs.shape[0]}x{concept_vecs.shape[1]}') print(f'original dimension is\t\t {img_vectors.shape[0]}x{img_vectors.shape[1]}') print('\ndone\n') random_seed = 0 print('training concept tree ..............') train_x, val_x, train_y, val_y = train_test_split(concept_vecs, y_model, test_size=0.2, random_state=random_seed) surrogate_tree = DecisionTreeClassifier(max_depth = 20, random_state=random_seed) surrogate_tree.fit(train_x, train_y) train_score = surrogate_tree.score(train_x, train_y) test_score = surrogate_tree.score(val_x, val_y) print(f'train accuracy:\t {train_score}') print(f'test accuracy:\t {test_score}') print('\ndone\n') print('training concept forest ..............') train_x, val_x, train_y, val_y = train_test_split(concept_vecs, y_model, test_size=0.2, random_state=random_seed) surrogate_tree = RandomForestClassifier(max_depth = 20, random_state=random_seed) surrogate_tree.fit(train_x, train_y) 
train_score = surrogate_tree.score(train_x, train_y) test_score = surrogate_tree.score(val_x, val_y) print(f'train accuracy:\t {train_score}') print(f'test accuracy:\t {test_score}') print('\ndone\n') print('training tree on original vectors ..............') train_x, val_x, train_y, val_y = train_test_split(img_vectors, y_model, test_size=0.2, random_state=random_seed) surrogate_tree = DecisionTreeClassifier(max_depth = 20, random_state=random_seed) surrogate_tree.fit(train_x, train_y) train_score = surrogate_tree.score(train_x, train_y) test_score = surrogate_tree.score(val_x, val_y) print(f'train accuracy:\t {train_score}') print(f'test accuracy:\t {test_score}') print('\ndone\n') print('training forest on original vectors ..............') train_x, val_x, train_y, val_y = train_test_split(img_vectors, y_model, test_size=0.2, random_state=random_seed) surrogate_tree = RandomForestClassifier(max_depth = 20, random_state=random_seed) surrogate_tree.fit(train_x, train_y) train_score = surrogate_tree.score(train_x, train_y) test_score = surrogate_tree.score(val_x, val_y) print(f'train accuracy:\t {train_score}') print(f'test accuracy:\t {test_score}') print('\ndone\n') trials = 100 dtc = [] rfc = [] dto = [] rfo = [] for i in range(trials): print('training concept tree ..............') train_x, val_x, train_y, val_y = train_test_split(concept_vecs, y_model, test_size=0.2) surrogate_tree = DecisionTreeClassifier(max_depth = 20) surrogate_tree.fit(train_x, train_y) train_score = surrogate_tree.score(train_x, train_y) test_score = surrogate_tree.score(val_x, val_y) print(f'train accuracy:\t {train_score}') print(f'test accuracy:\t {test_score}') dtc.append([train_score, test_score]) print('\ndone\n') print('training concept forest ..............') train_x, val_x, train_y, val_y = train_test_split(concept_vecs, y_model, test_size=0.2) surrogate_tree = RandomForestClassifier(max_depth = 20) surrogate_tree.fit(train_x, train_y) train_score = surrogate_tree.score(train_x, 
train_y) test_score = surrogate_tree.score(val_x, val_y) print(f'train accuracy:\t {train_score}') print(f'test accuracy:\t {test_score}') rfc.append([train_score, test_score]) print('\ndone\n') print('training tree on original vectors ..............') train_x, val_x, train_y, val_y = train_test_split(img_vectors, y_model, test_size=0.2) surrogate_tree = DecisionTreeClassifier(max_depth = 20) surrogate_tree.fit(train_x, train_y) train_score = surrogate_tree.score(train_x, train_y) test_score = surrogate_tree.score(val_x, val_y) print(f'train accuracy:\t {train_score}') print(f'test accuracy:\t {test_score}') dto.append([train_score, test_score]) print('\ndone\n') print('training forest on original vectors ..............') train_x, val_x, train_y, val_y = train_test_split(img_vectors, y_model, test_size=0.2) surrogate_tree = RandomForestClassifier(max_depth = 20) surrogate_tree.fit(train_x, train_y) train_score = surrogate_tree.score(train_x, train_y) test_score = surrogate_tree.score(val_x, val_y) print(f'train accuracy:\t {train_score}') print(f'test accuracy:\t {test_score}') rfo.append([train_score, test_score]) print('\ndone\n') print('\nFor decision tree on concept vectors:') print(f'\ttrain accuracy: {np.mean(np.array(dtc)[:,0])} +- {np.std(np.array(dtc)[:,0])}') print(f'\ttest accuracy: {np.mean(np.array(dtc)[:,1])} +- {np.std(np.array(dtc)[:,1])}') print('\nFor random forest on concept vectors:') print(f'\ttrain accuracy: {np.mean(np.array(rfc)[:,0])} +- {np.std(np.array(rfc)[:,0])}') print(f'\ttest accuracy: {np.mean(np.array(rfc)[:,1])} +- {np.std(np.array(rfc)[:,1])}') print('\nFor decision tree on original vectors:') print(f'\ttrain accuracy: {np.mean(np.array(dto)[:,0])} +- {np.std(np.array(dto)[:,0])}') print(f'\ttest accuracy: {np.mean(np.array(dto)[:,1])} +- {np.std(np.array(dto)[:,1])}') print('\nFor random forest on original vectors:') print(f'\ttrain accuracy: {np.mean(np.array(rfo)[:,0])} +- {np.std(np.array(rfo)[:,0])}') print(f'\ttest 
accuracy: {np.mean(np.array(rfo)[:,1])} +- {np.std(np.array(rfo)[:,1])}') ```
github_jupyter
### Load SEM image Import packages ``` from PIL import Image import numpy as np import time import matplotlib.pyplot as plt import cv2 import copy # from skimage import io # from skimage.io import imread, imshow # from skimage.filters import threshold_otsu # from skimage import color # from skimage.color import label2rgb # from numpy import percentile # from spade.detection_2d import spade2d # from spade.shapes.examples import potatoids5x5_smallest4px ``` ## Read image as Pillow image format and convert to gray scale Could use the ndarray imported in previous code and conver RGB to gray scale but need equation (gray = 0.2989 * r + 0.5870 * g + 0.1140 * b) ``` cmap = copy.copy(plt.cm.get_cmap("gray")) cmap.set_bad(color='black') folder = 'Combined_' # for folder_index in range(67, 68): folder_index = 65 folder_name = 'Combined_' + chr(folder_index) + '/' # for num in range(10, 11): num = 4 sem_name = folder_name + 'SEM_' + str(num) + '.jpg' image_name = folder_name + 'Image_' + str(num) + '.jpg' photo_pattern_noise_removal = folder_name + 'Photo_pattern_' + str(num) + '.jpg' SEM_gray = Image.open(sem_name).convert('LA') photo_gray = Image.open(image_name).convert('LA') # print('SEM_gray type:', type(SEM_gray)) # print('SEM_gray size:', SEM_gray.size) # print('photo_gray:', type(photo_gray)) # print('photo_gray:', photo_gray.size) # The converted ndarray from gray scale image will produce one extra layer SEM_gray_array = np.array(SEM_gray)[:, :, 0] photo_gray_array = np.array(photo_gray)[:, :, 0] # np.rot90: counter clock wise photo_gray_array = np.rot90(photo_gray_array) photo_gray_array = np.rot90(photo_gray_array) photo_gray_array = np.rot90(photo_gray_array) # # Crop the pattern from image top_row = 370 bot_row = 1000 left_col = 260 right_col = 720 # photo_gray_array_crop = photo_gray_array[330:920, 180:740] photo_gray_array_crop = photo_gray_array[top_row:bot_row, left_col:right_col] # # Show SEM image with photo cropped in gray # plt.subplot(121) # 
plt.imshow(SEM_gray_array, cmap = cmap) # plt.subplot(122) # plt.imshow(photo_gray_array_crop, cmap = cmap) # plt.subplots_adjust(bottom=0.5, right=2, top=2) # print('SEM_gray_array:', SEM_gray_array.shape) # print('photo_gray_array_crop:', photo_gray_array_crop.shape) # # The image hasn't been resized # Resize the sem and image to overlap # Resizing ratio: photo image needs to be resized to match SEM image width = 2.72 height = 2.66 SEM_gray_array_resize = cv2.resize(SEM_gray_array, dsize=(640, 480), interpolation=cv2.INTER_NEAREST) photo_gray_array_crop_resize = cv2.resize(photo_gray_array_crop, dsize=(int((right_col-left_col)/4*width), int((bot_row-top_row)/4*height)), interpolation=cv2.INTER_NEAREST) # print('SEM_gray_array_resize type:', type(SEM_gray_array_resize)) # print('photo_gray_array_crop_resize:', type(photo_gray_array_crop_resize)) # # Show the SEM gray resized and photo gray ropped resized image # plt.subplot(121) # plt.imshow(SEM_gray_array_resize, cmap = cmap) # plt.subplot(122) # plt.imshow(photo_gray_array_crop_resize, cmap = cmap) # plt.subplots_adjust(bottom=0.5, right=2, top=2) # print('SEM_gray_array_resize size:', SEM_gray_array_resize.shape) # print('photo_gray_array_crop_resize size:', photo_gray_array_crop_resize.shape) # Convert sem and image to BW to overlap SEM_gray_array_resize_bw = np.where(SEM_gray_array_resize > 80, 1, 0) photo_gray_array_crop_resize_bw = np.where(photo_gray_array_crop_resize > 40, 1, 0) # plt.subplot(121) # plt.imshow(SEM_gray_array_resize_bw, cmap = cmap) # plt.subplot(122) # plt.imshow(photo_gray_array_crop_resize_bw, cmap = cmap) # plt.subplots_adjust(bottom=0.5, right=2, top=2) # print('SEM_gray_array_resize_bw type:', type(SEM_gray_array_resize_bw)) # print('SEM_gray_array_resize_bw size:', SEM_gray_array_resize_bw.shape) # print('photo_gray_array_crop_resize_bw type:', type(photo_gray_array_crop_resize_bw)) # print('photo_gray_array_crop_resize_bw size:', photo_gray_array_crop_resize_bw.shape) # Remove 
noise of the SEM and Image file plt.subplot(121) plt.imshow(SEM_gray_array_resize_bw, cmap = cmap) plt.subplot(122) plt.imshow(photo_gray_array_crop_resize_bw, cmap = cmap) plt.subplots_adjust(bottom=0.5, right=2, top=2) # Find the overlapping location SEM_gray_array_resize_bw = 1 - SEM_gray_array_resize_bw plt.subplot(121) plt.imshow(SEM_gray_array_resize_bw, cmap = cmap) plt.subplot(122) plt.imshow(photo_gray_array_crop_resize_bw, cmap = cmap) plt.subplots_adjust(bottom=0.5, right=2, top=2) # diff = float('inf') # result = (0, 0) # data = [] # count = 1 # for i in range(0, SEM_gray_array_resize_bw.shape[0]-photo_gray_array_crop_resize_bw.shape[0], 1): # for j in range(0, SEM_gray_array_resize_bw.shape[1]-photo_gray_array_crop_resize_bw.shape[1], 1): # temp = np.array(SEM_gray_array_resize_bw) # temp[i:i+photo_gray_array_crop_resize_bw.shape[0], j:j+photo_gray_array_crop_resize_bw.shape[1]] = ( # temp[i:i+photo_gray_array_crop_resize_bw.shape[0], j:j+photo_gray_array_crop_resize_bw.shape[1]] - # photo_gray_array_crop_resize_bw) # temp_diff = np.trace(temp @ temp.transpose()) # data.append(temp_diff) # if count % 1000 == 0: # print('count:', count, 'row:', i, 'col:', j) # count = count + 1 # if temp_diff < diff: # diff = temp_diff # result = i, j # # print('number of test:', len(data)) # # print('Minimum of difference:', min(data)) # # plt.plot(data) result = [7, 150] row = result[0] col = result[1] # Show the SEM BW, image cropped BW, Combination of SEM and image cropped # sem_photo_combine = np.array(SEM_gray_array_resize_bw) # sem_photo_combine[result[0]:result[0]+photo_gray_array_crop_resize_bw.shape[0], result[1]:result[1]+photo_gray_array_crop_resize_bw.shape[1]] = photo_gray_array_crop_resize_bw # ax1 = plt.subplot(131) # plt.imshow(SEM_gray_array_resize_bw, cmap = cmap) # ax1.set_title("SEM black-white resized image") # ax2 = plt.subplot(132) # plt.imshow(photo_gray_array_crop_resize_bw, cmap = cmap) # ax2.set_title("Photo cropped black white resized ") # 
ax3 = plt.subplot(133) # plt.imshow(sem_photo_combine, cmap = cmap) # ax3.set_title("Combination of SEM and photo image") # plt.subplots_adjust(bottom=0, right=2.5, top=2) # Creating the photo pattern 3D image SEM = Image.open(sem_name) photo = Image.open(image_name) SEM_array = np.array(SEM) photo_array = np.array(photo) # Crop the pattern in photo image and resize photo_array = np.rot90(photo_array) photo_array = np.rot90(photo_array) photo_array = np.rot90(photo_array) photo_array_crop = photo_array[top_row:bot_row, left_col:right_col, :] photo_array_crop_resize = cv2.resize(photo_array_crop, dsize=(int((right_col-left_col)*width), int((bot_row-top_row)*height)), interpolation=cv2.INTER_NEAREST) # ax = plt.subplot(111) # plt.imshow(photo_array_crop_resize) # ax.set_title("Photo cropped resized 1600*1600") # plt.subplots_adjust(bottom=0, right=2.5, top=2) # Insert photo pattern data into created photo_pattern_match image photo_pattern_match = np.zeros(SEM_array.shape) photo_pattern_match[row*4:row*4+photo_array_crop_resize.shape[0], col*4:col*4+photo_array_crop_resize.shape[1], :] = photo_array_crop_resize photo_pattern_match = photo_pattern_match.astype(np.uint8) plt.imshow(photo_pattern_match) # plt.imshow(photo_pattern_match) # plt.subplots_adjust(bottom=0, right=2.5, top=2) # Show the SEM, photo pattern match and combined SEM and photo pattern match image # combine_sem_photo_pattern_match = 0.5 * SEM_array + 0.5 * photo_pattern_match # combine_sem_photo_pattern_match = combine_sem_photo_pattern_match.astype(np.uint8) # ax1 = plt.subplot(131) # plt.imshow(SEM_array) # ax1.set_title("SEM image") # ax2 = plt.subplot(132) # plt.imshow(photo_pattern_match) # ax2.set_title("Photo pattern match image") # # This image is created by cropping the droplet pattern (800*800) from photograph image # # and placed at the location as SEM image # # The cropped image was resized to 1600*1600 to fit the size # # The background are 0 # ax3 = plt.subplot(133) # 
plt.imshow(combine_sem_photo_pattern_match, cmap = cmap) # ax3.set_title("Combination of SEM and photo image") # plt.subplots_adjust(bottom=0, right=2.5, top=2) factor = 3 photo_pattern_match_noise_remove = cv2.medianBlur(photo_pattern_match, factor) # ax1 = plt.subplot(121) # plt.imshow(photo_pattern_match) # ax1.set_title("Photo pattern match image") # ax2 = plt.subplot(122) # plt.imshow(photo_pattern_match_noise_remove) # ax2.set_title("Photo pattern noise Medium filter removed match image") # plt.subplots_adjust(bottom=0, right=2.5, top=2) Image.fromarray(photo_pattern_match_noise_remove).save(photo_pattern_noise_removal) print('Finish sample ', str(num), ' in folder ', chr(folder_index)) # photo_array_crop_resize = cv2.resize(photo_array_crop, dsize=(int((right_col-left_col)/4*width), int((bot_row-top_row)/4*height)), interpolation=cv2.INTER_NEAREST) # print(photo_array_crop_resize.shape) # print(photo_array_crop.shape) # print(590/4*2.66, 560/4*2.72) # print(photo_pattern_match.shape) # print(row, col) # print(photo_array_crop_resize.shape) # print(2560 - col*4) # print(SEM_gray_array_resize_bw.shape, photo_gray_array_crop_resize_bw.shape) ``` #### Convert gray image to black and white to overlap ### Noise removal for EDS image ``` # factor = 3 # Cl = cv2.imread('Cl Kα1.png') # # use INTER_NEAREST method to resize sem-eds image # # temp = np.zeros(Cl.shape) # # temp[:, :, 0] = cv2.resize(Cl[:, :, 0], dsize=(2560, 1920), interpolation=cv2.INTER_NEAREST) # # temp[:, :, 1] = cv2.resize(Cl[:, :, 1], dsize=(2560, 1920), interpolation=cv2.INTER_NEAREST) # # temp[:, :, 2] = cv2.resize(Cl[:, :, 2], dsize=(2560, 1920), interpolation=cv2.INTER_NEAREST) # # Cl = temp # Cl_noise_remove = cv2.medianBlur(Cl, factor) # ax1 = plt.subplot(121) # plt.imshow(Cl, cmap=cmap) # ax1.set_title("Cl EDS image") # ax2 = plt.subplot(122) # plt.imshow(Cl_noise_remove, cmap=cmap) # ax2.set_title("Cl EDS median noise removal image") # plt.subplots_adjust(bottom=0, right=2.5, top=2) # 
Cl_denoised = np.zeros(Cl.shape) # layer_0 = Cl[:, :, 0] # layer_1 = Cl[:, :, 1] # layer_2 = Cl[:, :, 2] # u, s, vh = np.linalg.svd(layer_0, full_matrices=False) # s_cleaned = np.diag(np.array([si if si > 60 else 0 for si in s])) # layer_0_denoised = np.array(np.matmul(np.matmul(u, s_cleaned), vh), dtype=int) # Cl_denoised[:, :, 0] = layer_0_denoised # u, s, vh = np.linalg.svd(layer_1, full_matrices=False) # s_cleaned = np.diag(np.array([si if si > 60 else 0 for si in s])) # layer_1_denoised = np.array(np.matmul(np.matmul(u, s_cleaned), vh), dtype=int) # Cl_denoised[:, :, 1] = layer_1_denoised # u, s, vh = np.linalg.svd(layer_2, full_matrices=False) # s_cleaned = np.diag(np.array([si if si > 60 else 0 for si in s])) # layer_2_denoised = np.array(np.matmul(np.matmul(u, s_cleaned), vh), dtype=int) # Cl_denoised[:, :, 2] = layer_2_denoised # Cl_denoised = Cl_denoised.astype(np.uint8) # ax1 = plt.subplot(121) # plt.imshow(Cl, cmap = cmap) # ax1.set_title("Cl EDS image") # ax2 = plt.subplot(122) # plt.imshow(Cl_denoised) # ax2.set_title("Cl noise SVD removed match image") # plt.subplots_adjust(bottom=0, right=2.5, top=2) print('process completed') ```
github_jupyter
# Materialien zu <i>zufall</i> Autor: Holger Böttcher - hbomat@posteo.de ## Aufgaben 13 - Simulation (Probleme von Leibniz <br>und de Méré) <br> ### Problem von Leibniz Leibniz nahm fälschlicherweise an, dass beim Werfen von 2 Würfeln die Augensumme<br> 11 genau so oft auftritt wie die Augensumme 12<br> ``` %run zufall\start ``` Die <b>exakten Wahrscheinlichkeiten</b> können z.B. so ermittellt werden ``` W2 = Würfel(2) p11 = W2.P(11); p11, W2.P(11, d=4) p12 = W2.P(12); p12, W2.P(12, d=4) ``` Zur <b>Simulation</b> wird zunächst eine kleine Versuchsanzahl angenommen ``` n = 10 ``` Einmaliges Werfen von zwei Würfeln wird so simuliert (die Funktion <i>zuf_zahl</i> <br> liefert hier zwei Zahlen, die jeweils dem Ergebnis eines Würfels entsprechen) </div> ``` zuf_zahl((1, 6), (1, 6)) # Anweisung mehrfach ausführen ``` $n$-maliges Werfen entsprechend ``` sim = zuf_zahl((1, 6), (1, 6), n); sim ``` Für jeden Wurf wird die Augensumme ermittelt ``` sim1 = [summe(x) for x in sim]; sim1 ``` gezählt, wie oft 11 bzw. 
12 auftritt und die entprechenden relativen Häufigkeiten<br> berechnet ``` anz11 = anzahl(11)(sim1); h11 = anz11 / n anz12 = anzahl(12)(sim1); h12 = anz12 / n anz11, h11 anz12, h12 ``` Zur Simulation mit großem $n$ können die obigen Anweisungen wiederholt werden, <br> nachdem $n$ auf den gewünschten Wert gesetzt wurde (die langen Ausgaben sind zu<br> unterdrücken)<br><br> Hier werden sie zur bequemeren Handhabung in eine Prozedur geschrieben, wobei <br> auch die exakten Werte angegeben werden ``` def simulation1(n): sim = zuf_zahl((1, 6), (1, 6), n) sim = [summe(x) for x in sim] anz11 = anzahl(11)(sim) h11 = anz11 / n anz12 = anzahl(12)(sim) h12 = anz12 / n print('11: ' +str(N(h11, 6)) + ' exakt ' + str(N(p11, 6))) print('12: ' +str(N(h12, 6)) + ' exakt ' + str(N(p12, 6))) simulation1(10000) # Anweisung mehrfach ausführen, auch mit größerem n ``` <br> ### Problem von de Méré Er glaubte, dass man bei 4-maligem Werfen eines Würfels ebenso oft eine 6 erhält<br> wie eine Doppelsechs bei 24 Würfen mit 2 Würfeln (die Annahme ist falsch) <br> Die <b>exakten Werte</b> sind folgende<br> $P(\text{mindestens eine Sechs}) = 1 - P(\text{keine Sechs}) = 1-\dfrac{5^4}{6^4} \approx 0.518\qquad$ <br> beim 4-maligen Werfen eines Würfels <br> $P(\text{mindestens eine Doppelsechs}) = 1 - P(\text{keine Doppelsechs}) = 1-\dfrac{35^{24}}{36^{24}} \approx 0.491\qquad$ <br> beim 24-maligen Werfen von zwei Würfeln <br><br><br> Zur <b>Simulation</b> wird zunächst ein kleiner Wert für $n$ angenommen und <br> die Simulation entworfen ``` n = 5 ``` ### 1. 
4-maliges Werfen eines Würfels</b> und $n$ solche Versuche werden so simuliert ``` zuf_zahl((1, 6), 4) sim = [ zuf_zahl((1, 6), 4) for i in range(n) ]; sim ``` Ermittlung der Anzahl von Versuchen, bei denen mindestens eine 6 aufgetreten ist<br> sowie der relativen Häufigkeit ``` sim1 = [ x for x in sim if anzahl(6)(x) > 0 ]; sim1 anzahl(sim1) / n ``` Die Anweisungen als Prozedur für große $n$ ``` def simulation2(n): sim = [ zuf_zahl((1, 6), 4) for i in range(n) ] sim = [ x for x in sim if anzahl(6)(x) > 0 ] print('4-mal 1 Würfel ' + str(N(anzahl(sim)/n, 6)) + ' exakt ' \ + str(N(1-5**4/6^4, 6))) simulation2(10000) ``` ### 2. 24-maliges Werfen von 2 Würfeln</b> ``` w24 = zuf_zahl((1, 6), (1, 6), 24); w24 anzahl([x for x in w24 if summe(x) == 12]) ``` Prozedur für $n$ Versuche ``` def simulation3(n): sim = [zuf_zahl((1, 6), (1, 6), 24) for i in range(n)] sim = [anzahl([x for x in y if summe(x) == 12]) for y in sim] anz = anzahl([x for x in sim if x > 0]) print('24-mal 2 Würfel ' + str(N(anz/n, 6)) + ' exakt ' + str(N(1-35^24/36^24, 6))) simulation3(10000) ```
github_jupyter
<img src="data/photutils_banner.svg"> ## Photutils - Code: https://github.com/astropy/photutils - Documentation: http://photutils.readthedocs.org/en/stable/ - Issue Tracker: https://github.com/astropy/photutils/issues ## Photutils Overview - Background and background noise estimation - Source Detection and Extraction - DAOFIND and IRAF's starfind - **Image segmentation** - local peak finder - **Aperture photometry** - PSF photometry - PSF matching - Centroids - Morphological properties - Elliptical isophote analysis ## Preliminaries ``` # initial imports import numpy as np import matplotlib.pyplot as plt # change some default plotting parameters import matplotlib as mpl mpl.rcParams['image.origin'] = 'lower' mpl.rcParams['image.interpolation'] = 'nearest' mpl.rcParams['image.cmap'] = 'viridis' # Run the %matplotlib magic command to enable inline plotting # in the current notebook. Choose one of these: %matplotlib inline # %matplotlib notebook ``` ### Load the data We'll start by reading data and error arrays from FITS files. These are cutouts from the HST Extreme-Deep Field (XDF) taken with WFC3/IR in the F160W filter. ``` from astropy.io import fits sci_fn = 'data/xdf_hst_wfc3ir_60mas_f160w_sci.fits' rms_fn = 'data/xdf_hst_wfc3ir_60mas_f160w_rms.fits' sci_hdulist = fits.open(sci_fn) rms_hdulist = fits.open(rms_fn) sci_hdulist[0].header['BUNIT'] = 'electron/s' ``` Print some info about the data. ``` sci_hdulist.info() ``` Define the data and error arrays. ``` data = sci_hdulist[0].data.astype(np.float) error = rms_hdulist[0].data.astype(np.float) ``` Extract the data header and create a WCS object. ``` from astropy.wcs import WCS hdr = sci_hdulist[0].header wcs = WCS(hdr) ``` Display the data. 
``` from astropy.visualization import simple_norm norm = simple_norm(data, 'sqrt', percent=99.5) plt.imshow(data, norm=norm) plt.title('XDF F160W Cutout') ``` ## Part 1: Aperture Photometry Photutils provides circular, elliptical, and rectangular aperture shapes (plus annulus versions of each). These are names of the aperture classes, defined in pixel coordinates: * `CircularAperture` * `CircularAnnulus` * `EllipticalAperture` * `EllipticalAnnulus` * `RectangularAperture` * `RectangularAnnulus` Along with variants of each, defined in celestial coordinates: * `SkyCircularAperture` * `SkyCircularAnnulus` * `SkyEllipticalAperture` * `SkyEllipticalAnnulus` * `SkyRectangularAperture` * `SkyRectangularAnnulus` ## Methods for handling aperture/pixel intersection In general, the apertures will only partially overlap some of the pixels in the data. There are three methods for handling the aperture overlap with the pixel grid of the data array. <img src="data/photutils_aperture_methods.svg"> NOTE: the `subpixels` keyword is ignored for the **'exact'** and **'center'** methods. ### Perform circular-aperture photometry on some sources in the XDF First, we define a circular aperture at a given position and radius (in pixels). ``` from photutils import CircularAperture position = (90.73, 59.43) # (x, y) pixel position radius = 5. # pixels aperture = CircularAperture(position, r=radius) aperture print(aperture) ``` We can plot the aperture on the data using the aperture `plot()` method: ``` plt.imshow(data, norm=norm) aperture.plot(color='red', lw=2) ``` Now let's perform photometry on the data using the `aperture_photometry()` function. **The default aperture method is 'exact'.** Also note that the input data is assumed to have zero background. If that is not the case, please see the documentation for the `photutils.background` subpackage for tools to help subtract the background. See the `photutils_local_background.ipynb` notebook for examples of local background subtraction. 
The background was already subtracted for our XDF example data. ``` from photutils import aperture_photometry phot = aperture_photometry(data, aperture) phot ``` The output is an Astropy `QTable` (Quantity Table) with sum of data values within the aperture (using the defined pixel overlap method). The table also contains metadata, which is accessed by the `meta` attribute of the table. The metadata is stored as a python (ordered) dictionary: ``` phot.meta phot.meta['version'] ``` Aperture photometry using the **'center'** method gives a slightly different (and less accurate) answer: ``` phot = aperture_photometry(data, aperture, method='center') phot ``` Now perform aperture photometry using the **'subpixel'** method with `subpixels=5`: These parameters are equivalent to SExtractor aperture photometry. ``` phot = aperture_photometry(data, aperture, method='subpixel', subpixels=5) phot ``` ## Photometric Errors We can also input an error array to get the photometric errors. ``` phot = aperture_photometry(data, aperture, error=error) phot ``` The error array in our XDF FITS file represents only the background error. If we want to include the Poisson error of the source we need to calculate the **total** error: $\sigma_{\mathrm{tot}} = \sqrt{\sigma_{\mathrm{b}}^2 + \frac{I}{g}}$ where $\sigma_{\mathrm{b}}$ is the background-only error, $I$ are the data values, and $g$ is the "effective gain". The "effective gain" is the value (or an array if it's variable across an image) needed to convert the data image to count units (e.g. electrons or photons), where Poisson statistics apply. Photutils provides a `calc_total_error()` function to perform this calculation. 
``` # this time include the Poisson error of the source from photutils.utils import calc_total_error # our data array is in units of e-/s # so the "effective gain" should be the exposure time eff_gain = hdr['TEXPTIME'] tot_error = calc_total_error(data, error, eff_gain) phot = aperture_photometry(data, aperture, error=tot_error) phot ``` The total error increased only slightly because this is a small faint source. ## Units We can also input the data (and error) units via the `unit` keyword. ``` # input the data units import astropy.units as u unit = u.electron / u.s phot = aperture_photometry(data, aperture, error=tot_error, unit=unit) phot phot['aperture_sum'] ``` Instead of inputting units via the units keyword, `Quantity` inputs for data and error are also allowed. ``` phot = aperture_photometry(data * unit, aperture, error=tot_error * u.adu) phot ``` The `unit` will not override the data or error unit. ``` phot = aperture_photometry(data * unit, aperture, error=tot_error * u.adu, unit=u.photon) phot ``` ## Performing aperture photometry at multiple positions Now let's perform aperture photometry for three sources (all with the same aperture size). We simply define three (x, y) positions. ``` positions = [(90.73, 59.43), (73.63, 139.41), (43.62, 61.63)] radius = 5. apertures = CircularAperture(positions, r=radius) ``` Let's plot these three apertures on the data. ``` plt.imshow(data, norm=norm) apertures.plot(color='red', lw=2) ``` Now let's perform aperture photometry. ``` phot = aperture_photometry(data, apertures, error=tot_error, unit=unit) phot ``` Each source is a row in the table and is given a unique **id** (the first column). ## Adding columns to the photometry table We can add columns to the photometry table. Let's calculate the signal-to-noise (SNR) ratio of our sources and add it as a new column to the table. 
``` snr = phot['aperture_sum'] / phot['aperture_sum_err'] # units will cancel phot['snr'] = snr phot ``` Now calculate the F160W AB magnitude and add it to the table. ``` f160w_zpt = 25.9463 # NOTE that the log10() function can be applied only to dimensionless quantities # so we use the value() method to get the number value of the aperture sum abmag = -2.5 * np.log10(phot['aperture_sum'].value) + f160w_zpt phot['abmag'] = abmag phot ``` Now, using the WCS defined above, calculate the sky coordinates for these objects and add it to the table. ``` from astropy.wcs.utils import pixel_to_skycoord # convert pixel positions to sky coordinates x, y = np.transpose(positions) coord = pixel_to_skycoord(x, y, wcs) # we can add the astropy SkyCoord object directly to the table phot['sky coord'] = coord phot ``` We can also add separate RA and Dec columns, if preferred. ``` phot['ra_icrs'] = coord.icrs.ra phot['dec_icrs'] = coord.icrs.dec phot ``` If we write the table to an ASCII file using the ECSV format we can read it back in preserving all of the units, metadata, and SkyCoord objects. ``` phot.write('my_photometry.txt', format='ascii.ecsv') # view the table on disk !cat my_photometry.txt ``` Now read the table in ECSV format. ``` from astropy.table import QTable tbl = QTable.read('my_photometry.txt', format='ascii.ecsv') tbl tbl.meta tbl['aperture_sum'] # Quantity array tbl['sky coord'] # SkyCoord array ``` ## Aperture photometry using Sky apertures First, let's define the sky coordinates by converting our pixel coordinates. ``` positions = [(90.73, 59.43), (73.63, 139.41), (43.62, 61.63)] x, y = np.transpose(positions) coord = pixel_to_skycoord(x, y, wcs) coord ``` Now define circular apertures in sky coordinates. For sky apertures, the aperture radius must be a `Quantity`, in either pixel or angular units. ``` from photutils import SkyCircularAperture radius = 5. 
* u.pix sky_apers = SkyCircularAperture(coord, r=radius) sky_apers.r radius = 0.5 * u.arcsec sky_apers = SkyCircularAperture(coord, r=radius) sky_apers.r ``` When using a sky aperture in angular units, `aperture_photometry` needs the WCS transformation, which can be provided in two ways. ``` # via the wcs keyword phot = aperture_photometry(data, sky_apers, wcs=wcs) phot # or via a FITS hdu (i.e. header and data) as the input "data" phot = aperture_photometry(sci_hdulist[0], sky_apers) phot ``` ## More on Aperture Photometry in the Extended notebook: - Bad pixel masking - Encircled flux - Aperture photometry at multiple positions using multiple apertures Also see the local background subtraction notebook (`photutils_local_backgrounds.ipynb`). ## Part 2: Image Segmentation Image segmentation is the process where sources are identified and labeled in an image. The sources are detected by using a S/N threshold level and defining the minimum number of pixels required within a source. First, let's define a threshold image at 2$\sigma$ (per pixel) above the background. ``` bkg = 0. # background level in this image nsigma = 2. threshold = bkg + (nsigma * error) # this should be background-only error ``` Now let's detect "8-connected" sources of minimum size 5 pixels where each pixel is 2$\sigma$ above the background. "8-connected" pixels touch along their edges or corners. "4-connected" pixels touch along their edges. For reference, SExtractor uses "8-connected" pixels. The result is a segmentation image (`SegmentationImage` object). The segmentation image is the isophotal footprint of each source above the threshold. ``` from photutils import detect_sources npixels = 5 segm = detect_sources(data, threshold, npixels) print('Found {0} sources'.format(segm.nlabels)) ``` Display the segmentation image. 
``` from photutils.utils import random_cmap fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8)) ax1.imshow(data, norm=norm) lbl1 = ax1.set_title('Data') ax2.imshow(segm, cmap=segm.cmap()) lbl2 = ax2.set_title('Segmentation Image') ``` It is better to filter (smooth) the data prior to source detection. Let's use a 5x5 Gaussian kernel with a FWHM of 2 pixels. ``` from astropy.convolution import Gaussian2DKernel from astropy.stats import gaussian_fwhm_to_sigma sigma = 2.0 * gaussian_fwhm_to_sigma # FWHM = 2 pixels kernel = Gaussian2DKernel(sigma, x_size=5, y_size=5) kernel.normalize() ssegm = detect_sources(data, threshold, npixels, filter_kernel=kernel) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8)) ax1.imshow(segm, cmap=segm.cmap()) lbl1 = ax1.set_title('Original Data') ax2.imshow(ssegm, cmap=ssegm.cmap()) lbl2 = ax2.set_title('Smoothed Data') ``` ### Source deblending Note above that some of our detected sources were blended. We can deblend them using the `deblend_sources()` function, which uses a combination of multi-thresholding and watershed segmentation. ``` from photutils import deblend_sources segm2 = deblend_sources(data, ssegm, npixels, filter_kernel=kernel, contrast=0.001, nlevels=32) fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 8)) ax1.imshow(data, norm=norm) ax1.set_title('Data') ax2.imshow(ssegm, cmap=ssegm.cmap()) ax2.set_title('Original Segmentation Image') ax3.imshow(segm2, cmap=segm2.cmap()) ax3.set_title('Deblended Segmentation Image') print('Found {0} sources'.format(segm2.max)) ``` ## Measure the photometry and morphological properties of detected sources ``` from photutils import source_properties catalog = source_properties(data, segm2, error=error, wcs=wcs) ``` `catalog` is a `SourceCatalog` object. It behaves like a list of `SourceProperties` objects, one for each source. 
``` catalog catalog[0] # the first source catalog[0].xcentroid # the xcentroid of the first source ``` Please go [here](http://photutils.readthedocs.org/en/latest/api/photutils.segmentation.SourceProperties.html#photutils.segmentation.SourceProperties) to see the complete list of available source properties. We can create a Table of isophotal photometry and morphological properties using the ``to_table()`` method of `SourceCatalog`: ``` tbl = catalog.to_table() tbl ``` Additional properties (not stored in the table) can be accessed directly via the `SourceCatalog` object. ``` # get a single object (id=12) obj = catalog[11] obj.id obj ``` Let's plot the cutouts of the data and error images for this source. ``` fig, ax = plt.subplots(figsize=(12, 8), ncols=3) ax[0].imshow(obj.make_cutout(segm2.data)) ax[0].set_title('Source id={} Segment'.format(obj.id)) ax[1].imshow(obj.data_cutout_ma) ax[1].set_title('Source id={} Data'.format(obj.id)) ax[2].imshow(obj.error_cutout_ma) ax[2].set_title('Source id={} Error'.format(obj.id)) ``` ## More on Image Segmentation in the Extended notebook: - Define a subset of source labels - Define a subset of source properties - Additional source properties, such as cutout images - Define the approximate isophotal ellipses for each source ## Also see the two notebooks on Photutils PSF-fitting photometry: - `gaussian_psf_photometry.ipynb` - `image_psf_photometry_withNIRCam.ipynb`
github_jupyter
# Extracting condition-specific trials The aim of this section is to extract the trials according to the trigger channel. We will explain how the events can be generated from the stimulus channels and how to extract condition specific trials (epochs). Once the trials are extracted, bad epochs will be identified and excluded based on their peak-to-peak signal amplitude. ## Preparation Import the relevant Python modules: ``` import os.path as op import os import sys import numpy as np import mne import matplotlib.pyplot as plt ``` Set the paths for the data and results. Note that these will depend on your local setup. ``` data_path = r'C:\Users\JensenO\Dropbox\FLUX\Development\dataRaw' result_path = r'C:\Users\JensenO\Dropbox\FLUX\Development\dataResults' file_name = 'training_raw' ``` ## Reading the events from the stimulus channels First read all the events from the stimulus channel (in our case, STI101). We will loop over the 2 fif-files created in the previous step. ``` for subfile in range(1, 3): path_file = os.path.join(result_path,file_name + 'ica-' + str(subfile) + '.fif') raw = mne.io.read_raw_fif(path_file,allow_maxshield=True,verbose=True,preload=True) events = mne.find_events(raw, stim_channel='STI101',min_duration=0.001001) # Save the events in a dedicated FIF-file: filename_events = op.join(result_path,file_name + 'eve-' + str(subfile) +'.fif') mne.write_events(filename_events,events) ``` The code above extracts the events from the trigger channel STI101. These results are represented in the array *events* where the first column is the sample and the third column the corresponding trigger value. Note that the events are concatenated across the 2 subfiles. To visualize a snippet of the events-array write: ``` %matplotlib qt plt.stem(events[:,0],events[:,2]) plt.xlim(1950000,2000000) plt.xlabel('samples') plt.ylabel('Trigger value (STI101)') plt.show() ``` The figure shows an example for part of the events array. 
The trigger values indicate specific events of the trials. Here the 'attend left' trials are coded with the trigger '21', whereas the 'attend right' trials with '22'. ## Defining the epochs (trials) according to the event values Next step is to extract the left and right trials ``` events_id = {'left':21,'right':22} raw_list = list() events_list = list() for subfile in range(1, 3): # Read in the data from the Result path path_file = os.path.join(result_path,file_name + 'ica-' + str(subfile) + '.fif') raw = mne.io.read_raw_fif(path_file, allow_maxshield=True,verbose=True) filename_events = op.join(result_path,file_name + 'eve-' + str(subfile) +'.fif') events = mne.read_events(filename_events, verbose=True) raw_list.append(raw) events_list.append(events) ``` Now concatenate raw instances as if they were continuous - i.e combine over the 2 subfiles. ``` raw, events = mne.concatenate_raws(raw_list,events_list=events_list) del raw_list ``` Set the peak-to-peak amplitude thresholds for trial rejection. These values may change depending on the quality of the data. ``` reject = dict(grad=5000e-13, # T / m (gradiometers) mag=5e-12, # T (magnetometers) #eeg=200e-6, # V (EEG channels) #eog=150e-6 # V (EOG channels) ) ``` We will use time-windows of interest starting 2.5 s prior to the stimulus onset and ending 2 s after. Now perform the epoching using the events and events_id as well as the selected channels: ``` epochs = mne.Epochs(raw, events, events_id, tmin=-2.5 , tmax=2, baseline=None, proj=True, picks = 'all', detrend = 1, reject=reject, reject_by_annotation=True, preload=True, verbose=True) # Show epochs details epochs ``` By calling *epochs* we can check that the number of events is 305 of which 152 are left attention trials and 153 right attention trials. Moreover, we can see that no baseline correction was applied at this stage. 
Now we plot an overview of the rejected epochs: ``` epochs.plot_drop_log(); ``` A few percent of the trials were rejected due to MEG artifacts in the magnetometers. Now we save the epoched data in an FIF-file. Note this file will include trials from the 2 subfiles. ``` path_outfile = os.path.join(result_path,'training_epo.fif') epochs.save(path_outfile,overwrite=True) ``` ## Plotting the trials To show the trials for the left-condition for the MEG gradiometers write: ``` %matplotlib inline epochs.plot(n_epochs=10,picks=['grad'],event_id={'left':21}); ``` The plot above shows 10 trials of type left; only gradiometers shown. To show the trigger (stimulus channels) write: ``` %matplotlib inline epochs.plot(n_epochs=1,picks=['stim'],event_id={'left': 21}); ``` An example of the trigger channels for one trial. Showing the trigger channels is often useful for verifying that correct trials have been selected. Note that STI001 to STI016 denote the individual trigger lines which are 'on' (1) or 'off' (0). The channel STI101 is a combination of the trigger lines ( STI101 = STI001 + 2 * STI002 + 4 * STI003 + 8 * STI004 + ...) To show all the trials belonging to *left* for a representative gradiometer (MEG2343) use the plot_image function. In the following example we also lowpass filter the indvidual trials at 30 Hz and shorten them (crop) to a -100 to 400 ms interval: ``` %matplotlib inline epochs['left'].filter(0.0,30).crop(-0.1,0.4).plot_image(picks=['MEG2343'],vmin=-150,vmax=150); ``` ## Preregistration and publications Publication, example: "The data were segmented into intervals of 4.5 s, ranging from 2.5 s prior to stimulus onset and 2 s after. To ensure that no artefacts were missed, trials in which the gradiometers values exceeded 5000 fT/cm or magnetometers exceeded 5000 fT were rejected as well as trials previously annotated with muscle artefacts."
github_jupyter
# Parte 3 - Machine Learning Workflow Datasets: [Diamanti](https://www.kaggle.com/shivam2503/diamonds) **OBIETTIVO:** In base alle sue caratteristiche provare a predire il prezzo di un diamante <br> Utilizzeremo la libreria python **scikit-learn** per testare alcuni algoritmi di classificazione! ``` import pandas as pd import numpy as np import sklearn from sklearn import svm, preprocessing diamond_df = pd.read_csv("../datasets/diamonds.csv", index_col = 0) diamond_df.head() ``` **Quale algoritmo di classificazione dovremmo utilizzare?** [Come scegliere il corretto algoritmo](https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html) ``` len(diamond_df) ``` ## Linear Regression <img src="../images/regression_1.jpeg" alt="Drawing" style="width: 545px;"/><img src="../images/regression_2.jpeg" alt="Drawing" style="width: 500px;"/> <br> Per utilizzare la Linear Regression deve esserci una relazione lineare tra i dati <img src="../images/regression_3.png" alt="Drawing" style="width: 545px;"/> [Least Square Method](https://www.varsitytutors.com/hotmath/hotmath_help/topics/line-of-best-fit) Come si definisce la best fit line? <br> $$Y = mx + b$$ Data una X dobbiamo trovare la sua Y corrispondente, ma prima dobbiamo risolvere _m_ e _b_: _m_ è la pendenza<br> _b_ è l'intersezione di y $$m = \frac{\overline{x}\cdot\overline{y} - \overline{xy}}{(\overline{x})^2-\overline{x^2}}$$ $$b = \overline{y}-m\overline{x} $$ In questo caso abbiamo dati su 2 dimensioni, ma appena incrementiamo la dimensione dello spazio vettoriale incrementerà anche la complessità dei calcoli **Per allenare il nostro modello vogliamo utilizzare tutti i parametri tranne il prezzo** ``` diamond_df["cut"].unique() ``` Abbiamo bisogno di categorie numeriche! ``` diamond_df["cut"].astype("category").cat.codes ``` **Problema**: Dobbiamo preservare il significato delle labels: per esempio Premium sarà migliore di Fair e così via.. 
``` cut_dizionario = {"Fair":1, "Good":2, "Very Good":3, "Premium":4, "Ideal":5} ``` Stessa cosa per: ``` clarity_dizionario = {"I3": 1, "I2": 2, "I1": 3, "SI2": 4, "SI1": 5, "VS2": 6, "VS1": 7, "VVS2": 8, "VVS1": 9, "IF": 10, "FL": 11} color_dizionario = {"J": 1,"I": 2,"H": 3,"G": 4,"F": 5,"E": 6,"D": 7} ``` Bisognerà mappare queste classi alle varie colonne del dataset ``` diamond_df['cut'] = diamond_df['cut'].map(cut_dizionario) ``` ## Esercizio 13: - Mappare le colonne "clarity" e "color" con i rispettivi dizionari! ``` #Esercizio diamond_df['clarity'] = diamond_df['clarity'].map(clarity_dizionario) diamond_df['color'] = diamond_df['color'].map(color_dizionario) diamond_df.head() ``` Prima di allenare il nostro modello è importante mescolare i dati per evitare che si formi del biasing analizzando i dati in ordine. <br> Per esempio potrebbe essere, come nel nostro caso, che i dati siano ordinati per prezzo. ``` diamond_df diamond_df = sklearn.utils.shuffle(diamond_df) ``` Separiamo il set di features dalla label che dobbiamo predire ``` X = diamond_df.drop("price", axis=1).values #Feature Set --> Ogni label tranne quella che dobbiamo predire y = diamond_df["price"].values X ``` **Bonus**: Avremmo potuto barare caricando il dataframe con l'index, poiché il dataset era ordinato per prezzo e facendo ciò avremmo lasciato un'informazione in più che avrebbe compromesso la nostra regressione, poiché l'indice sarebbe stato ordinato come il prezzo. **Preprocessing**: Permette di normalizzare i valori, in questo modo ridurremo la sparsità e il modello lavorerà con dati più uniformi con conseguente miglioramento nelle performance ``` print(np.mean(X)) X = preprocessing.scale(X) #Cerca print(np.mean(X)) test_size = 200 ``` Il **train** è una porzione del dataset per cui il modello viene allenato Il **test** è una porzione del dataset che il nostro modello non vedrà mai, e sarà usato per valutarne le performance. 
``` X_train = X[:-test_size] y_train = y[:-test_size] X_test = X[-test_size:] y_test = y[-test_size:] ``` Nel caso utilizzassimo il dataset di test per allenare il modello l'accuratezza finale risulterebbe compromessa poiché i dati sono già stati visionati dal modello durante la fase di train, in questo modo non possiamo verificare tramite il test se il modello abbia realmente imparato a predire un valore o abbia solamente imparato a memoria il dataset di train. Per questo motivo i dati di test devono essere utilizzati solo per testare il modello! Andiamo a selezionare il [modello](https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html) da utilizzare ``` clf = svm.SVR(kernel="linear") clf.fit(X_train, y_train) clf.score(X_test, y_test) ``` Coefficiente di determinazione (R quadro): - 0.0 Caso peggiore - 1.0 Caso migliore Esso è calcolato partendo dall'errore quadratico medio Fin troppo bello per essere vero! Andiamo a verificare quello che è successo! ``` for X,y in zip(X_test, y_test): print(f"Model: {clf.predict([X])}, Actual: {y}") ``` Testiamo un altro modello! ``` clf = svm.SVR(kernel="rbf") clf.fit(X_train, y_train) clf.score(X_test, y_test) for X,y in zip(X_test, y_test): print(f"Model: {clf.predict([X])}, Actual: {y}") ``` **BONUS:** Si potrebbero usare più classifier e alla fine fare una media delle prestazioni!
github_jupyter
# cadCAD Tutorials: The Robot and the Marbles, part 3 In parts [1](../robot-marbles-part-1/robot-marbles-part-1.ipynb) and [2](../robot-marbles-part-2/robot-marbles-part-2.ipynb) we introduced the 'language' in which a system must be described in order for it to be interpretable by cadCAD and some of the basic concepts of the library: * State Variables * Timestep * State Update Functions * Partial State Update Blocks * Simulation Configuration Parameters * Policies In this notebook we'll look at how subsystems within a system can operate in different frequencies. But first let's copy the base configuration with which we ended Part 2. Here's the description of that system: __The robot and the marbles__ * Picture a box (`box_A`) with ten marbles in it; an empty box (`box_B`) next to the first one; and __two__ robot arms capable of taking a marble from any one of the boxes and dropping it into the other one. * The robots are programmed to take one marble at a time from the box containing the largest number of marbles and drop it in the other box. They repeat that process until the boxes contain an equal number of marbles. * The robots act simultaneously; in other words, they assess the state of the system at the exact same time, and decide what their action will be based on that information. 
``` %%capture # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # List of all the state variables in the system and their initial values genesis_states = { 'box_A': 10, # as per the description of the example, box_A starts out with 10 marbles in it 'box_B': 0 # as per the description of the example, box_B starts out empty } # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Settings of general simulation parameters, unrelated to the system itself # `T` is a range with the number of discrete units of time the simulation will run for; # `N` is the number of times the simulation will be run (Monte Carlo runs) # In this example, we'll run the simulation once (N=1) and its duration will be of 10 timesteps # We'll cover the `M` key in a future article. For now, let's omit it sim_config_dict = { 'T': range(10), 'N': 1, #'M': {} } # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # We specify the robot arm's logic in a Policy Function def robot_arm(params, step, sH, s): add_to_A = 0 if (s['box_A'] > s['box_B']): add_to_A = -1 elif (s['box_A'] < s['box_B']): add_to_A = 1 return({'add_to_A': add_to_A, 'add_to_B': -add_to_A}) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # We make the state update functions less "intelligent", # ie. 
they simply add the number of marbles specified in _input # (which, per the policy function definition, may be negative) def increment_A(params, step, sH, s, _input): y = 'box_A' x = s['box_A'] + _input['add_to_A'] return (y, x) def increment_B(params, step, sH, s, _input): y = 'box_B' x = s['box_B'] + _input['add_to_B'] return (y, x) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # In the Partial State Update Blocks, # the user specifies if state update functions will be run in series or in parallel # and the policy functions that will be evaluated in that block partial_state_update_blocks = [ { 'policies': { # The following policy functions will be evaluated and their returns will be passed to the state update functions 'robot_arm_1': robot_arm, 'robot_arm_2': robot_arm }, 'variables': { # The following state variables will be updated simultaneously 'box_A': increment_A, 'box_B': increment_B } } ] # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #imported some addition utilities to help with configuration set-up from cadCAD.configuration.utils import config_sim from cadCAD.configuration import Experiment from cadCAD import configs del configs[:] # Clear any prior configs exp = Experiment() c = config_sim(sim_config_dict) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # The configurations above are then packaged into a `Configuration` object exp.append_configs(initial_state=genesis_states, #dict containing variable names and initial values partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions sim_configs=c #preprocessed dictionaries containing simulation parameters ) from cadCAD.engine import ExecutionMode, ExecutionContext, Executor exec_mode = ExecutionMode() local_mode_ctx = 
ExecutionContext(exec_mode.local_mode) simulation = Executor(local_mode_ctx, configs) # Pass the configuration object inside an array raw_result, tensor, sessions = simulation.execute() # The `execute()` method returns a tuple; its first elements contains the raw results %matplotlib inline import pandas as pd df = pd.DataFrame(raw_result) df.plot('timestep', ['box_A', 'box_B'], grid=True, xticks=list(df['timestep'].drop_duplicates()), colormap = 'RdYlGn', yticks=list(range(1+(df['box_A']+df['box_B']).max()))); ``` # Asynchronous Subsystems We have defined that the robots operate simultaneously on the boxes of marbles. But it is often the case that agents within a system operate asynchronously, each having their own operation frequencies or conditions. Suppose that instead of acting simultaneously, the robots in our examples operated in the following manner: * Robot 1: acts once every 2 timesteps * Robot 2: acts once every 3 timesteps One way to simulate the system with this change is to introduce a check of the current timestep before the robots act, with the definition of separate policy functions for each robot arm. 
``` %%capture # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # We specify each of the robots logic in a Policy Function robots_periods = [2,3] # Robot 1 acts once every 2 timesteps; Robot 2 acts once every 3 timesteps def get_current_timestep(cur_substep, s): if cur_substep == 1: return s['timestep']+1 return s['timestep'] def robot_arm_1(params, step, sH, s): _robotId = 1 if get_current_timestep(step, s)%robots_periods[_robotId-1]==0: # on timesteps that are multiple of 2, Robot 1 acts return robot_arm(params, step, sH, s) else: return({'add_to_A': 0, 'add_to_B': 0}) # for all other timesteps, Robot 1 doesn't interfere with the system def robot_arm_2(params, step, sH, s): _robotId = 2 if get_current_timestep(step, s)%robots_periods[_robotId-1]==0: # on timesteps that are multiple of 3, Robot 2 acts return robot_arm(params, step, sH, s) else: return({'add_to_A': 0, 'add_to_B': 0}) # for all other timesteps, Robot 2 doesn't interfere with the system # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # In the Partial State Update Blocks, # the user specifies if state update functions will be run in series or in parallel # and the policy functions that will be evaluated in that block partial_state_update_blocks = [ { 'policies': { # The following policy functions will be evaluated and their returns will be passed to the state update functions 'robot_arm_1': robot_arm_1, 'robot_arm_2': robot_arm_2 }, 'variables': { # The following state variables will be updated simultaneously 'box_A': increment_A, 'box_B': increment_B } } ] # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # del configs[:] # Clear any prior configs # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # The configurations above are then packaged into a `Configuration` object exp.append_configs(initial_state=genesis_states, 
#dict containing variable names and initial values partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions sim_configs=c #preprocessed dictionaries containing simulation parameters ) executor = Executor(local_mode_ctx, configs) # Pass the configuration object inside an array raw_result, tensor, sessions = executor.execute() # The `execute()` method returns a tuple; its first elements contains the raw results simulation_result = pd.DataFrame(raw_result) simulation_result.plot('timestep', ['box_A', 'box_B'], grid=True, xticks=list(simulation_result['timestep'].drop_duplicates()), yticks=list(range(1+max(simulation_result['box_A'].max(),simulation_result['box_B'].max()))), colormap = 'RdYlGn' ) ``` Let's take a step-by-step look at what the simulation tells us: * Timestep 1: the number of marbles in the boxes does not change, as none of the robots act * Timestep 2: Robot 1 acts, Robot 2 doesn't; resulting in one marble being moved from box A to box B * Timestep 3: Robot 2 acts, Robot 1 doesn't; resulting in one marble being moved from box A to box B * Timestep 4: Robot 1 acts, Robot 2 doesn't; resulting in one marble being moved from box A to box B * Timestep 5: the number of marbles in the boxes does not change, as none of the robots act * Timestep 6: Robots 1 __and__ 2 act, as 6 is a multiple of 2 __and__ 3; resulting in two marbles being moved from box A to box B and an equilibrium being reached.
github_jupyter
``` import numpy as np import pandas as pd import os from pathlib import Path import selfies as sf from rdkit import Chem import pandas as pd ``` # Molecule retrieval from Zinc20 smi files ``` Is_data_prepared = True if not Is_data_prepared: tranche_dirs = ['FK', 'DC', 'BB', 'JA', 'HE', 'GA', 'KG', 'IC', 'CB', 'HJ'] ``` ### location of zinc20 files and resulting h5 store ### The files containing SMILES from ZINC 20 db can be download by running ```ZINC-downloader-2D-smi.wget``` - these requires a lot of free space ``` if not Is_data_prepared: zinc20_path = Path("/storage/hdd1/smiles/zinc20/tranche_2.5_375/") #Path to place where datasets are downloaded tranches_name = "_".join(tranche_dirs) store_path = Path("../data/zinc20_"+tranches_name+".h5") ``` ### main retrieval loop #pip install tables ``` if not Is_data_prepared: from_raw_files = True if from_raw_files: store = pd.HDFStore(store_path.absolute().as_posix(), "w") for subdir in tranche_dirs: smiles_df = pd.DataFrame(columns=["smiles"]) dir = (zinc20_path/subdir).absolute().as_posix() smiles_files = os.listdir(dir) for smiles_file in smiles_files: path = (zinc20_path/subdir/smiles_file).absolute().as_posix() try: df = pd.read_csv(path, sep=" ").set_index("zinc_id") smiles_df = pd.concat([smiles_df, df], axis=0) except pd.errors.EmptyDataError: pass store[subdir] = smiles_df store.close() ``` ### data sampling, from each subdirectory certain number of compounds is randomly sampled ``` if not Is_data_prepared: store = pd.HDFStore(store_path.absolute().as_posix(), "r") smiles_df = pd.DataFrame(columns=["smiles"]) n_sample = 100000 for tranche in store.keys(): df = store[tranche] n_sample_ = min(df.shape[0], n_sample) df = df.sample(n_sample_) smiles_df = pd.concat([smiles_df, df], axis=0) store.close() del df if not Is_data_prepared: resulting_file_name = "zinc20_"+tranches_name+"_processed.parquet" resulting_df_path = Path("../data")/resulting_file_name 
smiles_df.to_parquet(resulting_df_path.absolute().as_posix()) if not Is_data_prepared: os.remove("zinc20_FK_DC_BB_JA_HE_GA_KG_IC_CB_HJ.h5") ``` ### Check length of translated SELFIES ``` data_to_read = pd.read_parquet('zinc20_FK_DC_BB_JA_HE_GA_KG_IC_CB_HJ_processed.parquet') data_to_read = data_to_read.reset_index() data_to_read.shape data_to_read = data_to_read.drop_duplicates(subset=['smiles']) data_to_read = data_to_read.reset_index() del data_to_read['level_0'] data_to_read.shape data_to_read.head() ``` ## OLD code File_with_cannonical_SMILES_exist = True #change to false if file is not present if File_with_cannonical_SMILES_exist: data_to_read = pd.read_parquet('zinc20_FK_DC_BB_JA_HE_GA_KG_IC_CB_HJ_processed_neutralized.parquet') data_to_read['mol'] = [Chem.MolFromSmiles(smi) for smi in data_to_read['smiles']] data_to_read['canonical_SMILES'] = data_to_read['mol'] for i in range(len(data_to_read['mol'])): try: data_to_read['canonical_SMILES'][i] = Chem.MolToSmiles(data_to_read['mol'][i], isomericSmiles=False) except: data_to_read['canonical_SMILES'][i] = None data_to_read = data_to_read[data_to_read.canonical_SMILES != None] data_to_read = data_to_read.reset_index() #drop=True print(data_to_read.shape) data_to_read = data_to_read.drop_duplicates(subset=['canonical_SMILES']) data_to_read = data_to_read.reset_index() #drop=True print(data_to_read.shape) del data_to_read['mol'] data_to_read.to_parquet('zinc20_FK_DC_BB_JA_HE_GA_KG_IC_CB_HJ_processed_canonical_.parquet') else: data_to_read = pd.read_parquet('zinc20_FK_DC_BB_JA_HE_GA_KG_IC_CB_HJ_processed.parquet') data_to_read = data_to_read.reset_index() data_to_read['smiles'] = [Chem.MolToSmiles(get_parent_mol(Chem.MolFromSmiles(smiles), neutralize=True, check_exclusion=True, verbose=False)[0]) for smiles in data_to_read['smiles']] print(data_to_read.shape) data_to_read = data_to_read.drop_duplicates(subset=['smiles']) print(data_to_read.shape) data_to_read['mol'] = [Chem.MolFromSmiles(smi) for smi in 
def SELFIES_length(SELFIES_mol):
    """Return the number of tokens in a SELFIES string.

    Every SELFIES token is written as `[...]`, so counting the opening
    brackets gives the token count. Returns 0 on non-string input
    (e.g. a None produced by a failed encoding) instead of crashing:
    the previous version appended to a list inside try/except and then
    called max() on it, so any failure left the list empty and
    `max([])` raised ValueError right after the warning was printed.
    """
    try:
        return SELFIES_mol.count('[')
    except AttributeError:
        print('Something went wrong, check source code...')
        return 0
= [] for i in range(41,51): bins_SELFIES_length_half_2.append(i) bins_SELFIES_length_half_2.reverse() bins_SELFIES_length_half_2 center = 11 #size of median value first_bin = data[data['SELFIES_length'] == 30][:int(one_size)] first_bin['SELFIES_length'].hist() ``` # Dataset to training ``` bins_SELFIES_length_to_be_used_half = bins_SELFIES_length_half.copy() bins_SELFIES_length_to_be_used_half_2 = bins_SELFIES_length_half_2.copy() #bins_SELFIES_length_to_be_used_half_2.reverse() center_h = center for i, element in enumerate(bins[1:11]): bins_SELFIES_length_to_be_used_half[i] = data[data['SELFIES_length'] == bins_SELFIES_length_half[i]][:int(one_size*element)] bins_SELFIES_length_to_be_used_half_2[i] = data[data['SELFIES_length'] == bins_SELFIES_length_half_2[i]][:int(one_size*element)] bins_SELFIES_length_to_be_used_half[0].shape bins_SELFIES_length_to_be_used_half[2].shape bins_SELFIES_length_to_be_used_half_2[0].shape center_h = data[data['SELFIES_length'] == 40][:int(one_size*center)] center_h.shape frames = (bins_SELFIES_length_to_be_used_half[0], bins_SELFIES_length_to_be_used_half[1], bins_SELFIES_length_to_be_used_half[2], bins_SELFIES_length_to_be_used_half[3], bins_SELFIES_length_to_be_used_half[4], bins_SELFIES_length_to_be_used_half[5], bins_SELFIES_length_to_be_used_half[6], bins_SELFIES_length_to_be_used_half[7], bins_SELFIES_length_to_be_used_half[8], bins_SELFIES_length_to_be_used_half[9], bins_SELFIES_length_to_be_used_half_2[0], bins_SELFIES_length_to_be_used_half_2[1], bins_SELFIES_length_to_be_used_half_2[2], bins_SELFIES_length_to_be_used_half_2[3], bins_SELFIES_length_to_be_used_half_2[4], bins_SELFIES_length_to_be_used_half_2[5], bins_SELFIES_length_to_be_used_half_2[6], bins_SELFIES_length_to_be_used_half_2[7], bins_SELFIES_length_to_be_used_half_2[8], bins_SELFIES_length_to_be_used_half_2[9], center_h) data_to_training_and_validation = pd.concat(frames) data_to_training_and_validation.shape #data = 
data_to_training_and_validation.sort_values(by=['SELFIES_length'], ascending=True) data_to_training_and_validation = data_to_training_and_validation.reset_index() del data_to_training_and_validation['level_0'] data_to_training_and_validation.shape data_to_training_and_validation['SELFIES_length'].hist(bins=21) ``` ## Make use of canonical form of SMILES ``` #Prepare molecule mols = [Chem.MolFromSmiles(smi) for smi in data_to_training_and_validation['smiles']] data_to_training_and_validation['SMILES_canonical'] = [Chem.MolToSmiles(mol, isomericSmiles=False) for mol in mols] data_to_training_and_validation.head() data_to_training_and_validation['SELFIES_canonical'] = [sf.encoder(smiles) for smiles in data_to_training_and_validation['SMILES_canonical']] data_to_training_and_validation['SELFIES_length_canonical'] = [SELFIES_length(SELFIES) for SELFIES in data_to_training_and_validation['SELFIES_canonical']] data_to_training_and_validation['SELFIES_length_canonical'].hist(bins=21) data_to_training_and_validation.head() #drop duplicates data_to_training_and_validation = data_to_training_and_validation.drop_duplicates(subset=['SMILES_canonical']) data_to_training_and_validation = data_to_training_and_validation.reset_index() del data_to_training_and_validation['level_0'] ``` ### to be removed -> Sn -> Se -> B -> =P -> P ``` data_to_training_and_validation = data_to_training_and_validation.reset_index() del data_to_training_and_validation['level_0'] data_to_training_and_validation = data_to_training_and_validation[data_to_training_and_validation["SMILES_canonical"].str.contains("Sn") == False] print(data_to_training_and_validation.shape) data_to_training_and_validation = data_to_training_and_validation[data_to_training_and_validation["SMILES_canonical"].str.contains("Si") == False] print(data_to_training_and_validation.shape) data_to_training_and_validation = data_to_training_and_validation[data_to_training_and_validation["SELFIES_canonical"].str.contains("P") == False] 
print(data_to_training_and_validation.shape) data_to_training_and_validation = data_to_training_and_validation[data_to_training_and_validation["SELFIES_canonical"].str.contains("=P") == False] print(data_to_training_and_validation.shape) data_to_training_and_validation_bromium = data_to_training_and_validation[data_to_training_and_validation["SMILES_canonical"].str.contains("Br") == True] print(data_to_training_and_validation_bromium.shape) data_to_training_and_validation = data_to_training_and_validation[data_to_training_and_validation["SMILES_canonical"].str.contains("B") == False] print(data_to_training_and_validation.shape) frames = (data_to_training_and_validation, data_to_training_and_validation_bromium) data_to_training_and_validation_to_be_used = pd.concat(frames) print(data_to_training_and_validation_to_be_used.shape) data_to_training_and_validation_to_be_used = data_to_training_and_validation_to_be_used.reset_index() del data_to_training_and_validation_to_be_used['level_0'] print(data_to_training_and_validation_to_be_used.shape) data_to_training_and_validation_to_be_used = data_to_training_and_validation_to_be_used.sort_values(by=['SELFIES_length'], ascending=True) one_size = 121000/sum(one_part_size) one_size bins_SELFIES_length_to_be_used_half = bins_SELFIES_length_half.copy() bins_SELFIES_length_to_be_used_half_2 = bins_SELFIES_length_half_2.copy() #bins_SELFIES_length_to_be_used_half_2.reverse() center_h = center for i, element in enumerate(bins[1:11]): bins_SELFIES_length_to_be_used_half[i] = data_to_training_and_validation_to_be_used[data_to_training_and_validation_to_be_used['SELFIES_length_canonical'] == bins_SELFIES_length_half[i]][:int(one_size*element)] bins_SELFIES_length_to_be_used_half_2[i] = data_to_training_and_validation_to_be_used[data_to_training_and_validation_to_be_used['SELFIES_length_canonical'] == bins_SELFIES_length_half_2[i]][:int(one_size*element)] center_h = 
data_to_training_and_validation_to_be_used[data_to_training_and_validation_to_be_used['SELFIES_length_canonical'] == 40][:int(one_size*center)] frames = (bins_SELFIES_length_to_be_used_half[0], bins_SELFIES_length_to_be_used_half[1], bins_SELFIES_length_to_be_used_half[2], bins_SELFIES_length_to_be_used_half[3], bins_SELFIES_length_to_be_used_half[4], bins_SELFIES_length_to_be_used_half[5], bins_SELFIES_length_to_be_used_half[6], bins_SELFIES_length_to_be_used_half[7], bins_SELFIES_length_to_be_used_half[8], bins_SELFIES_length_to_be_used_half[9], bins_SELFIES_length_to_be_used_half_2[0], bins_SELFIES_length_to_be_used_half_2[1], bins_SELFIES_length_to_be_used_half_2[2], bins_SELFIES_length_to_be_used_half_2[3], bins_SELFIES_length_to_be_used_half_2[4], bins_SELFIES_length_to_be_used_half_2[5], bins_SELFIES_length_to_be_used_half_2[6], bins_SELFIES_length_to_be_used_half_2[7], bins_SELFIES_length_to_be_used_half_2[8], bins_SELFIES_length_to_be_used_half_2[9], center_h) data_to_training_and_validation = pd.concat(frames) data_to_training_and_validation.head() data_to_training_and_validation['SELFIES_length_canonical'].hist(bins=21) data_to_training_and_validation = data_to_training_and_validation.reset_index() del data_to_training_and_validation['level_0'] resulting_file_name = "zinc20_"+'selected_to_create_model'+"_processed.parquet" resulting_df_path = Path("../data")/resulting_file_name ##Important bug data_to_training_and_validation.to_parquet(resulting_df_path.absolute().as_posix()) ``` ### RORgamma active compounds : doi: 10.1038/aps.2014.120 ``` list_of_compounds_names = ['20-Hydroxycholesterol', '22(R)-Hydroxy cholesterol', '25-Hydroxycholesterol','Ursolic acid','Digoxin','T0901317', 'SR1001', 'SR1078', 'SR-1555', 'SR2211', 'ML209', 'N-(1-(4-(1,1,1,3,3,3-hexafluoro-2-hydroxypropan-2-yl)benzyl)-1,2,3,4-tetrahydroquinolin-6-yl)acetamide', '2,4-difluoro-N-(1-((4-fluorophenyl)sulfonyl)-1,2,3,4-tetrahydroquinolin-7-yl)benzenesulfonamide', 
'2-Chloro-6-fluoro-N-(1-((4-fluorophenyl)sulfonyl)-1,2,3,4-tetrahydroquinolin-7-yl)benzamide', '(S)-2-fluoro-N-(3-methyl-1-(m-tolylsulfonyl)-2,3-dihydro-1H-pyrido[2,3-b][1,4]oxazin-7-yl)-6-(trifluoromethyl)benzamide', '(S)-2-fluoro-N-(3-methyl-1-(m-tolylsulfonyl)-2,3-dihydro-1H-pyrido[2,3-b][1,4]oxazin-7-yl)-6-(trifluoromethyl)benzamide', '4-(1-(2-Chloro-6-cyclopropylbenzoyl)-7-fluoro-1H-indazol-3-yl)-3-fluorobenzoicacid', '4-(1-(2-Chloro-6-(trifluoromethyl)benzoyl)-7-fluoro-1H-indazol-3-yl)-2-hydroxycyclohex-3-enecarboxylic acid', 'GSK-1a', 'GSK-1b', 'GSK-1c', 'GSK-6a', 'GSK-8h', 'GSK-9g', 'GSK-2', 'GSK-13', 'GSK-21', '2-(4-(Ethylsulfonyl)phenyl)-N-(6-(3-fluorophenoxy)-[1,1′-biphenyl]-3-yl)acetamide', 'N-(6-(3,5-difluorophenoxy)-3′-fluoro-[1,1′-biphenyl]-3-yl)-2-(4-(N-methylsulfamoyl)phenyl)acetamide', 'N-(4-Ethylphenyl)-3-(hydroxymethyl)-Nisobutyl-4-((tetrahydro-2H-pyran-4-yl)methoxy)benzenesulfonamide', 'N-(4-chlorophenyl)-4-((3,5-dimethylisoxazol-4-yl)methoxy)-N-isobutylbenzenesulfonamide', 'N-(2,4-dimethylphenyl)-4-(2-hydroxy2-(pyridin-4-yl)ethoxy)-N-isobutylbenzenesulfonamide', 'N-isobutyl-N-((5-(4-(methylsulfonyl)phenyl)thiophen-2-yl)methyl)-1-phenylmethanesulfonamide', 'N-(4-(4-acetylpiperazin-1-yl)benzyl)-Nisobutyl-1-phenylmethanesulfonamide', 'N-(3,4-dimethoxyphenyl)-1-ethyl-2-oxo-1,2-dihydrobenzo[cd]indole-6-sulfonamide', 'JTE-151'] list_of_compounds_smiles = ['CC(C)CCC[C@@](C)([C@H]1CC[C@@H]2[C@@]1(CC[C@H]3[C@H]2CC=C4[C@@]3(CC[C@@H](C4)O)C)C)O', 'C[C@@H]([C@H]1CC[C@@H]2[C@@]1(CC[C@H]3[C@H]2CC=C4[C@@]3(CC[C@@H](C4)O)C)C)[C@@H](CCC(C)C)O', 'C[C@H](CCCC(C)(C)O)[C@H]1CC[C@@H]2[C@@]1(CC[C@H]3[C@H]2CC=C4[C@@]3(CC[C@@H](C4)O)C)C', 'C[C@@H]1CC[C@@]2(CC[C@@]3(C(=CC[C@H]4[C@]3(CC[C@@H]5[C@@]4(CC[C@@H](C5(C)C)O)C)C)[C@@H]2[C@H]1C)C)C(=O)O', 'C[C@@H]1[C@H]([C@H](C[C@@H](O1)O[C@@H]2[C@H](O[C@H](C[C@@H]2O)O[C@@H]3[C@H](O[C@H](C[C@@H]3O)O[C@H]4CC[C@]5([C@@H](C4)CC[C@@H]6[C@@H]5C[C@H]([C@]7([C@@]6(CC[C@@H]7C8=CC(=O)OC8)O)C)O)C)C)C)O)O', 
'C1=CC=C(C=C1)S(=O)(=O)N(CC(F)(F)F)C2=CC=C(C=C2)C(C(F)(F)F)(C(F)(F)F)O', 'CC1=C(SC(=N1)NC(=O)C)S(=O)(=O)NC2=CC=C(C=C2)C(C(F)(F)F)(C(F)(F)F)O', 'C1=CC(=CC=C1C(=O)NC2=CC=C(C=C2)C(C(F)(F)F)(C(F)(F)F)O)C(F)(F)F', 'CC(=O)N1CCN(CC1)CC2=CC=C(C=C2)C3=CC=C(C=C3)C(C(F)(F)F)(C(F)(F)F)O', 'C1CN(CCN1CC2=CC=C(C=C2)C3=C(C=C(C=C3)C(C(F)(F)F)(C(F)(F)F)O)F)CC4=CC=NC=C4', 'C[C@@H]1C[C@@H](CN(C1)C(=O)CC(C2=CC3=C(C=C2)OCO3)C4=C(C=C(C=C4OC)OC)O)C', 'CC(=O)NC1=CC2=C(C=C1)N(CC1=CC=C(C=C1)C(O)(C(F)(F)F)C(F)(F)F)CCC2', 'C1CC2=C(C=C(C=C2)NS(=O)(=O)C3=C(C=C(C=C3)F)F)N(C1)S(=O)(=O)C4=CC=C(C=C4)F', 'C1CC2=C(C=C(C=C2)NC(=O)C3=C(C=CC=C3Cl)F)N(C1)S(=O)(=O)C4=CC=C(C=C4)F', 'CC1CN(C2=C(O1)N=CC(NC(=O)C1=C(F)C=CC=C1C(F)(F)F)=C2)S(=O)(=O)C1=CC=CC(C)=C1', 'OC(=O)C1=CC(F)=C(C=C1)C1=NN(C(=O)C2=C(C=CC=C2Cl)C2CC2)C2=C1C=CC=C2F', 'OC1C=C(CCC1C(O)=O)C1=NN(C(=O)C2=C(Cl)C=CC=C2C(F)(F)F)C2=C1C=CC=C2F', 'OC1C=C(CCC1C(O)=O)C1=NN(C(=O)C2=C(Cl)C=CC=C2C(F)(F)F)C2=C(F)C=CC=C12', 'CC1=CC=CC(=C1)C(=O)NC1=CC(=NO1)C1=CC=CC=C1', 'CC1=C(NC(=O)C2=NC3=C(C)C=C(C)C=C3S2)SC=C1', 'CCC(=O)NC1=CC2=NN(N=C2C=C1)C1=CC=C(CC)C=C1', 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=NC(=CS2)C2=CC(Cl)=CC=C2Cl)C=C1', 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=NC(=C(S2)C(=O)C2=C(Cl)C=CC=C2)C2=CC=CC(Cl)=C2)C=C1', 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=CC(=C(S2)C(=O)C2=CC(F)=CC=C2)C2=CC(Cl)=CC=C2)C=C1', 'CCCN(CC1=CC=CC=C1)C1=CC=C(NC(=O)CC2=CC=C(C=C2)S(=O)(=O)CC)C=C1', 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=CC=C3N(CCC4=CC=C(C=C4)C(F)(F)F)C=CC3=C2)C=C1', 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=CC=C3N(CCC4=C(Cl)C=C(Cl)C=C4)C=CC3=C2)C=C1', 'CCS(=O)(=O)C1=CC=C(CC(=O)NC2=CC(=C(OC3=CC=CC(F)=C3)C=C2)C2=CC=CC=C2)C=C1', 'CNS(=O)(=O)C1=CC=C(CC(=O)NC2=CC(=C(OC3=CC(F)=CC(F)=C3)C=C2)C2=CC=CC(F)=C2)C=C1', 'CCC1=CC=C(C=C1)N(CC(C)C)S(=O)(=O)C2=CC(=C(C=C2)OCC3CCOCC3)CO', 'CC(C)CN(C1=CC=C(Cl)C=C1)S(=O)(=O)C1=CC=C(OCC2=C(C)ON=C2C)C=C1', 'CC(C)CN(C1=CC=C(C)C=C1C)S(=O)(=O)C1=CC=C(OCC(O)C2=CC=NC=C2)C=C1', 'CC(C)CN(CC1=CC=C(S1)C1=CC=C(C=C1)S(C)(=O)=O)S(=O)(=O)CC1=CC=CC=C1', 
def SELFIES_length(SELFIES_mol):
    """Return the number of tokens in a SELFIES string.

    Every SELFIES token is written as `[...]`, so counting the opening
    brackets gives the token count. Returns 0 on non-string input
    (e.g. a None produced by a failed encoding) instead of crashing:
    the previous version appended to a list inside try/except and then
    called max() on it, so any failure left the list empty and
    `max([])` raised ValueError right after the warning was printed.
    """
    try:
        return SELFIES_mol.count('[')
    except AttributeError:
        print('Something went wrong, check source code...')
        return 0
type'] = compounds_activity dataframe['SELFIES_length'] = [SELFIES_length(SELFIES) for SELFIES in dataframe['SELFIES']] dataframe['SELFIES_length'] dataframe.to_excel('RORgamma_active_compounds.xlsx') ```
github_jupyter
def f(a, b=4, c=5):
    """Demonstrate positional, default and keyword arguments.

    Returns `a` when a > 2 and b < 10; otherwise `b` when c == 5;
    otherwise the sum of all three arguments.
    """
    if a > 2 and b < 10:
        result = a
    elif c == 5:
        result = b
    else:
        result = a + b + c
    return result
def f(x, a, b, c):
    """Exponential decay model a*exp(-b*x) + c used as the curve_fit target."""
    decay = np.exp(-b * x)
    return a * decay + c
github_jupyter
# Compare tangential shear profiles from the extragalactic and object catalogs for DC2 Run 2.1i This notebook can be run at NERSC or CC-IN2P3 where the DESC DC2 products are stored. You need to be a DESC member to be able to access those. The DC2 catalog-related imports below (`FoFCatalogMatching`, `GCR` and `GCRCatalogs`) are readily available from the `desc` conda environement at NERC or CC-IN2P3. If working outside such environment, these packagea first need to be installed. This was put together using: - the DC2 analysis tutorials (in particular [matching_fof.ipynb](https://github.com/LSSTDESC/DC2-analysis/blob/master/tutorials/matching_fof.ipynb) and [object_gcr_2_lensing_cuts.ipynb](https://github.com/LSSTDESC/DC2-analysis/blob/master/tutorials/object_gcr_2_lensing_cuts.ipynb)) - the CLMM usage examples ``` # General imports import numpy as np import matplotlib.pyplot as plt %matplotlib inline from astropy.table import Table # DC2 catalog-related imports import FoFCatalogMatching import GCRCatalogs from GCR import GCRQuery #CLMM imports try: import clmm except: import notebook_install notebook_install.install_clmm_pipeline(upgrade=False) import clmm ``` ### 1. Load the catalogs - DC2 object catalog - DC2 extragalactic catalog (cosmoDC2) ``` object_cat = GCRCatalogs.load_catalog('dc2_object_run2.1i_dr1') extragalactic_cat = GCRCatalogs.load_catalog('cosmoDC2_v1.1.4_small') ``` ### 2. Identify one halo in the extragalactic catalog Choosing the most massive one below z = 0.4. The `halo_mass` field of the cosmoDC2 catalog gives the mass in units of M$_{\odot}$. 
``` # get list of massive halos in a given redshift and mass range mmin = 5.e14 #Msun zmax = 0.4 massive_halos = extragalactic_cat.get_quantities(['halo_mass','hostHaloMass','redshift','ra', 'dec', 'halo_id'],\ filters=[f'halo_mass > {mmin}','is_central==True', f'redshift<{zmax}']) N_cl = len(massive_halos['halo_mass']) print(f'There are {N_cl} clusters in that mass and redshift ranges') # Selecting the most massive one select = massive_halos['halo_mass'] == np.max(massive_halos['halo_mass']) ra_cl = massive_halos['ra'][select][0] dec_cl = massive_halos['dec'][select][0] z_cl = massive_halos['redshift'][select][0] mass_cl =massive_halos['halo_mass'][select][0] id_cl = massive_halos['halo_id'][select][0] print (f'The most massive cluster is halo {id_cl}, in ra = {ra_cl:.2f} deg, dec = {dec_cl:.2f} deg, z = {z_cl:.2f}, with mass = {mass_cl:.2e} Msun') ``` ### 3. Selection of background galaxies around the cluster - Define cuts on the cosmoDC2 and object catalogs. - Box of 0.7 deg around the cluster center - Galaxies with z > z_cluster + 0.1 - Galaxies with mag_i < 24.5 - We also add some WL quality cuts for the object catalog. - The two catalogs will then be matched to end up with the same selection of galaxies. #### 3.1 Cut definition NB: the object catalog quality cuts follow that given in the [object_gcr_2_lensing_cuts.ipynb](https://github.com/LSSTDESC/DC2-analysis/blob/master/tutorials/object_gcr_2_lensing_cuts.ipynb) notebook. ``` # Coordinate filter to be applied applied to both extragalactic and object catalog ra_min, ra_max = ra_cl-0.35, ra_cl+0.35 dec_min, dec_max = dec_cl-0.35, dec_cl+0.35 coord_filters = [ f'ra >= {ra_min}', f'ra < {ra_max}', f'dec >= {dec_min}', f'dec < {dec_max}', ] # Redshift cut to be applied to the extragalactic catalog. The object catalog does not have redshift information. 
z_min = z_cl + 0.1 redshift_filters = [ (np.isfinite, 'redshift'), f'redshift > {z_min}', ] # Magnitude cut to be applied to both catalogs mag_filters = [ (np.isfinite, 'mag_i'), 'mag_i < 24.5', ] # Following DC2 tutorials, basics cuts to be applied to the object catalog object_basic_cuts = [ GCRQuery('extendedness > 0'), # Extended objects GCRQuery((np.isfinite, 'mag_i')), # Select objects that have i-band magnitudes GCRQuery('clean'), # The source has no flagged pixels (interpolated, saturated, edge, clipped...) # and was not skipped by the deblender GCRQuery('xy_flag == 0'), # Flag for centroid measurement (0 if OK) GCRQuery('ext_shapeHSM_HsmShapeRegauss_flag == 0'), # Flag returned by shape measurement code (0 if OK) GCRQuery((np.isfinite, 'ext_shapeHSM_HsmShapeRegauss_sigma')), # Shape measurement uncertainty should not be NaN ] # Adding the total ellipticity quantity to the object catalog object_cat.add_quantity_modifier('shape_hsm_regauss_etot', (np.hypot, 'ext_shapeHSM_HsmShapeRegauss_e1', 'ext_shapeHSM_HsmShapeRegauss_e2'), overwrite=True) # Following DC2 tutorials, additional WL quality cuts to be applied to the object catalog object_properties_cuts = [ GCRQuery('snr_i_cModel > 10'), # SNR > 10 GCRQuery('mag_i_cModel < 24.5'), # cModel imag brighter than 24.5 GCRQuery('ext_shapeHSM_HsmShapeRegauss_resolution >= 0.3'), # Sufficiently resolved galaxies compared to PSF GCRQuery('shape_hsm_regauss_etot < 2'), # Total distortion in reasonable range GCRQuery('ext_shapeHSM_HsmShapeRegauss_sigma <= 0.4'), # Shape measurement errors reasonable GCRQuery('blendedness < 10**(-0.375)') # Avoid spurious detections and those contaminated by blends ] ``` #### 3.2 Load quantities from both catalogs, given the cuts defined above ``` extragal_data = extragalactic_cat.get_quantities(['ra', 'dec', 'shear_1', 'shear_2', 'ellipticity_1_true', 'ellipticity_2_true', 'redshift', 'convergence', 'galaxy_id'], filters=(coord_filters + mag_filters + redshift_filters)) ``` For the 
object catalog below, the field under scrutiny falls in tract 3448. A DM-stack installation is required to identify a tract given a set of coordinates (this was done separately from this notebook). In any case, specifying that tract using `native_filters` speeds up the process but is not required. ``` object_data = object_cat.get_quantities(['ra', 'dec', 'ext_shapeHSM_HsmShapeRegauss_e1','ext_shapeHSM_HsmShapeRegauss_e2', 'id'], native_filters=['tract == 3448'], filters=(coord_filters + object_basic_cuts + object_properties_cuts)) ``` ### 4. Match the 2 catalogs Using the `FoFCatalogMatching` method; this was examplified in the [DC2 analysis tutorial](https://github.com/LSSTDESC/DC2-analysis/blob/master/tutorials/matching_fof.ipynb) and adapted to our purpose here. As mentioned in the tutorial, *`FoFCatalogMatching.match` takes a dictionary of catalogs to match and a friends-of-friends linking length. Because the "catalog" is not an astropy table or pandas dataframe, `len(truth_coord)` won't give the actual length of the table so we need to specify `catalog_len_getter` so that the code knows how to get the length of the catalog.* NB: `linking_lengths` is in arcsec. Here, we ask `FoFCatalogMatching` to use a linking length of 1 arcsec. 
#### 4.1 Perform the matching ``` results = FoFCatalogMatching.match( catalog_dict={'extragal': extragal_data, 'object': object_data}, linking_lengths=1., catalog_len_getter=lambda x: len(x['ra']), ) # identify which rows are from the extragalactic catalog and which are from the object extragal_mask = results['catalog_key'] == 'extragal' object_mask = ~extragal_mask # np.bincount will give up the number of id occurrences (like histogram but with integer input) n_groups = results['group_id'].max() + 1 n_extragal = np.bincount(results['group_id'][extragal_mask], minlength=n_groups) n_object = np.bincount(results['group_id'][object_mask], minlength=n_groups) ``` #### 4.2 Identify one-to-one extragal/object matches ``` one_to_one_group_mask = np.in1d(results['group_id'], np.flatnonzero((n_extragal == 1) & (n_object == 1))) # Row indices in the *original* extragal/object catalogs for those 1-to-1 groups extragal_idx = results['row_index'][one_to_one_group_mask & extragal_mask] object_idx = results['row_index'][one_to_one_group_mask & object_mask] print(f'Number of 1-to-1 matched objects: {len(extragal_idx)}, {len(object_idx)}') ``` ### 5. Computes the reduced tangential shear profiles from both datasets, using CLMM #### 5.1 First, dealing with the cosmoDC2 data. To measure a reduced tangential shear profile, the shape measurements must be made according to the $\epsilon$ or reduced shear definition $g$. So first , we convert cosmoDC2 `shear1` and `shear2` quantities to reduced shear using the `convergence`. These become the `e1` and `e2` fields of the CLMM cluster galaxy catalog. 
``` e1, e2 = clmm.utils.convert_shapes_to_epsilon(extragal_data['shear_1'][extragal_idx],extragal_data['shear_2'][extragal_idx], shape_definition='shear',kappa=extragal_data['convergence'][extragal_idx]) # Create the background galaxy catalog as a CLMM GCData (= astropy table) dat = clmm.GCData([extragal_data['ra'][extragal_idx],extragal_data['dec'][extragal_idx],e1, e2,extragal_data['redshift'][extragal_idx],extragal_data['galaxy_id'][extragal_idx]], names=('ra','dec', 'e1', 'e2', 'z','id')) # Instantiate a CLMM cluster object and save it for later use. cl_from_cosmoDC2 = clmm.GalaxyCluster(str(id_cl), ra_cl, dec_cl, z_cl, dat) cl_from_cosmoDC2.save('cosmoDC2_GC.pkl') ``` #### 5.2 Second, doing the same for the DC2 object catalog In the object catalog, shapes are measured by `shapeHSM` which return ellipticities according to the $\chi$ definition. Need to convert to the $\epsilon$ definition, once again using the conversion helper function from CLMM. ``` e1, e2 = clmm.utils.convert_shapes_to_epsilon(object_data['ext_shapeHSM_HsmShapeRegauss_e1'][object_idx], object_data['ext_shapeHSM_HsmShapeRegauss_e2'][object_idx], shape_definition='chi') # The conversion may create NaN, so avoid these by creating a mask mask = np.isfinite(e1) ``` The object catalog has no redshift information so we'll use the redshift of the matched galaxies in cosmoDC2 to create the GalaxyCluster object. 
``` # Create the background galaxy catalog as a CLMM GCData (= astropy table) dat = clmm.GCData([object_data['ra'][object_idx][mask],object_data['dec'][object_idx][mask], e1[mask], e2[mask], extragal_data['redshift'][extragal_idx][mask], object_data['id'][object_idx][mask]], names=('ra','dec', 'e1', 'e2', 'z','id'), masked=True) # Create the background galaxy catalog as astropy table and save it for later use cl_from_objectDC2 = clmm.GalaxyCluster(str(id_cl), ra_cl, dec_cl, z_cl, dat) cl_from_objectDC2.save('objectDC2_GC.pkl') ``` #### 5.3 Build the reduced tangential shear profile from both datasets ``` cl_from_objectDC2 = clmm.GalaxyCluster.load('objectDC2_GC.pkl') cl_from_cosmoDC2 = clmm.GalaxyCluster.load('cosmoDC2_GC.pkl') dc2_cosmo = extragalactic_cat.cosmology cosmo = clmm.Cosmology(H0 = dc2_cosmo.H0, Omega_dm0 = dc2_cosmo.Om0-dc2_cosmo.Ob0, Omega_b0 = dc2_cosmo.Ob0) bin_edges = clmm.dataops.make_bins(0.15, 4, 10, method='evenlog10width') cl_from_cosmoDC2.compute_tangential_and_cross_components(geometry="flat") profile_from_cosmoDC2 = cl_from_cosmoDC2.make_radial_profile("Mpc", bins=bin_edges,cosmo=cosmo) cl_from_objectDC2.compute_tangential_and_cross_components(geometry="flat") profile_from_objectDC2 = cl_from_objectDC2.make_radial_profile("Mpc", bins=bin_edges,cosmo=cosmo) ``` #### 5.4 Taking into account intrinsic ellipticities from cosmoDC2 So far, we've used the `shear1` and `shear2` fields of cosmoDC2, i.e., we neglected the intrinsic ellipticities of the galaxies. To account for shape noise from intrinsic ellipticities, we can use the shears and unlensed ellipticities available in the cosmoDC2 catalog to build lensed ellipticities (this is done using the `compute_lensed_ellipticity` function available in CLMM - see the documentation for details). The latter can then be used to build a CLMM cluster object. The resulting tangential shear profile will then include shape noise.
``` es1 = extragal_data['ellipticity_1_true'] es2 = extragal_data['ellipticity_2_true'] gamma1 = extragal_data['shear_1'] gamma2 = extragal_data['shear_2'] kappa = extragal_data['convergence'] extragal_data['ellipticity_1'] = clmm.utils.compute_lensed_ellipticity(es1, es2, gamma1, gamma2, kappa)[0] extragal_data['ellipticity_2'] = clmm.utils.compute_lensed_ellipticity(es1, es2, gamma1, gamma2, kappa)[1] ``` Make a new CLMM cluster object ``` dat = clmm.GCData([extragal_data['ra'][extragal_idx],extragal_data['dec'][extragal_idx], extragal_data['ellipticity_1'][extragal_idx], extragal_data['ellipticity_2'][extragal_idx], extragal_data['redshift'][extragal_idx], extragal_data['galaxy_id'][extragal_idx]], names=('ra','dec', 'e1', 'e2', 'z','id')) cl_from_cosmoDC2_with_e1e2 = clmm.GalaxyCluster(str(id_cl), ra_cl, dec_cl, z_cl, dat) ``` Compute the reduced shear profile ``` cl_from_cosmoDC2_with_e1e2.compute_tangential_and_cross_components(geometry="flat") profile_from_cosmoDC2_with_e1e2 = cl_from_cosmoDC2_with_e1e2.make_radial_profile("Mpc", bins=bin_edges,cosmo=cosmo) ``` ### 6. 
Visualize the results for the three profiles, obtained from the same galaxies in the two catalogs - from cosmoDC2, neglecting shape noise (blue points) - from cosmoDC2, including shape noise (orange) - for the DC2 object catalog (green, where the galaxies' redshifts are taken from cosmoDC2) ``` plt.errorbar(profile_from_cosmoDC2['radius'],profile_from_cosmoDC2['gt'],profile_from_cosmoDC2['gt_err'], marker='o',label='from cosmoDC2 g1g2') plt.errorbar(profile_from_cosmoDC2_with_e1e2['radius'],profile_from_cosmoDC2_with_e1e2['gt'], profile_from_cosmoDC2['gt_err'],label='from cosmoDC2 e1e2') plt.errorbar(profile_from_objectDC2['radius'],profile_from_objectDC2['gt'],profile_from_objectDC2['gt_err'], label='from DC2 objects e1e2') plt.legend() plt.xscale('log') plt.yscale('log') plt.xlabel('R (Mpc)') plt.ylabel(r'$\langle g_t \rangle$') plt.ylim([2.e-3,0.3]) ``` From cosmoDC2 (orange and blue profiles above), we see the impact of shape noise at low radii (orange/blue = with/without intrinsic ellipticities), where the number of galaxies per bin is small (see below). The error bars on the data computed by `make_shear_profile` simply correspond to the standard error of the mean in the bin ($\sigma_{\rm bin}/\sqrt{N_{\rm gal\_in\_bin}}$). The errors on individual shape measurements on the DC2 object catalog have been neglected. ``` plt.scatter(profile_from_cosmoDC2['radius'], profile_from_cosmoDC2['n_src'], marker='o') [plt.axvline(x=r, ymin=0, ymax=1e3, color='k', linestyle=':') for r in profile_from_cosmoDC2['radius_min']] plt.ylabel('Ngal in the bin') plt.xlabel('R (Mpc)') plt.xscale('log') plt.yscale('log') plt.title('Number of galaxies in each bin') ```
github_jupyter
# User Study ``` import pandas as pd import numpy as np import math import time eval_dir = "gc_imdb" # df = pd.read_csv("../data/" + eval_dir + "/test.csv", header=None, sep="\t", names=[0, 1, "mutant", "template", "gender", "label", "country"]) df = pd.read_csv("../data/" + eval_dir + "/test.csv", header=None, sep="\t", names=["label", "mutant", "template", "original", "identifier", "type", "gender", "country"]) df def read_txt(fpath): pred = [] file = open(fpath) lines = file.readlines() for l in lines : pred.append(int(l)) file.close() return pred output_dir = "gc_imdb" result_dir = "../result/" + output_dir + "/" path = result_dir + "results_data.txt" pred = read_txt(path) print(len(pred)) df["prediction"] = pred df df[df["label"] == 0].reset_index(drop=True)["original"][0] ``` ### Use Groupby to Group the text by Template ``` df["template"] = df["template"].astype("category") df["template_id"] = df["template"].cat.codes gb = df.groupby("template_id") gb.count() len(gb.size()) df ``` ### Get DF template only ``` dft = df.iloc[:,[2,3,9]] dft = dft.drop_duplicates() dft ## template dft = dft.sort_values(by=["template_id"]) dft = dft.reset_index(drop=True) ## mutant df = df.reset_index(drop=True) df dft dft.to_csv("gender-template.csv") ``` ## Get Number of Discordant Pairs for Each Template There is a memory limitation that make us can't directly produce +- 240M pairs. Fortunately, the number of discordant pairs for each template can be calculate theoritically without crossing th data to get +- 240M pairs. This will solve the memory issue. 
For each template, we will give an example of the male mutant and female mutant for user study ``` gb = df.groupby("template_id") gb.count() ``` ### Data crossing ``` import time start = time.time() identifier = "gender" mutant_example = [] mutant_prediction_stat = [] key = [] for i in range(len(gb.size())) : # for i in range(10) : data = gb.get_group(i) dc = data.groupby(identifier) me = {} # mutant example mp = {} # mutant prediction key = [] for k, v in dict(iter(dc)).items() : key.append(k) is_first_instance = True pos_counter = 0 # positive counter neg_counter = 0 # negative counter for m, p in zip(v["mutant"].values, v["prediction"].values) : if is_first_instance : me[k] = m is_first_instance = False if p == 1 : pos_counter += 1 else : neg_counter += 1 mp[k] = {"pos": pos_counter, "neg" : neg_counter} mutant_example.append(me) mutant_prediction_stat.append(mp) end = time.time() print("Execution time: ", end-start) dft["mutant_example"] = mutant_example dft["mutant_prediction_stat"] = mutant_prediction_stat dft key btcs = [] pairs = [] for mp in dft["mutant_prediction_stat"].values : if len(mp) > 0 : btc = 0 pair = 0 already_processed = [] for k1 in key : for k2 in key : if k1 != k2 : k = k1 + "-" + k2 if k1 > k2 : k = k2 + "-" + k1 if k not in already_processed : already_processed.append(k) btc += ((mp[k1]["pos"] * mp[k2]["neg"]) + (mp[k1]["neg"] * mp[k2]["pos"])) pair += (mp[k1]["pos"] + mp[k1]["neg"]) * (mp[k2]["pos"] + mp[k2]["neg"]) # double_counting_divider = len(key) * (len(key)-1) # dp.append(int(_dp/double_counting_divider)) # we must divide the number with the number of key to reduce the double counting btcs.append(btc) pairs.append(pair) else : btcs.append(0) pairs.append(0) dft["btc"] = btcs dft["possible_pair"] = pairs dft ``` ### Number of Bias-uncvering Test Case ``` int(dft["btc"].sum()) ``` ### BTC Rate ``` dft["btc"].sum() / dft["possible_pair"].sum() ``` ### Get Data that Have number of BTC more than one ``` d = dft[dft["btc"] > 0] d.head() 
``` ### Sort Data based on the number of BTC ``` d = d.sort_values(["btc", "template"], ascending=False) d = d.reset_index(drop=True) d d.to_csv("occ-age/gender-btc.csv") d.iloc[0]["mutant_prediction_stat"] d.groupby("template_id").get_group(2760).iloc[0]["mutant_prediction_stat"] # d.groupby("template_id").get_group(2760).iloc[0]["mutant_example"] # d.groupby("template_id").get_group(2760).iloc[0]["template"] ``` ### Get Data BTC for train and test ``` df template_that_produce_btc = d["template_id"].tolist() # template_that_produce_btc start = time.time() mutant_text_1 = [] mutant_text_2 = [] prediction_1 = [] prediction_2 = [] identifier_1 = [] identifier_2 = [] template = [] label = [] for i in template_that_produce_btc: # only processing from template that produce BTC data = gb.get_group(i) dc = data.groupby(identifier) already_processed = [] for k1, v1 in dict(iter(dc)).items() : for k2, v2 in dict(iter(dc)).items() : if k1 != k2 : key = k1 + "-" + k2 if k1 > k2 : key = k2 + "-" + k1 if key not in already_processed : already_processed.append(key) for m_1, p_1, i_1, t, l in zip(v1["mutant"].values, v1["prediction"].values, v1[identifier].values, v1["template"].values, v1["label"].values) : for m_2, p_2, i_2 in zip(v2["mutant"].values, v2["prediction"].values, v2[identifier].values) : if p_1 != p_2 : # only add discordant pairs mutant_text_1.append(m_1) prediction_1.append(p_1) identifier_1.append(i_1) mutant_text_2.append(m_2) prediction_2.append(p_2) identifier_2.append(i_2) template.append(t) label.append(l) end = time.time() print("Execution time: ", end-start) btc = pd.DataFrame(data={"mutant_1" : mutant_text_1, "mutant_2" : mutant_text_2, "prediction_1": prediction_1, "prediction_2" : prediction_2, "identifier_1": identifier_1, "identifier_2" : identifier_2, "template": template, "label": label}) btc import os data_dir = "../data/rq2/biasfinder_btc/" if not os.path.exists(data_dir) : os.makedirs(data_dir) train = btc.sample(frac=1, random_state=123) 
train.to_csv(data_dir + "train.csv", index=None, header=None, sep="\t") data_dir = "../data/rq2/biasfinder_btc/" if not os.path.exists(data_dir) : os.makedirs(data_dir) train.to_csv(data_dir + "test.csv", index=None, header=None, sep="\t") ```
github_jupyter
# SageMaker Debugger Profiling Report SageMaker Debugger auto generated this report. You can generate similar reports on all supported training jobs. The report provides summary of training job, system resource usage statistics, framework metrics, rules summary, and detailed analysis from each rule. The graphs and tables are interactive. **Legal disclaimer:** This report and any recommendations are provided for informational purposes only and are not definitive. You are responsible for making your own independent assessment of the information. ``` import json import pandas as pd import glob import matplotlib.pyplot as plt import numpy as np import datetime from smdebug.profiler.utils import us_since_epoch_to_human_readable_time, ns_since_epoch_to_human_readable_time import bokeh from bokeh.io import output_notebook, show from bokeh.layouts import column, row from bokeh.plotting import figure from bokeh.models.widgets import DataTable, DateFormatter, TableColumn from bokeh.models import ColumnDataSource, PreText from math import pi from bokeh.transform import cumsum import warnings from bokeh.models.widgets import Paragraph from bokeh.models import Legend from bokeh.util.warnings import BokehDeprecationWarning, BokehUserWarning warnings.simplefilter('ignore', BokehDeprecationWarning) warnings.simplefilter('ignore', BokehUserWarning) output_notebook(hide_banner=True) def create_piechart(data_dict, title=None, height=400, width=400, x1=0, x2=0.1, radius=0.4, toolbar_location='right'): plot = figure(plot_height=height, plot_width=width, toolbar_location=toolbar_location, tools="hover,wheel_zoom,reset,pan", tooltips="@phase:@value", title=title, x_range=(-radius-x1, radius+x2)) data = pd.Series(data_dict).reset_index(name='value').rename(columns={'index':'phase'}) data['angle'] = data['value']/data['value'].sum() * 2*pi data['color'] = bokeh.palettes.viridis(len(data_dict)) plot.wedge(x=0, y=0., radius=radius, start_angle=cumsum('angle', include_zero=True), 
end_angle=cumsum('angle'), line_color="white", source=data, fill_color='color', legend='phase' ) plot.legend.label_text_font_size = "8pt" plot.legend.location = 'center_right' plot.axis.axis_label=None plot.axis.visible=False plot.grid.grid_line_color = None plot.outline_line_color = "white" return plot from IPython.display import display, HTML, Markdown, Image def pretty_print(df): raw_html = df.to_html().replace("\\n","<br>").replace('<tr>','<tr style="text-align: left;">') return display(HTML(raw_html)) ``` ## Training job summary ``` def load_report(rule_name): try: report = json.load(open('/opt/ml/processing/output/rule/profiler-output/profiler-reports/'+rule_name+'.json')) return report except FileNotFoundError: print (rule_name + ' not triggered') job_statistics = {} report = load_report('MaxInitializationTime') if report: if "first" in report['Details']["step_num"] and "last" in report['Details']["step_num"]: first_step = report['Details']["step_num"]["first"] last_step = report['Details']["step_num"]["last"] tmp = us_since_epoch_to_human_readable_time(report['Details']['job_start'] * 1000000) date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") job_statistics["Start time"] = f"{hour} {day}" tmp = us_since_epoch_to_human_readable_time(report['Details']['job_end'] * 1000000) date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") job_statistics["End time"] = f"{hour} {day}" job_duration_in_seconds = int(report['Details']['job_end'] - report['Details']['job_start']) job_statistics["Job duration"] = f"{job_duration_in_seconds} seconds" if "first" in report['Details']["step_num"] and "last" in report['Details']["step_num"]: tmp = us_since_epoch_to_human_readable_time(first_step) date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = 
date.time().strftime("%H:%M:%S") job_statistics["Training loop start"] = f"{hour} {day}" tmp = us_since_epoch_to_human_readable_time(last_step) date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") job_statistics["Training loop end"] = f"{hour} {day}" training_loop_duration_in_seconds = int((last_step - first_step) / 1000000) job_statistics["Training loop duration"] = f"{training_loop_duration_in_seconds} seconds" initialization_in_seconds = int(first_step/1000000 - report['Details']['job_start']) job_statistics["Initialization time"] = f"{initialization_in_seconds} seconds" finalization_in_seconds = int(np.abs(report['Details']['job_end'] - last_step/1000000)) job_statistics["Finalization time"] = f"{finalization_in_seconds} seconds" initialization_perc = int(initialization_in_seconds / job_duration_in_seconds * 100) job_statistics["Initialization"] = f"{initialization_perc} %" training_loop_perc = int(training_loop_duration_in_seconds / job_duration_in_seconds * 100) job_statistics["Training loop"] = f"{training_loop_perc} %" finalization_perc = int(finalization_in_seconds / job_duration_in_seconds * 100) job_statistics["Finalization"] = f"{finalization_perc} %" if report: text = """The following table gives a summary about the training job. 
The table includes information about when the training job started and ended, how much time initialization, training loop and finalization took.""" if len(job_statistics) > 0: df = pd.DataFrame.from_dict(job_statistics, orient='index') start_time = us_since_epoch_to_human_readable_time(report['Details']['job_start'] * 1000000) date = datetime.datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") duration = job_duration_in_seconds text = f"""{text} \n Your training job started on {day} at {hour} and ran for {duration} seconds.""" #pretty_print(df) if "first" in report['Details']["step_num"] and "last" in report['Details']["step_num"]: if finalization_perc < 0: job_statistics["Finalization%"] = 0 if training_loop_perc < 0: job_statistics["Training loop"] = 0 if initialization_perc < 0: job_statistics["Initialization"] = 0 else: text = f"""{text} \n Your training job started on {day} at {hour} and ran for {duration} seconds.""" if len(job_statistics) > 0: df2 = df.reset_index() df2.columns = ["0", "1"] source = ColumnDataSource(data=df2) columns = [TableColumn(field='0', title=""), TableColumn(field='1', title="Job Statistics"),] table = DataTable(source=source, columns=columns, width=450, height=380) plot = None if "Initialization" in job_statistics: piechart_data = {} piechart_data["Initialization"] = initialization_perc piechart_data["Training loop"] = training_loop_perc piechart_data["Finalization"] = finalization_perc plot = create_piechart(piechart_data, height=350, width=500, x1=0.15, x2=0.15, radius=0.15, toolbar_location=None) if plot != None: paragraph = Paragraph(text=f"""{text}""", width = 800) show(column(paragraph, row(table, plot))) else: paragraph = Paragraph(text=f"""{text}. No step information was profiled from your training job. 
The time spent on initialization and finalization cannot be computed.""" , width = 800) show(column(paragraph, row(table))) ``` ## System usage statistics ``` report = load_report('OverallSystemUsage') text1 = '' if report: if "GPU" in report["Details"]: for node_id in report["Details"]["GPU"]: gpu_p95 = report["Details"]["GPU"][node_id]["p95"] gpu_p50 = report["Details"]["GPU"][node_id]["p50"] cpu_p95 = report["Details"]["CPU"][node_id]["p95"] cpu_p50 = report["Details"]["CPU"][node_id]["p50"] if gpu_p95 < 70 and cpu_p95 < 70: text1 = f"""{text1}The 95th percentile of the total GPU utilization on node {node_id} is only {int(gpu_p95)}%. The 95th percentile of the total CPU utilization is only {int(cpu_p95)}%. Node {node_id} is underutilized. You may want to consider switching to a smaller instance type.""" elif gpu_p95 < 70 and cpu_p95 > 70: text1 = f"""{text1}The 95th percentile of the total GPU utilization on node {node_id} is only {int(gpu_p95)}%. However, the 95th percentile of the total CPU utilization is {int(cpu_p95)}%. GPUs on node {node_id} are underutilized likely because of CPU bottlenecks""" elif gpu_p50 > 70: text1 = f"""{text1}The median total GPU utilization on node {node_id} is {int(gpu_p50)}%. GPUs on node {node_id} are well utilized""" else: text1 = f"""{text1}The median total GPU utilization on node {node_id} is {int(gpu_p50)}%. The median total CPU utilization is {int(cpu_p50)}%.""" else: for node_id in report["Details"]["CPU"]: cpu_p95 = report["Details"]["CPU"][node_id]["p95"] if cpu_p95 > 70: text1 = f"""{text1}The 95th percentile of the total CPU utilization on node {node_id} is {int**(cpu_p95)}%. GPUs on node {node_id} are well utilized""" text1 = Paragraph(text=f"""{text1}""", width=1100) text2 = Paragraph(text=f"""The following table shows statistics of resource utilization per worker (node), such as the total CPU and GPU utilization, and the memory utilization on CPU and GPU. 
The table also includes the total I/O wait time and the total amount of data sent or received in bytes. The table shows min and max values as well as p99, p90 and p50 percentiles.""", width=900) pd.set_option('display.float_format', lambda x: '%.2f' % x) rows = [] units = {"CPU": "percentage", "CPU memory": "percentage", "GPU": "percentage", "Network": "bytes", "GPU memory": "percentage", "I/O": "percentage"} if report: for metric in report['Details']: for node_id in report['Details'][metric]: values = report['Details'][metric][node_id] rows.append([node_id, metric, units[metric], values['max'], values['p99'], values['p95'], values['p50'], values['min']]) df = pd.DataFrame(rows) df.columns = ['Node', 'metric', 'unit', 'max', 'p99', 'p95', 'p50', 'min'] df2 = df.reset_index() source = ColumnDataSource(data=df2) columns = [TableColumn(field='Node', title="node"), TableColumn(field='metric', title="metric"), TableColumn(field='unit', title="unit"), TableColumn(field='max', title="max"), TableColumn(field='p99', title="p99"), TableColumn(field='p95', title="p95"), TableColumn(field='p50', title="p50"), TableColumn(field='min', title="min"),] table = DataTable(source=source, columns=columns, width=800, height=df2.shape[0]*30) show(column( text1, text2, row(table))) report = load_report('OverallFrameworkMetrics') if report: if 'Details' in report: display(Markdown(f"""## Framework metrics summary""")) plots = [] text = '' if 'phase' in report['Details']: text = f"""The following two pie charts show the time spent on the TRAIN phase, the EVAL phase, and others. The 'others' includes the time spent between steps (after one step has finished and before the next step has started). Ideally, most of the training time should be spent on the TRAIN and EVAL phases. 
If TRAIN/EVAL were not specified in the training script, steps will be recorded as GLOBAL.""" if 'others' in report['Details']['phase']: others = float(report['Details']['phase']['others']) if others > 25: text = f"""{text} Your training job spent quite a significant amount of time ({round(others,2)}%) in phase "others". You should check what is happening in between the steps.""" plot = create_piechart(report['Details']['phase'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between the time spent on the TRAIN/EVAL phase and others") plots.append(plot) if 'forward_backward' in report['Details']: event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get) perc = report['Details']['forward_backward'][event] text = f"""{text} The pie chart on the right shows a more detailed breakdown. It shows that {int(perc)}% of the time was spent in event "{event}".""" if perc > 70: text = f"""There is quite a significant difference between the time spent on forward and backward pass.""" else: text = f"""{text} It shows that {int(perc)}% of the training time was spent on "{event}".""" plot = create_piechart(report['Details']['forward_backward'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between forward and backward pass") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=1100) show(column(paragraph, row(plots))) plots = [] text='' if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0: key = list(report['Details']['ratio'].keys())[0] ratio = report['Details']['ratio'][key] text = f"""The following piechart shows a breakdown of the CPU/GPU operators. 
It shows that {int(ratio)}% of training time was spent on executing the "{key}" operator.""" plot = create_piechart(report['Details']['ratio'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between the time spent on CPU/GPU operators") plots.append(plot) if 'general' in report['Details']: event = max(report['Details']['general'], key=report['Details']['general'].get) perc = report['Details']['general'][event] plot = create_piechart(report['Details']['general'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General framework operations") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=1100) show(column(paragraph, row(plots))) plots = [] text = '' if 'horovod' in report['Details']: display(Markdown(f"""#### Overview: Horovod metrics""")) event = max(report['Details']['horovod'], key=report['Details']['horovod'].get) perc = report['Details']['horovod'][event] text = f"""{text} The following pie chart shows a detailed breakdown of the Horovod metrics profiled from your training job. 
The most expensive function was "{event}" with {int(perc)}%.""" plot = create_piechart(report['Details']['horovod'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="Horovod metrics ") paragraph = Paragraph(text=text, width=1100) show(column(paragraph, row(plot))) pd.set_option('display.float_format', lambda x: '%.2f' % x) rows = [] values = [] if report: if 'CPU_total' in report['Details']: display(Markdown(f"""#### Overview: CPU operators""")) event = max(report['Details']['CPU'], key=report['Details']['CPU'].get) perc = report['Details']['CPU'][event] for function in report['Details']['CPU']: percentage = round(report['Details']['CPU'][function],2) time = report['Details']['CPU_total'][function] rows.append([percentage, time, function]) df = pd.DataFrame(rows) df.columns = ['percentage', 'time', 'operator'] df = df.sort_values(by=['percentage'], ascending=False) source = ColumnDataSource(data=df) columns = [TableColumn(field='percentage', title="Percentage"), TableColumn(field='time', title="Cumulative time in microseconds"), TableColumn(field='operator', title="CPU operator"),] table = DataTable(source=source, columns=columns, width=550, height=350) text = Paragraph(text=f"""The following table shows a list of operators that ran on the CPUs. 
The most expensive operator on the CPUs was "{event}" with {int(perc)} %.""") plot = create_piechart(report['Details']['CPU'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, ) show(column(text, row(table, plot))) pd.set_option('display.float_format', lambda x: '%.2f' % x) rows = [] values = [] if report: if 'GPU_total' in report['Details']: display(Markdown(f"""#### Overview: GPU operators""")) event = max(report['Details']['GPU'], key=report['Details']['GPU'].get) perc = report['Details']['GPU'][event] for function in report['Details']['GPU']: percentage = round(report['Details']['GPU'][function],2) time = report['Details']['GPU_total'][function] rows.append([percentage, time, function]) df = pd.DataFrame(rows) df.columns = ['percentage', 'time', 'operator'] df = df.sort_values(by=['percentage'], ascending=False) source = ColumnDataSource(data=df) columns = [TableColumn(field='percentage', title="Percentage"), TableColumn(field='time', title="Cumulative time in microseconds"), TableColumn(field='operator', title="GPU operator"),] table = DataTable(source=source, columns=columns, width=450, height=350) text = Paragraph(text=f"""The following table shows a list of operators that your training job ran on GPU. The most expensive operator on GPU was "{event}" with {int(perc)} %""") plot = create_piechart(report['Details']['GPU'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, ) show(column(text, row(table, plot))) ``` ## Rules summary ``` description = {} description['CPUBottleneck'] = 'Checks if the CPU utilization is high and the GPU utilization is low. \ It might indicate CPU bottlenecks, where the GPUs are waiting for data to arrive \ from the CPUs. The rule evaluates the CPU and GPU utilization rates, and triggers the issue \ if the time spent on the CPU bottlenecks exceeds a threshold percent of the total training time. The default threshold is 50 percent.' 
description['IOBottleneck'] = 'Checks if the data I/O wait time is high and the GPU utilization is low. \ It might indicate IO bottlenecks where GPU is waiting for data to arrive from storage. \ The rule evaluates the I/O and GPU utilization rates and triggers the issue \ if the time spent on the IO bottlenecks exceeds a threshold percent of the total training time. The default threshold is 50 percent.' description['Dataloader'] = 'Checks how many data loaders are running in parallel and whether the total number is equal the number \ of available CPU cores. The rule triggers if number is much smaller or larger than the number of available cores. \ If too small, it might lead to low GPU utilization. If too large, it might impact other compute intensive operations on CPU.' description['GPUMemoryIncrease'] = 'Measures the average GPU memory footprint and triggers if there is a large increase.' description['BatchSize'] = 'Checks if GPUs are underutilized because the batch size is too small. \ To detect this problem, the rule analyzes the average GPU memory footprint, \ the CPU and the GPU utilization. ' description['LowGPUUtilization'] = 'Checks if the GPU utilization is low or fluctuating. \ This can happen due to bottlenecks, blocking calls for synchronizations, \ or a small batch size.' description['MaxInitializationTime'] = 'Checks if the time spent on initialization exceeds a threshold percent of the total training time. \ The rule waits until the first step of training loop starts. The initialization can take longer \ if downloading the entire dataset from Amazon S3 in File mode. The default threshold is 20 minutes.' description['LoadBalancing'] = 'Detects workload balancing issues across GPUs. \ Workload imbalance can occur in training jobs with data parallelism. \ The gradients are accumulated on a primary GPU, and this GPU might be overused \ with regard to other GPUs, resulting in reducing the efficiency of data parallelization.' 
description['StepOutlier'] = 'Detects outliers in step duration. The step duration for forward and backward pass should be \ roughly the same throughout the training. If there are significant outliers, \ it may indicate a system stall or bottleneck issues.' recommendation = {} recommendation['CPUBottleneck'] = 'Consider increasing the number of data loaders \ or applying data pre-fetching.' recommendation['IOBottleneck'] = 'Pre-fetch data or choose different file formats, such as binary formats that \ improve I/O performance.' recommendation['Dataloader'] = 'Change the number of data loader processes.' recommendation['GPUMemoryIncrease'] = 'Choose a larger instance type with more memory if footprint is close to maximum available memory.' recommendation['BatchSize'] = 'The batch size is too small, and GPUs are underutilized. Consider running on a smaller instance type or increasing the batch size.' recommendation['LowGPUUtilization'] = 'Check if there are bottlenecks, minimize blocking calls, \ change distributed training strategy, or increase the batch size.' recommendation['MaxInitializationTime'] = 'Initialization takes too long. \ If using File mode, consider switching to Pipe mode in case you are using TensorFlow framework.' recommendation['LoadBalancing'] = 'Choose a different distributed training strategy or \ a different distributed training framework.' recommendation['StepOutlier'] = 'Check if there are any bottlenecks (CPU, I/O) correlated to the step outliers.' 
files = glob.glob('/opt/ml/processing/output/rule/profiler-output/profiler-reports/*json') summary = {} for i in files: rule_name = i.split('/')[-1].replace('.json','') if rule_name == "OverallSystemUsage" or rule_name == "OverallFrameworkMetrics": continue rule_report = json.load(open(i)) summary[rule_name] = {} summary[rule_name]['Description'] = description[rule_name] summary[rule_name]['Recommendation'] = recommendation[rule_name] summary[rule_name]['Number of times rule triggered'] = rule_report['RuleTriggered'] #summary[rule_name]['Number of violations'] = rule_report['Violations'] summary[rule_name]['Number of datapoints'] = rule_report['Datapoints'] summary[rule_name]['Rule parameters'] = rule_report['RuleParameters'] df = pd.DataFrame.from_dict(summary, orient='index') df = df.sort_values(by=['Number of times rule triggered'], ascending=False) display(Markdown(f"""The following table shows a profiling summary of the Debugger built-in rules. The table is sorted by the rules that triggered the most frequently. During your training job, the {df.index[0]} rule was the most frequently triggered. It processed {df.values[0,3]} datapoints and was triggered {df.values[0,2]} times.""")) with pd.option_context('display.colheader_justify','left'): pretty_print(df) analyse_phase = "training" if job_statistics and "initialization_in_seconds" in job_statistics: if job_statistics["initialization_in_seconds"] > job_statistics["training_loop_duration_in_seconds"]: analyse_phase = "initialization" time = job_statistics["initialization_in_seconds"] perc = job_statistics["initialization_%"] display(Markdown(f"""The initialization phase took {int(time)} seconds, which is {int(perc)}%* of the total training time. 
Since the training loop has taken the most time, we dive deep into the events occurring during this phase""")) display(Markdown("""## Analyzing initialization\n\n""")) time = job_statistics["training_loop_duration_in_seconds"] perc = job_statistics["training_loop_%"] display(Markdown(f"""The training loop lasted for {int(time)} seconds which is {int(perc)}% of the training job time. Since the training loop has taken the most time, we dive deep into the events occured during this phase.""")) if analyse_phase == 'training': display(Markdown("""## Analyzing the training loop\n\n""")) if analyse_phase == "initialization": display(Markdown("""### MaxInitializationTime\n\nThis rule helps to detect if the training initialization is taking too much time. \nThe rule waits until first step is available. The rule takes the parameter `threshold` that defines how many minutes to wait for the first step to become available. Default is 20 minutes.\nYou can run the rule locally in the following way: """)) _ = load_report("MaxInitializationTime") if analyse_phase == "training": display(Markdown("""### Step duration analysis""")) report = load_report('StepOutlier') if report: parameters = report['RuleParameters'] params = report['RuleParameters'].split('\n') stddev = params[3].split(':')[1] mode = params[1].split(':')[1] n_outlier = params[2].split(':')[1] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] text = f"""The StepOutlier rule measures step durations and checks for outliers. The rule returns True if duration is larger than {stddev} times the standard deviation. The rule also takes the parameter mode, that specifies whether steps from training or validation phase should be checked. In your processing job mode was specified as {mode}. Typically the first step is taking significantly more time and to avoid the rule triggering immediately, one can use n_outliers to specify the number of outliers to ignore. n_outliers was set to {n_outlier}. 
The rule analysed {datapoints} datapoints and triggered {triggered} times. """ paragraph = Paragraph(text=text, width=900) show(column(paragraph)) if report and len(report['Details']['step_details']) > 0: for node_id in report['Details']['step_details']: tmp = report['RuleParameters'].split('threshold:') threshold = tmp[1].split('\n')[0] n_outliers = report['Details']['step_details'][node_id]['number_of_outliers'] mean = report['Details']['step_details'][node_id]['step_stats']['mean'] stddev = report['Details']['step_details'][node_id]['stddev'] phase = report['Details']['step_details'][node_id]['phase'] display(Markdown(f"""**Step durations on node {node_id}:**""")) display(Markdown(f"""The following table is a summary of the statistics of step durations measured on node {node_id}. The rule has analyzed the step duration from {phase} phase. The average step duration on node {node_id} was {round(mean, 2)}s. The rule detected {n_outliers} outliers, where step duration was larger than {threshold} times the standard deviation of {stddev}s \n""")) step_stats_df = pd.DataFrame.from_dict(report['Details']['step_details'][node_id]['step_stats'], orient='index').T step_stats_df.index = ['Step Durations in [s]'] pretty_print(step_stats_df) display(Markdown(f"""The following histogram shows the step durations measured on the different nodes. 
You can turn on or turn off the visualization of histograms by selecting or unselecting the labels in the legend.""")) plot = figure(plot_height=450, plot_width=850, title=f"""Step durations""") colors = bokeh.palettes.viridis(len(report['Details']['step_details'])) for index, node_id in enumerate(report['Details']['step_details']): probs = report['Details']['step_details'][node_id]['probs'] binedges = report['Details']['step_details'][node_id]['binedges'] plot.quad( top=probs, bottom=0, left=binedges[:-1], right=binedges[1:], line_color="white", fill_color=colors[index], fill_alpha=0.7, legend=node_id) plot.add_layout(Legend(), 'right') plot.y_range.start = 0 plot.xaxis.axis_label = f"""Step durations in [s]""" plot.yaxis.axis_label = "Occurrences" plot.grid.grid_line_color = "white" plot.legend.click_policy="hide" plot.legend.location = 'center_right' show(plot) if report['RuleTriggered'] > 0: text=f"""To get a better understanding of what may have caused those outliers, we correlate the timestamps of step outliers with other framework metrics that happened at the same time. The left chart shows how much time was spent in the different framework metrics aggregated by event phase. The chart on the right shows the histogram of normal step durations (without outliers). The following chart shows how much time was spent in the different framework metrics when step outliers occurred. In this chart framework metrics are not aggregated byphase.""" plots = [] if 'phase' in report['Details']: text = f"""{text} The chart (in the middle) shows whether step outliers mainly happened during TRAIN or EVAL phase. 
""" plot = create_piechart(report['Details']['phase'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between the time spent on the TRAIN/EVAL phase") plots.append(plot) if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0: event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get) perc = report['Details']['forward_backward'][event] text = f"""{text} The pie chart on the right shows a detailed breakdown. It shows that {int(perc)}% of the training time was spent on event "{event}".""" plot = create_piechart(report['Details']['forward_backward'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The Ratio between forward and backward pass") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0: key = list(report['Details']['ratio'].keys())[0] ratio = report['Details']['ratio'][key] text = f"""The following pie chart shows a breakdown of the CPU/GPU operators executed during the step outliers. 
It shows that {int(ratio)}% of the training time was spent on executing operators in "{key}".""" plot = create_piechart(report['Details']['ratio'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between CPU/GPU operators") plots.append(plot) if 'general' in report['Details'] and len(report['Details']['general']) > 0: event = max(report['Details']['general'], key=report['Details']['general'].get) perc = report['Details']['general'][event] plot = create_piechart(report['Details']['general'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0: event = max(report['Details']['horovod'], key=report['Details']['horovod'].get) perc = report['Details']['horovod'][event] text = f"""The following pie chart shows a detailed breakdown of the Horovod metrics that have been recorded when step outliers happened. The most expensive function was {event} with {int(perc)}%""" plot = create_piechart(report['Details']['horovod'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plot))) if analyse_phase == "training": display(Markdown("""### GPU utilization analysis\n\n""")) display(Markdown("""**Usage per GPU** \n\n""")) report = load_report('LowGPUUtilization') if report: params = report['RuleParameters'].split('\n') threshold_p95 = params[0].split(':')[1] threshold_p5 = params[1].split(':')[1] window = params[2].split(':')[1] patience = params[3].split(':')[1] violations = report['Violations'] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] text=Paragraph(text=f"""The LowGPUUtilization rule checks for a low and fluctuating GPU usage. 
If the GPU usage is consistently low, it might be caused by bottlenecks or a small batch size. If usage is heavily fluctuating, it can be due to bottlenecks or blocking calls. The rule computed the 95th and 5th percentile of GPU utilization on {window} continuous datapoints and found {violations} cases where p95 was above {threshold_p95}% and p5 was below {threshold_p5}%. If p95 is high and p5 is low, it might indicate that the GPU usage is highly fluctuating. If both values are very low, it would mean that the machine is underutilized. During initialization, the GPU usage is likely zero, so the rule skipped the first {patience} data points. The rule analysed {datapoints} datapoints and triggered {triggered} times.""", width=800) show(text) if len(report['Details']) > 0: timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp']) date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") text = Paragraph(text=f"""Your training job is underutilizing the instance. You may want to consider to either switch to a smaller instance type or to increase the batch size. The last time that the LowGPUUtilization rule was triggered in your training job was on {day} at {hour}. The following boxplots are a snapshot from the timestamps. They show the utilization per GPU (without outliers). 
To get a better understanding of the workloads throughout the whole training, you can check the workload histogram in the next section.""", width=800) show(text) del report['Details']['last_timestamp'] for node_id in report['Details']: plot = figure(plot_height=350, plot_width=1000, toolbar_location='right', tools="hover,wheel_zoom,reset,pan", title=f"Node {node_id}", x_range=(0,17), ) for index, key in enumerate(report['Details'][node_id]): display(Markdown(f"""**GPU utilization of {key} on node {node_id}:**""")) text = "" gpu_max = report['Details'][node_id][key]['gpu_max'] p_95 = report['Details'][node_id][key]['gpu_95'] p_5 = report['Details'][node_id][key]['gpu_5'] text = f"""{text} The max utilization of {key} on node {node_id} was {gpu_max}%""" if p_95 < int(threshold_p95): text = f"""{text} and the 95th percentile was only {p_95}%. {key} on node {node_id} is underutilized""" if p_5 < int(threshold_p5): text = f"""{text} and the 5th percentile was only {p_5}%""" if p_95 - p_5 > 50: text = f"""{text} The difference between 5th percentile {p_5}% and 95th percentile {p_95}% is quite significant, which means that utilization on {key} is fluctuating quite a lot.\n""" upper = report['Details'][node_id][key]['upper'] lower = report['Details'][node_id][key]['lower'] p75 = report['Details'][node_id][key]['p75'] p25 = report['Details'][node_id][key]['p25'] p50 = report['Details'][node_id][key]['p50'] plot.segment(index+1, upper, index+1, p75, line_color="black") plot.segment(index+1, lower, index+1, p25, line_color="black") plot.vbar(index+1, 0.7, p50, p75, fill_color="#FDE725", line_color="black") plot.vbar(index+1, 0.7, p25, p50, fill_color="#440154", line_color="black") plot.rect(index+1, lower, 0.2, 0.01, line_color="black") plot.rect(index+1, upper, 0.2, 0.01, line_color="black") plot.xaxis.major_label_overrides[index+1] = key plot.xgrid.grid_line_color = None plot.ygrid.grid_line_color = "white" plot.grid.grid_line_width = 0 
plot.xaxis.major_label_text_font_size="10px" text=Paragraph(text=f"""{text}""", width=900) show(text) plot.yaxis.axis_label = "Utilization in %" plot.xaxis.ticker = np.arange(index+2) show(plot) if analyse_phase == "training": display(Markdown("""**Workload balancing**\n\n""")) report = load_report('LoadBalancing') if report: params = report['RuleParameters'].split('\n') threshold = params[0].split(':')[1] patience = params[1].split(':')[1] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] paragraph = Paragraph(text=f"""The LoadBalancing rule helps to detect issues in workload balancing between multiple GPUs. It computes a histogram of GPU utilization values for each GPU and compares then the similarity between histograms. The rule checked if the distance of histograms is larger than the threshold of {threshold}. During initialization utilization is likely zero, so the rule skipped the first {patience} data points. """, width=900) show(paragraph) if len(report['Details']) > 0: for node_id in report['Details']: text = f"""The following histogram shows the workload per GPU on node {node_id}. You can enable/disable the visualization of a workload by clicking on the label in the legend. 
""" if len(report['Details']) == 1 and len(report['Details'][node_id]['workloads']) == 1: text = f"""{text} Your training job only used one GPU so there is no workload balancing issue.""" plot = figure(plot_height=450, plot_width=850, x_range=(-1,100), title=f"""Workloads on node {node_id}""") colors = bokeh.palettes.viridis(len(report['Details'][node_id]['workloads'])) for index, gpu_id2 in enumerate(report['Details'][node_id]['workloads']): probs = report['Details'][node_id]['workloads'][gpu_id2] plot.quad( top=probs, bottom=0, left=np.arange(0,98,2), right=np.arange(2,100,2), line_color="white", fill_color=colors[index], fill_alpha=0.8, legend=gpu_id2 ) plot.y_range.start = 0 plot.xaxis.axis_label = f"""Utilization""" plot.yaxis.axis_label = "Occurrences" plot.grid.grid_line_color = "white" plot.legend.click_policy="hide" paragraph = Paragraph(text=text) show(column(paragraph, plot)) if "distances" in report['Details'][node_id]: text = f"""The rule identified workload balancing issues on node {node_id} where workloads differed by more than threshold {threshold}. """ for index, gpu_id2 in enumerate(report['Details'][node_id]['distances']): for gpu_id1 in report['Details'][node_id]['distances'][gpu_id2]: distance = round(report['Details'][node_id]['distances'][gpu_id2][gpu_id1], 2) text = f"""{text} The difference of workload between {gpu_id2} and {gpu_id1} is: {distance}.""" paragraph = Paragraph(text=f"""{text}""", width=900) show(column(paragraph)) if analyse_phase == "training": display(Markdown("""### Dataloading analysis\n\n""")) report = load_report('Dataloader') if report: params = report['RuleParameters'].split("\n") min_threshold = params[0].split(':')[1] max_threshold = params[1].split(':')[1] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] text=f"""The number of dataloader workers can greatly affect the overall performance of your training job. 
The rule analyzed the number of dataloading processes that have been running in parallel on the training instance and compares it against the total number of cores. The rule checked if the number of processes is smaller than {min_threshold}% or larger than {max_threshold}% the total number of cores. Having too few dataloader workers can slowdown data preprocessing and lead to GPU underutilization. Having too many dataloader workers may hurt the overall performance if you are running other compute intensive tasks on the CPU. The rule analysed {datapoints} datapoints and triggered {triggered} times.""" paragraph = Paragraph(text=f"{text}", width=900) show(paragraph) text = "" if 'cores' in report['Details']: cores = int(report['Details']['cores']) dataloaders = report['Details']['dataloaders'] if dataloaders < cores: text=f"""{text} Your training instance provided {cores} CPU cores, however your training job only ran on average {dataloaders} dataloader workers in parallel. We recommend you to increase the number of dataloader workers.""" if dataloaders > cores: text=f"""{text} Your training instance provided {cores} CPU cores, however your training job ran on average {dataloaders} dataloader workers. We recommed you to decrease the number of dataloader workers.""" if 'pin_memory' in report['Details'] and report['Details']['pin_memory'] == False: text=f"""{text} Using pinned memory also improves performance because it enables fast data transfer to CUDA-enabled GPUs. The rule detected that your training job was not using pinned memory. In case of using PyTorch Dataloader, you can enable this by setting pin_memory=True.""" if 'prefetch' in report['Details'] and report['Details']['prefetch'] == False: text=f"""{text} It appears that your training job did not perform any data pre-fetching. 
Pre-fetching can improve your data input pipeline as it produces the data ahead of time.""" paragraph = Paragraph(text=f"{text}", width=900) show(paragraph) colors=bokeh.palettes.viridis(10) if "dataloading_time" in report['Details']: median = round(report['Details']["dataloading_time"]['p50'],4) p95 = round(report['Details']["dataloading_time"]['p95'],4) p25 = round(report['Details']["dataloading_time"]['p25'],4) binedges = report['Details']["dataloading_time"]['binedges'] probs = report['Details']["dataloading_time"]['probs'] text=f"""The following histogram shows the distribution of dataloading times that have been measured throughout your training job. The median dataloading time was {median}s. The 95th percentile was {p95}s and the 25th percentile was {p25}s""" plot = figure(plot_height=450, plot_width=850, toolbar_location='right', tools="hover,wheel_zoom,reset,pan", x_range=(binedges[0], binedges[-1]) ) plot.quad( top=probs, bottom=0, left=binedges[:-1], right=binedges[1:], line_color="white", fill_color=colors[0], fill_alpha=0.8, legend="Dataloading events" ) plot.y_range.start = 0 plot.xaxis.axis_label = f"""Dataloading in [s]""" plot.yaxis.axis_label = "Occurrences" plot.grid.grid_line_color = "white" plot.legend.click_policy="hide" paragraph = Paragraph(text=f"{text}", width=900) show(column(paragraph, plot)) if analyse_phase == "training": display(Markdown(""" ### Batch size""")) report = load_report('BatchSize') if report: params = report['RuleParameters'].split('\n') cpu_threshold_p95 = int(params[0].split(':')[1]) gpu_threshold_p95 = int(params[1].split(':')[1]) gpu_memory_threshold_p95 = int(params[2].split(':')[1]) patience = int(params[3].split(':')[1]) window = int(params[4].split(':')[1]) violations = report['Violations'] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] text = Paragraph(text=f"""The BatchSize rule helps to detect if GPU is underutilized because of the batch size being too small. 
To detect this the rule analyzes the GPU memory footprint, CPU and GPU utilization. The rule checked if the 95th percentile of CPU utilization is below cpu_threshold_p95 of {cpu_threshold_p95}%, the 95th percentile of GPU utilization is below gpu_threshold_p95 of {gpu_threshold_p95}% and the 95th percentile of memory footprint \ below gpu_memory_threshold_p95 of {gpu_memory_threshold_p95}%. In your training job this happened {violations} times. \ The rule skipped the first {patience} datapoints. The rule computed the percentiles over window size of {window} continuous datapoints.\n The rule analysed {datapoints} datapoints and triggered {triggered} times. """, width=800) show(text) if len(report['Details']) >0: timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp']) date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") del report['Details']['last_timestamp'] text = Paragraph(text=f"""Your training job is underutilizing the instance. You may want to consider either switch to a smaller instance type or to increase the batch size. The last time the BatchSize rule triggered in your training job was on {day} at {hour}. The following boxplots are a snapshot from the timestamps. 
They the total CPU utilization, the GPU utilization, and the GPU memory usage per GPU (without outliers).""", width=800) show(text) for node_id in report['Details']: xmax = max(20, len(report['Details'][node_id])) plot = figure(plot_height=350, plot_width=1000, toolbar_location='right', tools="hover,wheel_zoom,reset,pan", title=f"Node {node_id}", x_range=(0,xmax) ) for index, key in enumerate(report['Details'][node_id]): upper = report['Details'][node_id][key]['upper'] lower = report['Details'][node_id][key]['lower'] p75 = report['Details'][node_id][key]['p75'] p25 = report['Details'][node_id][key]['p25'] p50 = report['Details'][node_id][key]['p50'] plot.segment(index+1, upper, index+1, p75, line_color="black") plot.segment(index+1, lower, index+1, p25, line_color="black") plot.vbar(index+1, 0.7, p50, p75, fill_color="#FDE725", line_color="black") plot.vbar(index+1, 0.7, p25, p50, fill_color="#440154", line_color="black") plot.rect(index+1, lower, 0.2, 0.01, line_color="black") plot.rect(index+1, upper, 0.2, 0.01, line_color="black") plot.xaxis.major_label_overrides[index+1] = key plot.xgrid.grid_line_color = None plot.ygrid.grid_line_color = "white" plot.grid.grid_line_width = 0 plot.xaxis.major_label_text_font_size="10px" plot.xaxis.ticker = np.arange(index+2) plot.yaxis.axis_label = "Utilization in %" show(plot) if analyse_phase == "training": display(Markdown("""### CPU bottlenecks\n\n""")) report = load_report('CPUBottleneck') if report: params = report['RuleParameters'].split('\n') threshold = int(params[0].split(':')[1]) cpu_threshold = int(params[1].split(':')[1]) gpu_threshold = int(params[2].split(':')[1]) patience = int(params[3].split(':')[1]) violations = report['Violations'] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] if report['Violations'] > 0: perc = int(report['Violations']/report['Datapoints']*100) else: perc = 0 if perc < threshold: string = 'below' else: string = 'above' text = f"""The CPUBottleneck rule checked when 
the CPU utilization was above cpu_threshold of {cpu_threshold}% and GPU utilization was below gpu_threshold of {gpu_threshold}%. During initialization utilization is likely to be zero, so the rule skipped the first {patience} datapoints. With this configuration the rule found {violations} CPU bottlenecks which is {perc}% of the total time. This is {string} the threshold of {threshold}% The rule analysed {datapoints} data points and triggered {triggered} times.""" paragraph = Paragraph(text=text, width=900) show(paragraph) if report: plots = [] text = "" if report['RuleTriggered'] > 0: low_gpu = report['Details']['low_gpu_utilization'] cpu_bottleneck = {} cpu_bottleneck["GPU usage above threshold"] = report["Datapoints"] - report["Details"]["low_gpu_utilization"] cpu_bottleneck["GPU usage below threshold"] = report["Details"]["low_gpu_utilization"] - len(report["Details"]) cpu_bottleneck["Low GPU usage due to CPU bottlenecks"] = len(report["Details"]["bottlenecks"]) n_bottlenecks = round(len(report['Details']['bottlenecks'])/datapoints * 100, 2) text = f"""The following chart (left) shows how many datapoints were below the gpu_threshold of {gpu_threshold}% and how many of those datapoints were likely caused by a CPU bottleneck. The rule found {low_gpu} out of {datapoints} datapoints which had a GPU utilization below {gpu_threshold}%. Out of those datapoints {n_bottlenecks}% were likely caused by CPU bottlenecks. """ plot = create_piechart(cpu_bottleneck, height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="Low GPU usage caused by CPU bottlenecks") plots.append(plot) if 'phase' in report['Details']: text = f"""{text} The chart (in the middle) shows whether CPU bottlenecks mainly happened during train/validation phase. 
""" plot = create_piechart(report['Details']['phase'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between time spent on TRAIN/EVAL phase") plots.append(plot) if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0: event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get) perc = report['Details']['forward_backward'][event] text = f"""{text} The pie charts on the right shows a more detailed breakdown. It shows that {int(perc)}% of the training time was spent on event {event}""" plot = create_piechart(report['Details']['forward_backward'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between forward and backward pass") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0: key = list(report['Details']['ratio'].keys())[0] ratio = report['Details']['ratio'][key] text = f"""The following pie chart shows a breakdown of the CPU/GPU operators that happened during CPU bottlenecks. 
It shows that {int(ratio)}% of the training time was spent on executing operators in "{key}".""" plot = create_piechart(report['Details']['ratio'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between CPU/GPU operators") plots.append(plot) if 'general' in report['Details'] and len(report['Details']['general']) > 0: event = max(report['Details']['general'], key=report['Details']['general'].get) perc = report['Details']['general'][event] plot = create_piechart(report['Details']['general'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0: event = max(report['Details']['horovod'], key=report['Details']['horovod'].get) perc = report['Details']['horovod'][event] text = f"""The following pie chart shows a detailed breakdown of the Horovod metrics that have been recorded when the CPU bottleneck happened. 
The most expensive function was {event} with {int(perc)}%""" plot = create_piechart(report['Details']['horovod'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plot))) if analyse_phase == "training": display(Markdown("""### I/O bottlenecks\n\n""")) report = load_report('IOBottleneck') if report: params = report['RuleParameters'].split('\n') threshold = int(params[0].split(':')[1]) io_threshold = int(params[1].split(':')[1]) gpu_threshold = int(params[2].split(':')[1]) patience = int(params[3].split(':')[1]) violations = report['Violations'] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] if report['Violations'] > 0: perc = int(report['Violations']/report['Datapoints']*100) else: perc = 0 if perc < threshold: string = 'below' else: string = 'above' text = f"""The IOBottleneck rule checked when I/O wait time was above io_threshold of {io_threshold}% and GPU utilization was below gpu_threshold of {gpu_threshold}. During initialization utilization is likely to be zero, so the rule skipped the first {patience} datapoints. With this configuration the rule found {violations} I/O bottlenecks which is {perc}% of the total time. This is {string} the threshold of {threshold}%. 
The rule analysed {datapoints} datapoints and triggered {triggered} times.""" paragraph = Paragraph(text=text, width=900) show(paragraph) if report: plots = [] text = "" if report['RuleTriggered'] > 0: low_gpu = report['Details']['low_gpu_utilization'] cpu_bottleneck = {} cpu_bottleneck["GPU usage above threshold"] = report["Datapoints"] - report["Details"]["low_gpu_utilization"] cpu_bottleneck["GPU usage below threshold"] = report["Details"]["low_gpu_utilization"] - len(report["Details"]) cpu_bottleneck["Low GPU usage due to I/O bottlenecks"] = len(report["Details"]["bottlenecks"]) n_bottlenecks = round(len(report['Details']['bottlenecks'])/datapoints * 100, 2) text = f"""The following chart (left) shows how many datapoints were below the gpu_threshold of {gpu_threshold}% and how many of those datapoints were likely caused by a I/O bottleneck. The rule found {low_gpu} out of {datapoints} datapoints which had a GPU utilization below {gpu_threshold}%. Out of those datapoints {n_bottlenecks}% were likely caused by I/O bottlenecks. """ plot = create_piechart(cpu_bottleneck, height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="Low GPU usage caused by I/O bottlenecks") plots.append(plot) if 'phase' in report['Details']: text = f"""{text} The chart (in the middle) shows whether I/O bottlenecks mainly happened during trianing or validation phase. """ plot = create_piechart(report['Details']['phase'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between the time spent on the TRAIN/EVAL phase") plots.append(plot) if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0: event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get) perc = report['Details']['forward_backward'][event] text = f"""{text} The pie charts on the right shows a more detailed breakdown. 
It shows that {int(perc)}% of the training time was spent on event "{event}".""" plot = create_piechart(report['Details']['forward_backward'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="The ratio between forward and backward pass") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0: key = list(report['Details']['ratio'].keys())[0] ratio = report['Details']['ratio'][key] text = f"""The following pie chart shows a breakdown of the CPU/GPU operators that happened during I/O bottlenecks. It shows that {int(ratio)}% of the training time was spent on executing operators in "{key}".""" plot = create_piechart(report['Details']['ratio'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="Ratio between CPU/GPU operators") plots.append(plot) if 'general' in report['Details'] and len(report['Details']['general']) > 0: event = max(report['Details']['general'], key=report['Details']['general'].get) perc = report['Details']['general'][event] plot = create_piechart(report['Details']['general'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") plots.append(plot) if len(plots) > 0: paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plots))) plots = [] text = "" if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0: event = max(report['Details']['horovod'], key=report['Details']['horovod'].get) perc = report['Details']['horovod'][event] text = f"""The following pie chart shows a detailed breakdown of the Horovod metrics that have been recorded when I/O bottleneck happened. 
The most expensive function was {event} with {int(perc)}%""" plot = create_piechart(report['Details']['horovod'], height=350, width=600, x1=0.2, x2=0.6, radius=0.3, title="General metrics recorded in framework ") paragraph = Paragraph(text=text, width=900) show(column(paragraph, row(plot))) if analyse_phase == "training": display(Markdown("""### GPU memory\n\n""")) report = load_report('GPUMemoryIncrease') if report: params = report['RuleParameters'].split('\n') increase = float(params[0].split(':')[1]) patience = params[1].split(':')[1] window = params[2].split(':')[1] violations = report['Violations'] triggered = report['RuleTriggered'] datapoints = report['Datapoints'] text=Paragraph(text=f"""The GPUMemoryIncrease rule helps to detect large increase in memory usage on GPUs. The rule checked if the moving average of memory increased by more than {increase}%. So if the moving average increased for instance from 10% to {11+increase}%, the rule would have triggered. During initialization utilization is likely 0, so the rule skipped the first {patience} datapoints. The moving average was computed on a window size of {window} continuous datapoints. The rule detected {violations} violations where the moving average between previous and current time window increased by more than {increase}%. The rule analysed {datapoints} datapoints and triggered {triggered} times.""", width=900) show(text) if len(report['Details']) > 0: timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp']) date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f') day = date.date().strftime("%m/%d/%Y") hour = date.time().strftime("%H:%M:%S") text = Paragraph(text=f"""Your training job triggered memory spikes. The last time the GPUMemoryIncrease rule triggered in your training job was on {day} at {hour}. The following boxplots are a snapshot from the timestamps. 
They show for each node and GPU the corresponding memory utilization (without outliers).""", width=900) show(text) del report['Details']['last_timestamp'] for node_id in report['Details']: plot = figure(plot_height=350, plot_width=1000, toolbar_location='right', tools="hover,wheel_zoom,reset,pan", title=f"Node {node_id}", x_range=(0,17), ) for index, key in enumerate(report['Details'][node_id]): display(Markdown(f"""**Memory utilization of {key} on node {node_id}:**""")) text = "" gpu_max = report['Details'][node_id][key]['gpu_max'] text = f"""{text} The max memory utilization of {key} on node {node_id} was {gpu_max}%.""" p_95 = int(report['Details'][node_id][key]['p95']) p_5 = report['Details'][node_id][key]['p05'] if p_95 < int(50): text = f"""{text} The 95th percentile was only {p_95}%.""" if p_5 < int(5): text = f"""{text} The 5th percentile was only {p_5}%.""" if p_95 - p_5 > 50: text = f"""{text} The difference between 5th percentile {p_5}% and 95th percentile {p_95}% is quite significant, which means that memory utilization on {key} is fluctuating quite a lot.""" text = Paragraph(text=f"""{text}""", width=900) show(text) upper = report['Details'][node_id][key]['upper'] lower = report['Details'][node_id][key]['lower'] p75 = report['Details'][node_id][key]['p75'] p25 = report['Details'][node_id][key]['p25'] p50 = report['Details'][node_id][key]['p50'] plot.segment(index+1, upper, index+1, p75, line_color="black") plot.segment(index+1, lower, index+1, p25, line_color="black") plot.vbar(index+1, 0.7, p50, p75, fill_color="#FDE725", line_color="black") plot.vbar(index+1, 0.7, p25, p50, fill_color="#440154", line_color="black") plot.rect(index+1, lower, 0.2, 0.01, line_color="black") plot.rect(index+1, upper, 0.2, 0.01, line_color="black") plot.xaxis.major_label_overrides[index+1] = key plot.xgrid.grid_line_color = None plot.ygrid.grid_line_color = "white" plot.grid.grid_line_width = 0 plot.xaxis.major_label_text_font_size="10px" plot.xaxis.ticker = 
np.arange(index+2) plot.yaxis.axis_label = "Utilization in %" show(plot) ```
github_jupyter
``` import pandas as pd import numpy as np data = pd.Series(np.random.randn(9), index=[['a', 'a', 'a', 'b', 'b', 'c', 'c', 'd', 'd'], [1, 2, 3, 1, 3, 1, 2, 2, 3]]) data data.index data['b'] data['b':'c'] data.loc[['b','d']] data.loc[:,2] data.unstack() data.unstack().stack() frame = pd.DataFrame(np.arange(12).reshape((4,3)), index=[['a','a','b','b'],[1,2,1,2]], columns=[['Ohio', 'Ohio', 'Colorado'],['Green','Red','Green']]) frame frame.index.names=['key1', 'key2'] frame.columns.names = ['state','color'] frame frame['Ohio'] pd.MultiIndex.from_arrays([['Ohio', 'Ohio', 'Colorado'],['Green', 'Red', 'Green']], names = ['state','color']) frame.swaplevel('key1','key2') frame.sort_index(level=1) frame.swaplevel(0,1).sort_index(level=0) frame.sum(level='key2') frame.sum(level='color', axis=1) frame = pd.DataFrame({'a': range(7), 'b': range(7, 0, -1), 'c': ['one', 'one', 'one', 'two', 'two', 'two', 'two'], 'd': [0, 1, 2, 0, 1, 2, 3]}) frame frame2 = frame.set_index(['c','d']) frame2 frame2 = frame.set_index(['c','d'], drop=False) frame2 ``` Data Frame joins ``` df1 = pd.DataFrame({'key':['b','b','a','c','a','a','b'], 'data1':range(7)}) df2 = pd.DataFrame({'key':['a','b','d'], 'data2':range(3)}) df1 df2 pd.merge(df1,df2) pd.merge(df1, df2, on = 'key') df3 = pd.DataFrame({'lkey':['b','b','a','c','a','a','b'], 'data1':range(7)}) df4 = pd.DataFrame({'rkey':['a','b','d'], 'data2':range(3)}) pd.merge(df3, df4, left_on='lkey', right_on='rkey') pd.merge(df1, df2, on = 'key', how='outer') df1 = pd.DataFrame({'key':['b','b','a','c','a','b'], 'data1':range(6)}) df2 = pd.DataFrame({'key':['a','b','a','b','d'], 'data2':range(5)}) df1 df2 pd.merge(df1,df2, on='key', how='left') pd.merge(df1, df2, how='inner') left = pd.DataFrame({'key1': ['foo', 'foo', 'bar'], 'key2': ['one', 'two', 'one'], 'lval': [1, 2, 3]}) right = pd.DataFrame({'key1': ['foo', 'foo', 'bar', 'bar'], 'key2': ['one', 'one', 'one', 'two'], 'rval': [4, 5, 6, 7]}) pd.merge(left, right, on=['key1', 'key2'], how='outer') 
pd.merge(left, right, on = 'key1') pd.merge(left, right, on='key1', suffixes=('_left','_right')) left1 = pd.DataFrame({'key': ['a', 'b', 'a', 'a', 'b', 'c'], 'value': range(6)}) right1 = pd.DataFrame({'group_val': [3.5, 7]}, index=['a', 'b']) left1 right1 pd.merge(left1, right1, left_on='key', right_index=True, how='outer') ```
github_jupyter
``` import numpy as np import regex as re import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import statistics import math import os import keras.backend as K from sklearn.model_selection import StratifiedKFold from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split import tensorflow as tf import tensorflow.keras.backend as K import tokenizers from transformers import RobertaTokenizer, TFRobertaModel import tensorflow as tf import pandas as pd from sklearn.metrics import classification_report from transformers import RobertaTokenizerFast, TFRobertaForSequenceClassification, pipeline from collections import Counter import tensorflow_addons as tfa from collections import Counter import warnings warnings.filterwarnings("ignore") # Detect hardware, return appropriate distribution strategy (you can see that it is pretty easy to set up). try: # TPU detection. No parameters necessary if TPU_NAME environment variable is set (always set in Kaggle) tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) print('Running on TPU ', tpu.master()) except ValueError: # Default distribution strategy in Tensorflow. Works on CPU and single GPU. 
strategy = tf.distribute.get_strategy() print('Number of replicas:', strategy.num_replicas_in_sync) MODEL_NAME = 'roberta-base' MAX_LEN = 200 ARTIFACTS_PATH = '../artifacts/' BATCH_SIZE = 32 * strategy.num_replicas_in_sync EPOCHS = 2 if not os.path.exists(ARTIFACTS_PATH): os.makedirs(ARTIFACTS_PATH) df = pd.read_csv('../datasets/tweet_emotions.csv') df.head() X_data = df[['content']].to_numpy().reshape(-1) y_data = df[['sentiment']].to_numpy().reshape(-1) categories = df[['sentiment']].values.reshape(-1) counter_categories = Counter(categories) category_names = counter_categories.keys() category_values = counter_categories.values() y_pos = np.arange(len(category_names)) plt.figure(1, figsize=(10, 5)) plt.bar(y_pos, category_values, align='center', alpha=0.5) plt.xticks(y_pos, category_names) plt.ylabel('Number of texts') plt.title('Distribution of texts per category') plt.gca().yaxis.grid(True) plt.show() print(counter_categories) def calculate_stats(df, split_char=' '): categories = df['sentiment'].unique() all_lengths = [] per_category = { 'lengths': {c:[] for c in categories}, 'mean': {c:0 for c in categories}, 'stdev': {c:0 for c in categories} } for index, row in df.iterrows(): text = row['content'] text = re.sub(r"\s+", ' ', text) # Normalize text = text.split(split_char) l = len(text) category = row['sentiment'] all_lengths.append(l) per_category['lengths'][category].append(l) for c in categories: per_category['mean'][c] = statistics.mean(per_category['lengths'][c]) per_category['stdev'][c] = statistics.stdev(per_category['lengths'][c]) global_stats = { 'mean': statistics.mean(all_lengths), 'stdev': statistics.stdev(all_lengths), 'lengths': all_lengths } return { 'global': global_stats, 'per_category': pd.DataFrame(per_category) } def display_lengths_histograms(df_stats, n_cols=3): categories = df['sentiment'].unique() n_rows = math.ceil(len(categories) / n_cols) plt.figure(figsize=(15, 8)) plt.suptitle('Distribution of lengths') # Subplot of all lengths 
plt.subplot(n_rows, n_cols, 1) plt.title('All categories') lengths = df_stats['global']['lengths'] plt.hist(lengths, color='r') # Subplot of each category index_subplot = 2 for c in categories: plt.subplot(n_rows, n_cols, index_subplot) plt.title('Category: %s' % c) lengths = df_stats['per_category']['lengths'][c] plt.hist(lengths, color='b') index_subplot += 1 plt.show() df_stats = calculate_stats(df) df_stats['per_category'] display_lengths_histograms(df_stats) n_texts = len(X_data) print('Texts in dataset: %d' % n_texts) categories = df['sentiment'].unique() n_categories = len(categories) print('Number of categories: %d' % n_categories) print('Done!') def roberta_encode(texts, tokenizer): ct = len(texts) input_ids = np.ones((ct, MAX_LEN), dtype='int32') attention_mask = np.zeros((ct, MAX_LEN), dtype='int32') token_type_ids = np.zeros((ct, MAX_LEN), dtype='int32') # Not used in text classification for k, text in enumerate(texts): # Tokenize tok_text = tokenizer.tokenize(text) # Truncate and convert tokens to numerical IDs enc_text = tokenizer.convert_tokens_to_ids(tok_text[:(MAX_LEN-2)]) input_length = len(enc_text) + 2 input_length = input_length if input_length < MAX_LEN else MAX_LEN # Add tokens [CLS] and [SEP] at the beginning and the end input_ids[k,:input_length] = np.asarray([0] + enc_text + [2], dtype='int32') # Set to 1s in the attention input attention_mask[k,:input_length] = 1 return { 'input_word_ids': input_ids, 'input_mask': attention_mask, 'input_type_ids': token_type_ids } # Transform categories into numbers category_to_id = {} category_to_name = {} for index, c in enumerate(y_data): if c in category_to_id: category_id = category_to_id[c] else: category_id = len(category_to_id) category_to_id[c] = category_id category_to_name[category_id] = c y_data[index] = category_id # Display dictionary category_to_name datasets = pd.read_csv('../datasets/train_preprocessed.csv').dropna() X_train = datasets["content"].astype("string").to_numpy() y_train = 
datasets["sentiment"].astype("category").cat.codes.to_numpy() datasets_t = pd.read_csv('../datasets/test_preprocessed.csv').dropna() X_test = datasets_t["content"].astype("string").to_numpy() y_test = datasets_t["sentiment"].astype("category").cat.codes.to_numpy() tokenizer = RobertaTokenizer.from_pretrained(MODEL_NAME) X_train = roberta_encode(X_train, tokenizer) X_test = roberta_encode(X_test, tokenizer) y_train = np.asarray(y_train, dtype='int32') y_test = np.asarray(y_test, dtype='int32') def build_model(n_categories): with strategy.scope(): input_word_ids = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_word_ids') input_mask = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_mask') input_type_ids = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_type_ids') # Import RoBERTa model from HuggingFace roberta_model = TFRobertaModel.from_pretrained(MODEL_NAME) x = roberta_model(input_word_ids, attention_mask=input_mask, token_type_ids=input_type_ids) # Huggingface transformers have multiple outputs, embeddings are the first one, # so let's slice out the first position x = x[0] x = tf.keras.layers.Dropout(0.1)(x) x = tf.keras.layers.Flatten()(x) x = tf.keras.layers.Dense(256, activation='relu')(x) x = tf.keras.layers.Dense(n_categories, activation='softmax')(x) model = tf.keras.Model(inputs=[input_word_ids, input_mask, input_type_ids], outputs=x) model.compile( optimizer=tf.keras.optimizers.Adam(lr=1e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy']) return model with strategy.scope(): model = build_model(n_categories) model.summary() def macro_double_soft_f1(y, y_hat): """Compute the macro soft F1-score as a cost (average 1 - soft-F1 across all labels). Use probability values instead of binary predictions. This version uses the computation of soft-F1 for both positive and negative class for each label. 
Args: y (int32 Tensor): targets array of shape (BATCH_SIZE, N_LABELS) y_hat (float32 Tensor): probability matrix from forward propagation of shape (BATCH_SIZE, N_LABELS) Returns: cost (scalar Tensor): value of the cost function for the batch """ y = tf.cast(y, tf.float32) y_hat = tf.cast(y_hat, tf.float32) tp = tf.reduce_sum(y_hat * y, axis=0) fp = tf.reduce_sum(y_hat * (1 - y), axis=0) fn = tf.reduce_sum((1 - y_hat) * y, axis=0) tn = tf.reduce_sum((1 - y_hat) * (1 - y), axis=0) soft_f1_class1 = 2*tp / (2*tp + fn + fp + 1e-16) soft_f1_class0 = 2*tn / (2*tn + fn + fp + 1e-16) cost_class1 = 1 - soft_f1_class1 # reduce 1 - soft-f1_class1 in order to increase soft-f1 on class 1 cost_class0 = 1 - soft_f1_class0 # reduce 1 - soft-f1_class0 in order to increase soft-f1 on class 0 cost = 0.5 * (cost_class1 + cost_class0) # take into account both class 1 and class 0 macro_cost = tf.reduce_mean(cost) # average on all labels return macro_cost callbacks = [ tf.keras.callbacks.ModelCheckpoint(filepath="./checkpoints/usual/",save_best_only=True,save_weights_only=False) ] BATCH_SIZE with strategy.scope(): print('Training...') history = model.fit(X_train, y_train, epochs=2, batch_size=BATCH_SIZE, verbose=1, callbacks = callbacks, validation_split=0.2, workers = -1) model.save_weights('./weights/usual/saved_weights.h5') counts = Counter(y_train) weights = {i:1/j for i,j in counts.items()} with strategy.scope(): print('Training...') history = model.fit(X_train, y_train, epochs=2, batch_size=BATCH_SIZE, verbose=1, callbacks = callbacks, validation_split=0.2, class_weight = weights) model.save_weights('./weights/weighted/saved_weights.h5') # -augment 1 with strategy.scope(): print('Training...') history = model.fit(X_train, y_train, epochs=2, batch_size=BATCH_SIZE, verbose=1, callbacks = callbacks, validation_split=0.2) model.save_weights('./weights/prepr/saved_weights.h5') def build_model(n_categories): with strategy.scope(): input_word_ids = tf.keras.Input(shape=(MAX_LEN,), 
dtype=tf.int32, name='input_word_ids') input_mask = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_mask') input_type_ids = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_type_ids') # Import RoBERTa model from HuggingFace roberta_model = TFRobertaModel.from_pretrained(MODEL_NAME) x = roberta_model(input_word_ids, attention_mask=input_mask, token_type_ids=input_type_ids) # Huggingface transformers have multiple outputs, embeddings are the first one, # so let's slice out the first position x = x[0] x = tf.keras.layers.Dropout(0.1)(x) x = tf.keras.layers.Flatten()(x) x = tf.keras.layers.Dense(256, activation='relu')(x) x = tf.keras.layers.Dense(n_categories, activation='softmax')(x) model = tf.keras.Model(inputs=[input_word_ids, input_mask, input_type_ids], outputs=x) model.compile( optimizer=tf.keras.optimizers.Adam(lr=1e-5), loss=macro_double_soft_f1, metrics=['accuracy']) return model with strategy.scope(): model = build_model(n_categories) model.summary() with strategy.scope(): print('Training...') history = model.fit(X_train, y_train, epochs=2, batch_size=BATCH_SIZE, verbose=1, callbacks = callbacks, validation_split=0.2) model.save_weights('./weights/f1_loss/saved_weights.h5') #usual model.load_weights('weights/usual/saved_weights.h5') y_pred = tf.argmax(model.predict(X_test,workers = -1,verbose=1),axis = 1) print(classification_report(y_test, y_pred, zero_division = 0)) # weighted model.load_weights('weights/weighted/saved_weights.h5') y_pred = tf.argmax(model.predict(X_test,workers = -1,verbose=1),axis = 1) print(classification_report(y_test, y_pred,zero_division = 0)) # f1 loss model.load_weights('weights/f1_loss/saved_weights.h5') y_pred = tf.argmax(model.predict(X_test,workers = -1,verbose=1),axis = 1) print(classification_report(y_test, y_pred,zero_division = 0)) # augmented model.load_weights('weights/prepr/saved_weights.h5') y_pred = tf.argmax(model.predict(X_test,workers = -1,verbose=1),axis = 1) 
print(classification_report(y_test, y_pred,zero_division = 0)) scores = model.evaluate(X_test, y_test, verbose=0) print("Accuracy: %.2f%%" % (scores[1] * 100)) ```
github_jupyter
``` %load_ext autoreload %autoreload 2 %matplotlib inline %config InlineBackend.figure_format = 'retina' ``` ## Introduction Because of the relational structure in a graph, we can begin to think about "importance" of a node that is induced because of its relationships to the rest of the nodes in the graph. Before we go on, let's think about a pertinent and contemporary example. ### An example: contact tracing At the time of writing (April 2020), finding important nodes in a graph has actually taken on a measure of importance that we might not have appreciated before. With the COVID-19 virus spreading, contact tracing has become quite important. In an infectious disease contact network, where individuals are nodes and contact between individuals of some kind are the edges, an "important" node in this contact network would be an individual who was infected who also was in contact with many people during the time that they were infected. ### Our dataset: "Sociopatterns" The dataset that we will use in this chapter is the "[sociopatterns network][sociopatterns]" dataset. Incidentally, it's also about infectious diseases. [sociopatterns]: http://konect.uni-koblenz.de/networks/sociopatterns-infectious Here is the description of the dataset. > This network describes the face-to-face behavior of people > during the exhibition INFECTIOUS: STAY AWAY in 2009 > at the Science Gallery in Dublin. > Nodes represent exhibition visitors; > edges represent face-to-face contacts that were active for at least 20 seconds. > Multiple edges between two nodes are possible and denote multiple contacts. > The network contains the data from the day with the most interactions. To simplify the network, we have represented only the last contact between individuals. ``` from nams import load_data as cf G = cf.load_sociopatterns_network() ``` It is loaded as an undirected graph object: ``` type(G) ``` As usual, before proceeding with any analysis, we should know basic graph statistics. 
Every NetworkX graph provides a `G.neighbors(node)` class method, which lets us query a graph for the neighbors of a given node:
``` from nams.solutions.hubs import rank_ordered_neighbors #### REPLACE THE NEXT FEW LINES WITH YOUR ANSWER # answer = rank_ordered_neighbors(G) # answer ``` The original implementation looked like the following ``` from nams.solutions.hubs import rank_ordered_neighbors_original # rank_ordered_neighbors_original?? ``` And another implementation that uses generators: ``` from nams.solutions.hubs import rank_ordered_neighbors_generator # rank_ordered_neighbors_generator?? ``` ## Generalizing "neighbors" to arbitrarily-sized graphs The concept of neighbors is simple and appealing, but it leaves us with a slight point of dissatisfaction: it is difficult to compare graphs of different sizes. Is a node more important solely because it has more neighbors? What if it were situated in an extremely large graph? Would we not expect it to have more neighbors? As such, we need a normalization factor. One reasonable one, in fact, is _the number of nodes that a given node could **possibly** be connected to._ By taking the ratio of the number of neighbors a node has to the number of neighbors it could possibly have, we get the **degree centrality** metric. Formally defined, the degree centrality of a node (let's call it $d$) is the number of neighbors that a node has (let's call it $n$) divided by the number of neighbors it could _possibly_ have (let's call it $N$): $$d = \frac{n}{N}$$ NetworkX provides a function for us to calculate degree centrality conveniently: ``` import networkx as nx import pandas as pd dcs = pd.Series(nx.degree_centrality(G)) dcs ``` `nx.degree_centrality(G)` returns to us a dictionary of key-value pairs, where the keys are node IDs and values are the degree centrality score. To save on output length, I took the liberty of casting it as a pandas Series to make it easier to display. Incidentally, we can also sort the series to find the nodes with the highest degree centralities: ``` dcs.sort_values(ascending=False) ``` Does the list order look familiar? 
It should, since the numerator of the degree centrality metric is identical to the number of neighbors, and the denominator is a constant. ## Distribution of graph metrics One important concept that you should come to know is that the distribution of node-centric values can characterize classes of graphs. What do we mean by "distribution of node-centric values"? One would be the degree distribution, that is, the collection of node degree values in a graph. Generally, you might be familiar with plotting a histogram to visualize distributions of values, but in this book, we are going to avoid histograms like the plague. I detail a lot of reasons in a [blog post][ecdf] I wrote in 2018, but the main points are that: 1. It's easier to lie with histograms. 1. You get informative statistical information (median, IQR, extremes/outliers) more easily. [ecdf]: https://ericmjl.github.io/blog/2018/7/14/ecdfs/ ### Exercise: Degree distribution In this next exercise, we are going to get practice visualizing these values using empirical cumulative distribution function plots. I have written for you an ECDF function that you can use already. Its API looks like the following: ```python x, y = ecdf(list_of_values) ``` giving you `x` and `y` values that you can directly plot. The exercise prompt is this: > Plot the ECDF of the degree centrality and degree distributions. First do it for **degree centrality**: ``` from nams.functions import ecdf from nams.solutions.hubs import ecdf_degree_centrality #### REPLACE THE FUNCTION CALL WITH YOUR ANSWER ecdf_degree_centrality(G) ``` Now do it for **degree**: ``` from nams.solutions.hubs import ecdf_degree #### REPLACE THE FUNCTION CALL WITH YOUR ANSWER ecdf_degree(G) ``` The fact that they are identically-shaped should not surprise you! ### Exercise: What about that denominator? The denominator $N$ in the degree centrality definition is "the number of nodes that a node could _possibly_ be connected to". 
Can you think of two ways $N$ could be defined?
In other settings, it might help us spot: - message amplifiers/influencers in a social network, and - potentially crowded airports that have lots of connections into and out of it (still relevant to infectious disease spread!) - and many more! What other settings can you think of in which the number of neighbors that a node has can become a metric of importance for the node? ## Solutions Here are the solutions to the exercises above. ``` from nams.solutions import hubs import inspect print(inspect.getsource(hubs)) ```
github_jupyter
个人可以使用Flow来设计各种类型的交通控制任务,例如最优的交通灯信号定时和混合自主交通的流量调节(见下图)。最后,这些环境与OpenAI Gym是兼容的。
Using this class, we are able to start sumo, provide a network to specify a configuration and controllers, perform simulation steps, and reset the simulation to an initial configuration. “Env”提供了运行和修改sumo模拟的接口。使用这个类,我们可以启动sumo,提供指定配置和控制器的网络,执行模拟步骤,并将模拟重置为初始配置。 By inheriting Flow's base environment, a custom environment for varying control tasks can be created by adding the following functions to the child class: 通过继承Flow的基环境,可以通过在子类中添加以下函数来创建用于变化控制任务的自定义环境: * **action_space**动作空间 * **observation_space**观测空间 * **apply_rl_actions**RL应用空间 * **get_state**获取状态 * **compute_reward**计算奖励值 Each of these components are covered in the next few subsections. ### 1.1 ADDITIONAL_ENV_PARAMS The features used to parametrize components of the state/action space as well as the reward function are specified within the `EnvParams` input, as discussed in tutorial 1. Specifically, for the sake of our environment, the `additional_params` attribute within `EnvParams` will be responsible for storing information on the maximum possible accelerations and decelerations by the autonomous vehicles in the network. Accordingly, for this problem, we define an `ADDITIONAL_ENV_PARAMS` variable of the form: 用于参数化状态/动作空间组件的特性以及奖励功能在“EnvParams”输入中指定,如教程1中所述。具体来说,为了保护我们的环境,‘EnvParams’中的‘additional_params’属性将负责存储网络中自动驾驶车辆最大可能的加速和减速信息。因此,对于这个问题,我们定义了表单的‘ADDITIONAL_ENV_PARAMS’变量: ``` ADDITIONAL_ENV_PARAMS = { "max_accel": 1, "max_decel": 1, } ``` All environments presented in Flow provide a unique `ADDITIONAL_ENV_PARAMS` component containing the information needed to properly define some environment-specific parameters. We assume that these values are always provided by the user, and accordingly can be called from `env_params`. 
For this example, we will observe two values for each vehicle: its position and speed.
Accordingly, we need a observation space that is twice the size of the number of vehicles in the network. 环境的观察空间表示提供给强化学习代理的观察的数量和类型。对于本例,我们将观察每个车辆的两个值:位置和速度。因此,我们需要的观测空间是网络中车辆数量的两倍。 ``` class myEnv(myEnv): # update my environment class @property def observation_space(self): return Box( low=0, high=float("inf"), shape=(2*self.initial_vehicles.num_vehicles,), ) ``` ### 1.4 apply_rl_actions 应用Rl动作 The function `apply_rl_actions` is responsible for transforming commands specified by the RL agent into actual actions performed within the simulator. The vehicle kernel within the environment class contains several helper methods that may be of used to facilitate this process. These functions include: 函数' apply_rl_actions '负责将RL代理指定的命令转换为在模拟器中执行的实际操作。environment类中的vehicle内核包含几个辅助方法,可以用来促进这个过程。这些功能包括: * **apply_acceleration** (list of str, list of float) -> None: converts an action, or a list of actions, into accelerations to the specified vehicles (in simulation) * **apply_lane_change** (list of str, list of {-1, 0, 1}) -> None: converts an action, or a list of actions, into lane change directions for the specified vehicles (in simulation) * **choose_route** (list of str, list of list of str) -> None: converts an action, or a list of actions, into rerouting commands for the specified vehicles (in simulation) For our example we consider a situation where the RL agent can only specify accelerations for the RL vehicles; accordingly, the actuation method for the RL agent is defined as follows: 在我们的例子中,我们考虑这样一种情况:RL代理只能为RL车辆指定加速;因此,RL agent的驱动方法定义如下: ``` class myEnv(myEnv): # update my environment class def _apply_rl_actions(self, rl_actions): # the names of all autonomous (RL) vehicles in the network rl_ids = self.k.vehicle.get_rl_ids() # use the base environment method to convert actions into accelerations for the rl vehicles self.k.vehicle.apply_acceleration(rl_ids, rl_actions) ``` ### 1.5 get_state 获取状态 The `get_state` method extracts features from within the environments 
and provides them as inputs to the policy provided by the RL agent. Several helper methods exist within flow to help facilitate this process. Some useful helper methods can be accessed from the following objects: “get_state”方法从环境中提取特性,然后作为RL代理提供的策略的输入。flow中存在几个帮助方法来帮助简化这个过程。一些有用的帮助方法可以从以下对象访问: * **self.k.vehicle**: provides current state information for all vehicles within the network为网络中的所有车辆提供当前状态信息 * **self.k.traffic_light**: provides state information on the traffic lights提供交通信号灯的状态信息 * **self.k.network**: information on the network, which unlike the vehicles and traffic lights is static网络上的信息,这与车辆和红绿灯是静态的 * More accessor objects and methods can be found within the Flow documentation at: http://berkeleyflow.readthedocs.io/en/latest/ In order to model global observability within the network, our state space consists of the speeds and positions of all vehicles (as mentioned in section 1.3). This is implemented as follows: 为了在网络中建立全局可观测性模型,我们的状态空间由所有车辆的速度和位置组成(如第1.3节所述)。实施办法如下: ``` import numpy as np class myEnv(myEnv): # update my environment class def get_state(self, **kwargs): # the get_ids() method is used to get the names of all vehicles in the network ids = self.k.vehicle.get_ids() # we use the get_absolute_position method to get the positions of all vehicles pos = [self.k.vehicle.get_x_by_id(veh_id) for veh_id in ids] # we use the get_speed method to get the velocities of all vehicles vel = [self.k.vehicle.get_speed(veh_id) for veh_id in ids] # the speeds and positions are concatenated to produce the state return np.concatenate((pos, vel)) ``` ### 1.6 compute_reward 计算奖励值 The `compute_reward` method returns the reward associated with any given state. These values may encompass returns from values within the state space (defined in section 1.5) or may contain information provided by the environment but not immediately available within the state, as is the case in partially observable tasks (or POMDPs). 
' compute_reward '方法返回与任何给定状态相关联的奖励。这些值可能包含状态空间(在第1.5节中定义)中的值的返回,或者可能包含环境提供的信息,但是不能立即在状态中使用,就像部分可观察任务(或POMDPs)中的情况一样。 For this tutorial, we choose the reward function to be the average speed of all vehicles currently in the network. In order to extract this information from the environment, we use the `get_speed` method within the Vehicle kernel class to collect the current speed of all vehicles in the network, and return the average of these speeds as the reward. This is done as follows: 在本教程中,我们选择奖励函数作为当前网络中所有车辆的平均速度。为了从环境中提取这些信息,我们在车辆内核类中使用' get_speed '方法来收集网络中所有车辆的当前速度,并返回这些速度的平均值作为奖励。具体做法如下: ``` import numpy as np class myEnv(myEnv): # update my environment class def compute_reward(self, rl_actions, **kwargs): # the get_ids() method is used to get the names of all vehicles in the network ids = self.k.vehicle.get_ids() # we next get a list of the speeds of all vehicles in the network speeds = self.k.vehicle.get_speed(ids) # finally, we return the average of all these speeds as the reward return np.mean(speeds) ``` ## 2. Testing the New Environment 测试新环境 ### 2.1 Testing in Simulation Now that we have successfully created our new environment, we are ready to test this environment in simulation. We begin by running this environment in a non-RL based simulation. The return provided at the end of the simulation is indicative of the cumulative expected reward when jam-like behavior exists within the netowrk. 
现在我们已经成功地创建了新的环境,我们准备在模拟中测试这个环境。我们首先在一个非基于rl的模拟中运行这个环境。在模拟结束时提供的回报指示了在netowrk中存在类似于jam的行为时累积的预期回报。 ``` from flow.controllers import IDMController, ContinuousRouter from flow.core.experiment import Experiment from flow.core.params import SumoParams, EnvParams, \ InitialConfig, NetParams from flow.core.params import VehicleParams from flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS sim_params = SumoParams(sim_step=0.1, render=True) vehicles = VehicleParams() vehicles.add(veh_id="idm", acceleration_controller=(IDMController, {}), routing_controller=(ContinuousRouter, {}), num_vehicles=22) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) additional_net_params = ADDITIONAL_NET_PARAMS.copy() net_params = NetParams(additional_params=additional_net_params) initial_config = InitialConfig(bunching=20) flow_params = dict( exp_tag='ring', env_name=myEnv, # using my new environment for the simulation network=RingNetwork, simulator='traci', sim=sim_params, env=env_params, net=net_params, veh=vehicles, initial=initial_config, ) # number of time steps flow_params['env'].horizon = 1500 exp = Experiment(flow_params) # run the sumo simulation _ = exp.run(1) ``` ### 2.2 Training the New Environment 培训新环境 Next, we wish to train this environment in the presence of the autonomous vehicle agent to reduce the formation of waves in the network, thereby pushing the performance of vehicles in the network past the above expected return. 接下来,我们希望在自主车辆代理存在的情况下训练这种环境,以减少网络中波浪的形成,从而使网络中车辆的性能超过上述预期收益。 The below code block may be used to train the above environment using the Proximal Policy Optimization (PPO) algorithm provided by RLlib. In order to register the environment with OpenAI gym, the environment must first be placed in a separate ".py" file and then imported via the script below. Then, the script immediately below should function regularly. 
下面的代码块可以使用RLlib提供的Proximal Policy Optimization (PPO)算法来训练上述环境。为了注册OpenAI健身房的环境,环境必须首先放在一个单独的。py”。然后通过下面的脚本导入。然后,下面的脚本应该正常工作。 ``` ############################################################# ####### Replace this with the environment you created ####### ############################################################# from flow.envs import AccelEnv as myEnv ``` **Note**: We do not recommend training this environment to completion within a jupyter notebook setting; however, once training is complete, visualization of the resulting policy should show that the autonomous vehicle learns to dissipate the formation and propagation of waves in the network. **注**:我们不建议在这种环境下进行的培训是在木星笔记本设置中完成的;然而,一旦训练完成,结果策略的可视化应该表明,自主车辆学会了在网络中消散波的形成和传播。 ``` import json import ray from ray.rllib.agents.registry import get_agent_class from ray.tune import run_experiments from ray.tune.registry import register_env from flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS from flow.utils.registry import make_create_env from flow.utils.rllib import FlowParamsEncoder from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams from flow.core.params import VehicleParams, SumoCarFollowingParams from flow.controllers import RLController, IDMController, ContinuousRouter # time horizon of a single rollout HORIZON = 1500 # number of rollouts per training iteration N_ROLLOUTS = 20 # number of parallel workers N_CPUS = 2 # We place one autonomous vehicle and 22 human-driven vehicles in the network vehicles = VehicleParams() vehicles.add( veh_id="human", acceleration_controller=(IDMController, { "noise": 0.2 }), car_following_params=SumoCarFollowingParams( min_gap=0 ), routing_controller=(ContinuousRouter, {}), num_vehicles=21) vehicles.add( veh_id="rl", acceleration_controller=(RLController, {}), routing_controller=(ContinuousRouter, {}), num_vehicles=1) flow_params = dict( # name of the experiment exp_tag="stabilizing_the_ring", # name of the flow environment the experiment is 
running on env_name=myEnv, # <------ here we replace the environment with our new environment # name of the network class the experiment is running on network=RingNetwork, # simulator that is used by the experiment simulator='traci', # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( sim_step=0.1, render=True, ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, warmup_steps=750, clip_actions=False, additional_params={ "target_velocity": 20, "sort_vehicles": False, "max_accel": 1, "max_decel": 1, }, ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( additional_params=ADDITIONAL_NET_PARAMS.copy() ), # vehicles to be placed in the network at the start of a rollout (see # flow.core.params.VehicleParams) veh=vehicles, # parameters specifying the positioning of vehicles upon initialization/ # reset (see flow.core.params.InitialConfig) initial=InitialConfig( bunching=20, ), ) def setup_exps(): """Return the relevant components of an RLlib experiment. 
Returns ------- str name of the training algorithm str name of the gym environment to be trained dict training configuration parameters """ alg_run = "PPO" agent_cls = get_agent_class(alg_run) config = agent_cls._default_config.copy() config["num_workers"] = N_CPUS config["train_batch_size"] = HORIZON * N_ROLLOUTS config["gamma"] = 0.999 # discount rate config["model"].update({"fcnet_hiddens": [3, 3]}) config["use_gae"] = True config["lambda"] = 0.97 config["kl_target"] = 0.02 config["num_sgd_iter"] = 10 config['clip_actions'] = False # FIXME(ev) temporary ray bug config["horizon"] = HORIZON # save the flow params for replay flow_json = json.dumps( flow_params, cls=FlowParamsEncoder, sort_keys=True, indent=4) config['env_config']['flow_params'] = flow_json config['env_config']['run'] = alg_run create_env, gym_name = make_create_env(params=flow_params, version=0) # Register as rllib env register_env(gym_name, create_env) return alg_run, gym_name, config alg_run, gym_name, config = setup_exps() ray.init(num_cpus=N_CPUS + 1) trials = run_experiments({ flow_params["exp_tag"]: { "run": alg_run, "env": gym_name, "config": { **config }, "checkpoint_freq": 20, "checkpoint_at_end": True, "max_failures": 999, "stop": { "training_iteration": 200, }, } }) ```
github_jupyter
# Description : This is a emotion analysis program that parses the tweets fetched from Twitter using Python ``` # import libraries import tweepy from textblob import TextBlob from wordcloud import WordCloud import pandas as pd import numpy as np import re import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') # Twitter API credentials consumer_key = 'ycEZMmO2frdqYsFQfYSZlEZzy' consumer_secret = 'r9aJEgLmvmPte9HslJoM37RyP5Gay1ZJ3NfSam67wMTPFmq7IY' access_token = '1197336121107050496-7OTvNKMk5Z1v4nwgjJCpzDaJE5gIdc' access_t_secret = 'Wc9IwyuX48jsKLnF2lsn679sZ03g6yXuy2KZthziMLbgP' #create the authentication object authenticate = tweepy.OAuthHandler(consumer_key, consumer_secret) #Set the access tokens authenticate.set_access_token(access_token, access_t_secret) #Create API object while passing in the auth info api = tweepy.API(authenticate, wait_on_rate_limit = True) #Extract 100 tweets from a hashtag hashtag = api.search(q='#messi', lang='en', result_type='recent', count=100) #print the last 5 tweets print('show 5 tweets') i = 1 for tweet in hashtag[:5]: print(str(i) + ') ' + tweet.text + '\n') i += 1 #Create a dataframe df = pd.DataFrame([tweet.text for tweet in hashtag], columns=['Tweets']) #show the first 5 rows of data df.head() #Clean text #create a function to clean the text def clean_text(text): text = re.sub(r'@[A-za-z0-9]+', '', text) # removing @metions text = re.sub(r'#', '', text) # removing the '#' symbol text = re.sub(r'RT[\s]+', '', text) #removing RT's text = re.sub(r'https?:\/\/?', '', text) #removing links return text df['Tweets'] = df['Tweets'].apply(clean_text) #show the clean text df.head() # Create a function to get the subjectivity def get_subjectivity(text): return TextBlob(text).sentiment.subjectivity # Create a function to get the polarity def get_polarity(text): return TextBlob(text).sentiment.polarity # Create the new colums df['Subjectivity'] = df['Tweets'].apply(get_subjectivity) df['Polarity'] = 
df['Tweets'].apply(get_polarity) # Show the new DataFrame df # Plot the Word Cloid all_words = ' '.join( [twts for twts in df['Tweets']] ) word_cloud = WordCloud(width=500, height=300, random_state= 21, max_font_size=119).generate(all_words) plt.imshow(word_cloud, interpolation= 'bilinear') plt.axis('off') plt.show() # Create a function to compute the negative, neutral and positive analysis def analysis(score): if score < 0: return 'Negative' elif score == 0: return 'Neutral' else: return 'Positive' # Adding new Column df['Analysis'] = df['Polarity'].apply(analysis) # Show DataFrame df # Print all of positive tweets j = 1 sortedDF = df.sort_values(by=['Polarity']) for i in range(0, sortedDF.shape[0]): if(sortedDF['Analysis'][i] == 'Positive'): print(str(j) + ') ' + sortedDF['Tweets'][i]) print() j += 1 # Print all of negative tweets j = 1 sortedDF = df.sort_values(by=['Polarity'], ascending='False') for i in range(0, sortedDF.shape[0]): if(sortedDF['Analysis'][i] == 'Negative'): print(str(j) + ') ' + sortedDF['Tweets'][i]) print() j += 1 # Plot the polarity ans subjectivity plt.figure(figsize=(8,6)) for i in range(0, df.shape[0]): plt.scatter(df['Polarity'][i], df['Subjectivity'][i], color='Blue') plt.title('Sentiment Analysis') plt.xlabel('Polarity') plt.ylabel('Subjectivity') plt.show() # get the percentage of positive tweets ptweet = df[df.Analysis == 'Positive'] ptweet = ptweet['Tweets'] round(ptweet.shape[0] / df.shape[0] * 100, 1) # get the percentage of negative tweets ntweet = df[df.Analysis == 'Negative'] ntweet = ntweet['Tweets'] round(ntweet.shape[0] / df.shape[0] * 100, 1) # Show the value counts df['Analysis'].value_counts() # Plot and visualize the counts plt.title('Sentiment Analysis') plt.xlabel('Sentiment') plt.ylabel('Counts') df['Analysis'].value_counts().plot(kind='bar') plt.show() ```
github_jupyter
``` !git clone https://github.com/parhamzm/Beijing-Pollution-DataSet !ls Beijing-Pollution-DataSet import torch import torchvision import torch.nn as nn from torchvision import transforms import pandas as pd import matplotlib.pyplot as plt import numpy as np from torch.utils.data import random_split from math import sqrt from numpy import concatenate from matplotlib import pyplot from pandas import read_csv from pandas import DataFrame from pandas import concat from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import LabelEncoder from sklearn.metrics import mean_squared_error from numpy import array from numpy import hstack ``` # **Data Pre Processing** ``` DATA_DIR = "Beijing-Pollution-DataSet/" from pandas import read_csv from datetime import datetime from random import randint def select_month(sequences, n_samples=250): X, y = list(), list() rand_hour = randint(0, 24) rand_day = randint(0, 7) for i in range(0, n_samples): start_ix = rand_hour + rand_day*24 + 672 * i # 168 : Week hours! 
idxs = [] for j in range(0, 4): if j <=2: idx = start_ix + (j * 168) # Add different weeks idxs.append(idx) if j == 3: # Target idy = start_ix + (j * 168) seq_x = sequences[idxs, :] seq_y = sequences[idy, 0] y.append(seq_y) X.append(seq_x) return X, y # split a multivariate sequence into samples def split_sequences(sequences, n_steps, n_samples=12000, start_from=0): X, y = list(), list() for i in range(start_from, (start_from + n_samples)): # find the end of this pattern end_ix = i + n_steps # check if we are beyond the dataset # if end_ix > len(sequences): # break # gather input and output parts of the pattern seq_x = sequences[i:end_ix, :] seq_y = sequences[end_ix, 0] y.append(seq_y) X.append(seq_x) return array(X), array(y) # load dataset DATA_DIR = "Beijing-Pollution-DataSet/" data = np.load(DATA_DIR + 'polution_dataSet.npy') scaled_data = data x, y = select_month(data, n_samples=65) print("X shape => ", np.array(x).shape) print("y shape => ", np.array(y).shape) x = np.array(x) y = np.array(y) dataset = data train_X, train_y = x[0:50], y[0:50] #split_sequences(dataset, n_timesteps, n_samples=15000, start_from=0) valid_X, valid_y = x[50:], y[50:] #split_sequences(dataset, n_timesteps, n_samples=3000, start_from=15000) test_loader_X = torch.utils.data.DataLoader(dataset=(train_X), batch_size=20, shuffle=False) # train_X = torch.tensor(train_X, dtype=torch.float32) # train_y = torch.tensor(train_y, dtype=torch.float32) print("Train X Shape :=> ", train_X.shape) print("Train Y Shape :=> ", train_y.shape) print("####################################") # print("Test X Shape :=> ", test_X.shape) # print("Test Y Shape :=> ", test_y.shape) class LSTM(torch.nn.Module): def __init__(self, n_features=8, n_output=1, seq_length=11, n_hidden_layers=233, n_layers=1): super(LSTM, self).__init__() self.n_features = n_features self.seq_len = seq_length self.n_hidden = n_hidden_layers # number of hidden states self.n_layers = n_layers # number of LSTM layers (stacked) self.n_output 
= n_output self.l_lstm = torch.nn.LSTM(input_size = n_features, hidden_size = self.n_hidden, num_layers = self.n_layers, batch_first = True) # according to pytorch docs LSTM output is # (batch_size, seq_len, num_directions * hidden_size) # when considering batch_first = True self.l_linear = torch.nn.Linear(self.n_hidden * self.seq_len, self.n_output) def forward(self, x): hidden_state = torch.zeros(self.n_layers, x.size(0), self.n_hidden).requires_grad_() cell_state = torch.zeros(self.n_layers, x.size(0), self.n_hidden).requires_grad_() self.hidden = (hidden_state.detach(), cell_state.detach()) batch_size, seq_len, _ = x.size() lstm_out, self.hidden = self.l_lstm(x, self.hidden) # lstm_out(with batch_first = True) is # (batch_size,seq_len,num_directions * hidden_size) # for following linear layer we want to keep batch_size dimension and merge rest # .contiguous() -> solves tensor compatibility error x = lstm_out.contiguous().view(batch_size, -1) # print("X shape :=> ", x.shape) # out = self.l_linear(lstm_out[:, -1, :]) # print("Out Shape :=> ", lstm_out[:, -1, :].shape) out = self.l_linear(x) return out torch.manual_seed(13) model = LSTM(n_features=8, n_output=1, seq_length=3, n_hidden_layers=233, n_layers=1) criterion = nn.L1Loss() optimizer = torch.optim.Adagrad(model.parameters(), lr=0.0003) model = model #.to(device) criterion = criterion #.to(device) for p in model.parameters(): print(p.numel()) import time start_time = time.time() # train_X, train_y epochs = 200 model.train() batch_size = 5 running_loss_history = [] val_running_loss_history = [] for epoch in range(epochs): running_loss = 0.0 val_running_loss = 0.0 model.train() for b in range(0, len(train_X), batch_size): inpt = train_X[b:b+batch_size, :, :] target = train_y[b:b+batch_size] # print("Input Shape :=> ", inpt.shape) x_batch = torch.tensor(inpt, dtype=torch.float32) y_batch = torch.tensor(target, dtype=torch.float32) output = model(x_batch) loss = criterion(output.view(-1), y_batch) running_loss 
+= loss.item() loss.backward() optimizer.step() optimizer.zero_grad() else: with torch.no_grad(): # it will temprerorerly set all the required grad flags to be false model.eval() for b in range(0, len(valid_X), batch_size): inpt = valid_X[b:b+batch_size, :, :] target = valid_y[b:b+batch_size] x_batch_test = torch.tensor(inpt, dtype=torch.float32) y_batch_test = torch.tensor(target, dtype=torch.float32) # model.init_hidden(x_batch_test.size(0)) output_test = model(x_batch_test) loss_test = criterion(output_test.view(-1), y_batch_test) val_running_loss += loss_test.item() val_epoch_loss = val_running_loss / len(valid_X) val_running_loss_history.append(val_epoch_loss) epoch_loss = running_loss / len(valid_X) running_loss_history.append(epoch_loss) print('step : ' , epoch , ' Train loss : ' , epoch_loss, ', Valid Loss : => ', val_epoch_loss) print("***->>>-----------------------------------------------<<<-***") total_time = time.time() - start_time print("===========================================================") print("*********************************************************") print("The total Training Time is Equal with ==> : {0} Sec.".format(total_time)) print("*********************************************************") print("===========================================================") f, ax = plt.subplots(1, 1, figsize=(10, 7)) plt.title("Valid & Test Loss", fontsize=18) plt.xlabel("Epoch") plt.ylabel("Loss") plt.plot(running_loss_history, label='train') plt.plot(val_running_loss_history, label='test') # pyplot.plot(history.history['val_loss'], label='test') plt.legend() plt.show() test_x, test_y = x[50:], y[50:] model.eval() test_x = torch.tensor(test_x, dtype=torch.float32) test_y = torch.tensor(test_y, dtype=torch.float32) res = model(test_x) loss_test = criterion(res.view(-1), test_y) future = 100 window_size = 11 fig = plt.figure(figsize=(20, 7)) plt.title("Beijing Polution Prediction - LSTM", fontsize=18) plt.ylabel('Polution') plt.xlabel('Num data') 
plt.grid(True) plt.autoscale(axis='x', tight=True) fig.autofmt_xdate() # plt.plot(data[15000:15100, 0]) plt.plot(test_y, label="Real") # plt.plot(preds[12:]) print(res.shape) plt.plot(res.detach().numpy(), label="Prediction") plt.legend() plt.show() test_x, test_y = x[50:], y[50:] model.eval() test_running_loss = 0 with torch.no_grad(): # it will temprerorerly set all the required grad flags to be false model.eval() for b in range(0, len(test_x), batch_size): inpt = test_x[b:b+batch_size, :, :] target = test_y[b:b+batch_size] x_batch_test = torch.tensor(inpt, dtype=torch.float32) y_batch_test = torch.tensor(target, dtype=torch.float32) # model.init_hidden(x_batch_test.size(0)) output_test = model(x_batch_test) loss_test = criterion(output_test.view(-1), y_batch_test) test_running_loss += loss_test.item() test_epoch_loss = test_running_loss / len(test_x) print("##########################################################") print(">>>>---------------------------------------------------<<<<") print(">>>>----------***************************--------------<<<<") print("**** Test Loss :==>>> ", test_epoch_loss) print(">>>>----------***************************--------------<<<<") print(">>>>---------------------------------------------------<<<<") print("##########################################################") ``` # **Predict Only 12 & 24 Times!** ``` # split a multivariate sequence into samples def split_sequences12(sequences, n_steps, n_samples=12000, start_from=0): X, y = list(), list() j = 0 for i in range(start_from, (start_from + n_samples)): # find the end of this pattern end_ix = j*12 + n_steps + start_from # check if we are beyond the dataset # gather input and output parts of the pattern j = j + 1 seq_x = sequences[end_ix-11:end_ix, :] seq_y = sequences[end_ix, 0] y.append(seq_y) X.append(seq_x) print("End :=> ", end_ix) return array(X), array(y) ```
github_jupyter
# Treasure Hunt Game Notebook ## Read and Review Your Starter Code The theme of this project is a popular treasure hunt game in which the player needs to find the treasure before the pirate does. While you will not be developing the entire game, you will write the part of the game that represents the intelligent agent, which is a pirate in this case. The pirate will try to find the optimal path to the treasure using deep Q-learning. You have been provided with two Python classes and this notebook to help you with this assignment. The first class, TreasureMaze.py, represents the environment, which includes a maze object defined as a matrix. The second class, GameExperience.py, stores the episodes – that is, all the states that come in between the initial state and the terminal state. This is later used by the agent for learning by experience, a technique called "experience replay". This notebook shows how to play a game. Your task is to complete the deep Q-learning implementation for which a skeleton implementation has been provided. The code blocks you will need to complete have #TODO as a header. First, read and review the next few code and instruction blocks to understand the code that you have been given. 
``` from __future__ import print_function import os, sys, time, datetime, json, random import numpy as np from keras.models import Sequential from keras.layers.core import Dense, Activation from keras.optimizers import SGD , Adam, RMSprop from keras.layers.advanced_activations import PReLU import matplotlib.pyplot as plt from TreasureMaze import TreasureMaze from GameExperience import GameExperience %matplotlib inline ``` The following code block contains an 8x8 matrix that will be used as a maze object: ``` maze = np.array([ [ 1., 0., 1., 1., 1., 1., 1., 1.], [ 1., 0., 1., 1., 1., 0., 1., 1.], [ 1., 1., 1., 1., 0., 1., 0., 1.], [ 1., 1., 1., 0., 1., 1., 1., 1.], [ 1., 1., 0., 1., 1., 1., 1., 1.], [ 1., 1., 1., 0., 1., 0., 0., 0.], [ 1., 1., 1., 0., 1., 1., 1., 1.], [ 1., 1., 1., 1., 0., 1., 1., 1.] ]) ``` This helper function allows a visual representation of the maze object: ``` def show(qmaze): plt.grid('on') nrows, ncols = qmaze.maze.shape ax = plt.gca() ax.set_xticks(np.arange(0.5, nrows, 1)) ax.set_yticks(np.arange(0.5, ncols, 1)) ax.set_xticklabels([]) ax.set_yticklabels([]) canvas = np.copy(qmaze.maze) for row,col in qmaze.visited: canvas[row,col] = 0.6 pirate_row, pirate_col, _ = qmaze.state canvas[pirate_row, pirate_col] = 0.3 # pirate cell canvas[nrows-1, ncols-1] = 0.9 # treasure cell img = plt.imshow(canvas, interpolation='none', cmap='gray') return img ``` The pirate agent can move in four directions: left, right, up, and down. While the agent primarily learns by experience through exploitation, often, the agent can choose to explore the environment to find previously undiscovered paths. This is called "exploration" and is defined by epsilon. This value is typically a lower value such as 0.1, which means for every ten attempts, the agent will attempt to learn by experience nine times and will randomly explore a new path one time. You are encouraged to try various values for the exploration factor and see how the algorithm performs. 
``` LEFT = 0 UP = 1 RIGHT = 2 DOWN = 3 # Exploration factor epsilon = 0.1 # Actions dictionary actions_dict = { LEFT: 'left', UP: 'up', RIGHT: 'right', DOWN: 'down', } num_actions = len(actions_dict) ``` The sample code block and output below show creating a maze object and performing one action (DOWN), which returns the reward. The resulting updated environment is visualized. ``` qmaze = TreasureMaze(maze) canvas, reward, game_over = qmaze.act(DOWN) print("reward=", reward) show(qmaze) ``` This function simulates a full game based on the provided trained model. The other parameters include the TreasureMaze object and the starting position of the pirate. ``` def play_game(model, qmaze, pirate_cell): qmaze.reset(pirate_cell) envstate = qmaze.observe() while True: prev_envstate = envstate # get next action q = model.predict(prev_envstate) action = np.argmax(q[0]) # apply action, get rewards and new state envstate, reward, game_status = qmaze.act(action) if game_status == 'win': return True elif game_status == 'lose': return False ``` This function helps you to determine whether the pirate can win any game at all. If your maze is not well designed, the pirate may not win any game at all. In this case, your training would not yield any result. The provided maze in this notebook ensures that there is a path to win and you can run this method to check. ``` def completion_check(model, qmaze): for cell in qmaze.free_cells: if not qmaze.valid_actions(cell): return False if not play_game(model, qmaze, cell): return False return True ``` The code you have been given in this block will build the neural network model. Review the code and note the number of layers, as well as the activation, optimizer, and loss functions that are used to train the model. 
``` def build_model(maze): model = Sequential() model.add(Dense(maze.size, input_shape=(maze.size,))) model.add(PReLU()) model.add(Dense(maze.size)) model.add(PReLU()) model.add(Dense(num_actions)) model.compile(optimizer='adam', loss='mse') return model ``` # #TODO: Complete the Q-Training Algorithm Code Block This is your deep Q-learning implementation. The goal of your deep Q-learning implementation is to find the best possible navigation sequence that results in reaching the treasure cell while maximizing the reward. In your implementation, you need to determine the optimal number of epochs to achieve a 100% win rate. You will need to complete the section starting with #pseudocode. The pseudocode has been included for you. ``` def qtrain(model, maze, **opt): # exploration factor global epsilon # number of epochs n_epoch = opt.get('n_epoch', 15000) # maximum memory to store episodes max_memory = opt.get('max_memory', 1000) # maximum data size for training data_size = opt.get('data_size', 50) # start time start_time = datetime.datetime.now() # Construct environment/game from numpy array: maze (see above) qmaze = TreasureMaze(maze) # Initialize experience replay object experience = GameExperience(model, max_memory=max_memory) win_history = [] # history of win/lose game hsize = qmaze.maze.size//2 # history window size win_rate = 0.0 # Training Code # Epoch 'for' code: for i in range (n_epoch): Agent_cell = random.choice(qmaze.free_cells) qmaze.reset(Agent_cell) envstate = qmaze.observe # State declaration State = 'not game over' # While loop for 'not game over' while State == 'not game over': previous_envstate = envstate q = model.predict(previous_envstate) action = random.choice(actions_dict) envstate, reward, game_status = qmaze.act(action) actionInt = list(actions_dict.keys()) [list(actions_dict.values()).index(action)] episode = [previous_envstate, actionInt, reward, envstate, game_status] # Store the episode in Experience replay Object 
experience.remember(episode) # Call GameExperience.get_data to retrieve training data (input and target) inputs, targets = experience.get_data() # Pass to model.fit method to train the model model.fit(inputs, targets) # Evaluated loss with model.evaluate win_rate = model.evaluate(inputs, targets) print(win_rate) print(State) # If the win rate is above the threshold and your model passes the completion check, that would be your epoch. if win_rate > 0.9 and completion_check(model, qmaze): epoch = i print(i) #Print the epoch, loss, episodes, win count, and win rate for each epoch dt = datetime.datetime.now() - start_time t = format_time(dt.total_seconds()) template = "Epoch: {:03d}/{:d} | Loss: {:.4f} | Episodes: {:d} | Win count: {:d} | Win rate: {:.3f} | time: {}" print(template.format(epoch, n_epoch-1, loss, n_episodes, sum(win_history), win_rate, t)) # We simply check if training has exhausted all free cells and if in all # cases the agent won. if win_rate > 0.9 : epsilon = 0.05 if sum(win_history[-hsize:]) == hsize and completion_check(model, qmaze): print("Reached 100%% win rate at epoch: %d" % (epoch,)) break # Determine the total time for training dt = datetime.datetime.now() - start_time seconds = dt.total_seconds() t = format_time(seconds) print("n_epoch: %d, max_mem: %d, data: %d, time: %s" % (epoch, max_memory, data_size, t)) return seconds # This is a small utility for printing readable time strings: def format_time(seconds): if seconds < 400: s = float(seconds) return "%.1f seconds" % (s,) elif seconds < 4000: m = seconds / 60.0 return "%.2f minutes" % (m,) else: h = seconds / 3600.0 return "%.2f hours" % (h,) ``` ## Test Your Model Now we will start testing the deep Q-learning implementation. To begin, select **Cell**, then **Run All** from the menu bar. This will run your notebook. As it runs, you should see output begin to appear beneath the next few cells. The code below creates an instance of TreasureMaze. 
``` qmaze = TreasureMaze(maze) show(qmaze) ``` In the next code block, you will build your model and train it using deep Q-learning. Note: This step takes several minutes to fully run. ``` model = build_model(maze) qtrain(model, maze, epochs=1000, max_memory=8*maze.size, data_size=32) ``` This cell will check to see if the model passes the completion check. Note: This could take several minutes. ``` completion_check(model, qmaze) show(qmaze) ``` This cell will test your model for one game. It will start the pirate at the top-left corner and run play_game. The agent should find a path from the starting position to the target (treasure). The treasure is located in the bottom-right corner. ``` pirate_start = (0, 0) play_game(model, qmaze, pirate_start) show(qmaze) ``` ## Save and Submit Your Work After you have finished creating the code for your notebook, save your work. Make sure that your notebook contains your name in the filename (e.g. Doe_Jane_ProjectTwo.ipynb). This will help your instructor access and grade your work easily. Download a copy of your IPYNB file and submit it to Brightspace. Refer to the Jupyter Notebook in Apporto Tutorial if you need help with these tasks.
github_jupyter
# (reconstructed from a flattened Jupyter-notebook dump; markdown/IPython
# magics kept as comments)

from google.colab import drive
drive.mount('/content/drive')
# %cd drive/My Drive/google_colab_gpu/GSOC 2020/CERN-HSF   <- IPython magic, not valid Python
# %ls

import numpy as np
import pandas as pd
import h5py
import warnings
warnings.filterwarnings('ignore')

import torch
import torch.nn
# BUG FIX: train_test_split is used below but its import was commented out.
from sklearn.model_selection import train_test_split

# Load the electron (label 1) and photon (label 0) calorimeter images.
filename = 'SingleElectronPt50_IMGCROPS_n249k_RHv1.hdf5'
data1 = h5py.File(filename, 'r')
Y1 = data1['y']
X1 = data1['X']

filename = 'SinglePhotonPt50_IMGCROPS_n249k_RHv1.hdf5'
data0 = h5py.File(filename, 'r')
Y0 = data0['y']
X0 = data0['X']

X_final = np.concatenate((X0[:], X1[:]), axis=0)
Y_final = np.concatenate((Y0[:], Y1[:]), axis=0)

X_train, X_valid, Y_train, Y_valid = train_test_split(
    X_final, Y_final, test_size=0.2, random_state=42)
print(X_train.shape, Y_train.shape)
print(X_valid.shape, Y_valid.shape)

# Split the two detector channels into separate NCHW arrays
# (channel 0 and channel 1 of the last axis).
X_train0 = X_train[:, :, :, 0].reshape((X_train.shape[0], 1, X_train.shape[1], X_train.shape[2]))
X_valid0 = X_valid[:, :, :, 0].reshape((X_valid.shape[0], 1, X_valid.shape[1], X_valid.shape[2]))
X_train1 = X_train[:, :, :, 1].reshape((X_train.shape[0], 1, X_train.shape[1], X_train.shape[2]))
X_valid1 = X_valid[:, :, :, 1].reshape((X_valid.shape[0], 1, X_valid.shape[1], X_valid.shape[2]))

X_train.shape, X_valid.shape, X_train0.shape, X_valid0.shape, X_train1.shape, X_valid1.shape

# BUG FIX: the original referenced undefined names Y_train_tp / Y_valid_tp
# here; the intended arrays are Y_train / Y_valid.
X_train0, Y_train, X_valid0, Y_valid, X_train1, X_valid1 = map(
    torch.tensor, (X_train0, Y_train, X_valid0, Y_valid, X_train1, X_valid1))

from torch.utils import data


class Dataset(data.Dataset):
    """Characterizes a dataset for PyTorch."""

    def __init__(self, inputs, labels):
        self.labels = labels
        self.inputs = inputs

    def __len__(self):
        # Denotes the total number of samples
        return len(self.inputs)

    def __getitem__(self, index):
        # Generates one sample of data: (input, label)
        X = self.inputs[index]
        y = self.labels[index]
        return X, y


# NOTE(review): the original built two DataLoaders here with a plain dict as
# `dataset` ({'input': ..., 'output': ...}), which is not a valid Dataset and
# was never iterated; they are superseded by training_generator /
# validation_generator below and have been removed.

window_height = 32
window_width = 32

import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary


class Net(nn.Module):
    """Two conv+pool blocks (16, 32 channels) followed by three fully
    connected layers producing a single sigmoid score per image."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1)
        # After two 2x2 poolings a 32x32 input becomes 32 channels of 8x8.
        self.fc1 = nn.Linear(32 * 8 * 8, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 1)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, x.shape[1] * x.shape[2] * x.shape[3])
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # F.sigmoid is deprecated; torch.sigmoid is the supported equivalent.
        x = torch.sigmoid(self.fc3(x))
        return x


net = Net()
net.cuda()
summary(net, (1, 32, 32), device='cuda')

use_cuda = True
if use_cuda and torch.cuda.is_available():
    net.cuda()

batch_size = 1024

from torch.utils import data

# CUDA for PyTorch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")

# Parameters
params = {'batch_size': batch_size, 'shuffle': True}
max_epochs = 100

# Generators
training_set = Dataset(X_train0, Y_train)
training_generator = data.DataLoader(training_set, **params)

validation_set = Dataset(X_valid0, Y_valid)
validation_generator = data.DataLoader(validation_set, **params)

import torch.optim as optim

criterion = nn.BCELoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)

num_epochs = 10
correct = 0
total = 0
for epoch in range(num_epochs):
    for i, (inputs, labels) in enumerate(training_generator):
        if use_cuda and torch.cuda.is_available():
            inputs = inputs.cuda()
            labels = labels.cuda()

        optimizer.zero_grad()
        # BUG FIX: the network outputs shape (N, 1) while labels are (N,);
        # comparing them broadcast to (N, N) and inflated the accuracy count,
        # and BCELoss warns/fails on mismatched shapes. Flatten the outputs
        # so loss and accuracy use matching shapes. `.float()` casts the
        # float64 HDF5 data to the network's float32 parameters.
        outputs = net(inputs.float()).view(-1)
        loss = criterion(outputs, labels.float())
        loss.backward()
        optimizer.step()

        preds = (outputs > 0.5).float()
        total += labels.shape[0]
        correct += (preds == labels).sum()
        if (i + 1) % 100 == 0:
            print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Accuracy: %.4f'
                  % (epoch + 1, num_epochs, i + 1, len(X_train0) // batch_size,
                     loss.item(), ((100 * correct.item() / total))))

# Validation pass with the same thresholding.
correct = 0
total = 0
for inputs, labels in validation_generator:
    if use_cuda and torch.cuda.is_available():
        inputs = inputs.cuda()
        labels = labels.cuda()
    outputs = net(inputs.float()).view(-1)
    preds = (outputs > 0.5).float()
    total += labels.shape[0]
    correct += (preds == labels).sum()

print('Validation Accuracy of the network on the ' + str(total)
      + ' test images: %.4f' % ((100 * correct.item() / total))
      + ' correct: ' + str((correct).item()))

torch.save(net.state_dict(), 'pytorch1.pt')

net = Net()
net.load_state_dict(torch.load('pytorch1.pt'))
if use_cuda and torch.cuda.is_available():
    net.cuda()
net.eval()
github_jupyter
# Introduction

This tutorial describes how to create edge and screw dislocations in BCC iron, starting with one unit cell containing two atoms.

## Background

The elastic solution for the displacement field of dislocations is provided in the paper [Dislocation Displacement Fields in Anisotropic Media](https://doi.org/10.1063/1.1657954).

## Theoretical

The [paper](https://doi.org/10.1063/1.1657954) mentioned in the background subsection deals with only one dislocation. Here we describe how to extend the solution to a periodic array of dislocations. Since we are dealing with linear elasticity we can superpose (sum up) the displacement fields of all the individual dislocations. Looking at Eqs. (2-8) of the abovementioned reference, this boils down to finding a closed-form solution for $$\sum_{m=-\infty}^{\infty} \log\left(z-ma \right).$$ Here $z= x+yi$ and $a$ is a real number, equivalent to $\mathbf{H}_{00}$, that defines the periodicity of dislocations in the x direction. Let us simplify the problem a bit further. Since this is the component displacement field we can add or subtract a constant term, so from each $\log\left(z-ma \right)$ we subtract a factor of $\log\left(a \right)$, leading to $$\sum_{m=-\infty}^{\infty} \log\left(\frac{z}{a}-m \right).$$ Let's change $z/a$ to $z$, and when we arrive at the solution we will change it back: $$\sum_{m=-\infty}^{\infty} \log\left(z-m \right).$$ The objective is to find a closed-form solution for $$f\left(z\right)=\sum_{m=-\infty}^{\infty} \log\left(z-m \right).$$ First note that $$ f'\left(z\right)=\frac{1}{z}+\sum_{m=1}^{\infty}\frac{1}{z-m}+\frac{1}{z+m}, $$ and also $$ \frac{1}{z\mp m}=\mp \frac{1}{m}\sum_{n=0}^{\infty} \left(\pm \frac{z}{m}\right)^n. 
$$ This leads to $$ \frac{1}{z-m}+\frac{1}{z+m}=-\frac{2}{z}\sum_{n=1}^{\infty}\left(\frac{z}{m}\right)^{2n}, $$ and subsequently $$ f'\left(z\right)=\frac{1}{z}-\frac{2}{z}\sum_{n=1}^{\infty}\left(z\right)^{2n}\sum_{m=1}^{\infty}m^{-2n}, $$ $$ =\frac{1}{z}-\frac{2}{z}\sum_{n=1}^{\infty}\left(z\right)^{2n}\zeta\left(2n\right). $$ where $\zeta$ is the Riemann zeta function. Since $\zeta\left(0\right)=-1/2$, it simplifies to: $$ f'\left(z\right)=-\frac{2}{z}\sum_{n=0}^{\infty}\left(z\right)^{2n}\zeta\left(2n\right) $$ Note that $$ -\frac{\pi z\cot\left(\pi z\right)}{2}=\sum_{n=0}^{\infty}z^{2n} \zeta\left(2n\right) $$ I have no idea how I figured this out but it is true. Therefore, $$ f'\left(z\right)=\pi\cot\left(\pi z\right). $$ At this point one can naively assume that the problem is solved (like I did) and that the answer is something like: $$ f\left(z\right)=\log\left[\sin\left(\pi z\right)\right]+C, $$ where $C$ is a constant. However, after checking this against numerical values you will see that it is completely wrong. The issue here is that the strategy was wrong from the very beginning. The sum of the displacements of infinitely many dislocations will not converge, since we have infinitely many discontinuities in the displacement field. In other words, they do not cancel each other; they feed each other. But there is still a way to salvage this. Luckily, displacement is a relative quantity and we are dealing with crystals. We can easily add a discontinuity in the form of an integer number of Burgers vectors to a displacement field and nothing will be affected. So here is the trick: we will focus only on the displacement field of one unit cell dislocation (number 0). At each iteration we add two dislocations, one to its left and one to its right. At the $n$th iteration we add a discontinuity of the form $$ -\mathrm{Sign}\left[\mathrm{Im}\left(z\right)\right] \pi i $$ and a constant of the form: $$ -2\log n. 
$$ In other words, we need to evaluate: $$ \lim_{m\to\infty}\sum_{n=-m}^{m} \biggl\{ \log\left(z-n\right) -\mathrm{Sign}\left[\mathrm{Im}\left(z\right)\right] \pi i -2\log\left(n \right) \biggr\} + \pi, $$ which simplifies to $$ \lim_{m\to\infty}\sum_{n=-m}^{m}\log\left(z-n\right) -\mathrm{Sign}\left[\mathrm{Im}\left(z\right)\right] m \pi i -2\log\left(\frac{m\!!}{\sqrt{\pi}} \right) $$ Note that we added an extra $\pi$ to the displacement field for aesthetic reasons. After a lot of manipulations and tricks (meaning I don't remember how I got here) we arrive at the following relation: $$ \lim_{m\to\infty}\sum_{n=-m}^{m}\log\left(z-n\right) -\mathrm{Sign}\left[\mathrm{Im}\left(z\right)\right] m \pi i -2\log\left(\frac{m\!!}{\sqrt{\pi}} \right)=\log\left[\sin\left(\pi z\right)\right] $$ However, this is only valid when $$-1/2 \le\mathrm{Re}\left(z\right)\lt 1/2.$$ If one exceeds this domain the answer is: $$ \boxed{ \log\left[\sin\left(\pi z\right)\right]-\mathrm{Sign}\left[\mathrm{Im}\left(z\right)\right]\left \lceil{\mathrm{Re}\left(\frac{z}{2}\right)}-\frac{3}{4}\right \rceil 2 \pi i } $$ where $\lceil . \rceil$ is the ceiling function. Of course there is probably a nicer form; feel free to derive it. ## Final formulation To account for the periodicity of dislocations in the $x$ direction, the expression $\log\left(z\right)$ in Eqs. (2-7) of the [paper](https://doi.org/10.1063/1.1657954) should be replaced by: $$\lim_{m\to\infty}\sum_{n=-m}^{m}\log\left(z-na\right) -\mathrm{Sign}\left[\mathrm{Im}\left(z\right)\right] m \pi i -2\log\left(\frac{m\,\,\!!}{\sqrt{\pi}} \right),$$ which has the closed form: $$ \boxed{ \log\left[\sin\left(\pi\frac{z}{a}\right)\right]-\mathrm{Sign}\left[\mathrm{Im}\left(\frac{z}{a}\right)\right]\left \lceil{\mathrm{Re}\left(\frac{z}{2a}\right)}-\frac{3}{4}\right \rceil 2 \pi i. 
} $$ # Preperation ## Import packages ``` import numpy as np import matplotlib.pyplot as plt import mapp4py from mapp4py import md from lib.elasticity import rot, cubic, resize, displace, HirthEdge, HirthScrew ``` ## Block the output of all cores except for one ``` from mapp4py import mpi if mpi().rank!=0: with open(os.devnull, 'w') as f: sys.stdout = f; ``` ## Define an `md.export_cfg` object `md.export_cfg` has a call method that we can use to create quick snapshots of our simulation box ``` xprt = md.export_cfg(""); ``` # Screw dislocation ``` sim=md.atoms.import_cfg('configs/Fe_300K.cfg'); nlyrs_fxd=2 a=sim.H[0][0]; b_norm=0.5*a*np.sqrt(3.0); b=np.array([1.0,1.0,1.0]) s=np.array([1.0,-1.0,0.0])/np.sqrt(2.0) ``` ## Create a $\langle110\rangle\times\langle112\rangle\times\frac{1}{2}\langle111\rangle$ cell ### create a $\langle110\rangle\times\langle112\rangle\times\langle111\rangle$ cell Since `mapp4py.md.atoms.cell_chenge()` only accepts integer values start by creating a $\langle110\rangle\times\langle112\rangle\times\langle111\rangle$ cell ``` sim.cell_change([[1,-1,0],[1,1,-2],[1,1,1]]) ``` ### Remove half of the atoms and readjust the position of remaining Now one needs to cut the cell in half in $[111]$ direction. We can achive this in three steps: 1. Remove the atoms that are above located above $\frac{1}{2}[111]$ 2. Double the position of the remiaing atoms in the said direction 3. 
Shrink the box affinly to half on that direction ``` H=np.array(sim.H); def _(x): if x[2] > 0.5*H[2, 2] - 1.0e-8: return False; else: x[2]*=2.0; sim.do(_); _ = np.full((3,3), 0.0) _[2, 2] = - 0.5 sim.strain(_) ``` ### Readjust the postions ``` displace(sim,np.array([sim.H[0][0]/6.0,sim.H[1][1]/6.0,0.0])) ``` ## Replicating the unit cell ``` max_natms=100000 H=np.array(sim.H); n_per_area=sim.natms/(H[0,0] * H[1,1]); _ =np.sqrt(max_natms/n_per_area); N0 = np.array([ np.around(_ / sim.H[0][0]), np.around(_ / sim.H[1][1]), 1], dtype=np.int32) sim *= N0; H = np.array(sim.H); H_new = np.array(sim.H); H_new[1][1] += 50.0 resize(sim, H_new, np.full((3),0.5) @ H) C_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241); Q=np.array([np.cross(s,b)/np.linalg.norm(np.cross(s,b)),s/np.linalg.norm(s),b/np.linalg.norm(b)]) hirth = HirthScrew(rot(C_Fe,Q), rot(b*0.5*a,Q)) ctr = np.full((3),0.5) @ H_new; s_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1]) def _(x,x_d,x_dof): sy=(x[1]-ctr[1])/H[1, 1]; x0=(x-ctr)/H[0, 0]; if sy>s_fxd or sy<=-s_fxd: x_dof[1]=x_dof[2]=False; x+=b_norm*hirth.ave_disp(x0) else: x+=b_norm*hirth.disp(x0) sim.do(_) H = np.array(sim.H); H_inv = np.array(sim.B); H_new = np.array(sim.H); H_new[0,0]=np.sqrt(H[0,0]**2+(0.5*b_norm)**2) H_new[2,0]=H[2,2]*0.5*b_norm/H_new[0,0] H_new[2,2]=np.sqrt(H[2,2]**2-H_new[2,0]**2) F = np.transpose(H_inv @ H_new); sim.strain(F - np.identity(3)) xprt(sim, "dumps/screw.cfg") ``` ## putting it all together ``` def make_scrw(nlyrs_fxd,nlyrs_vel,vel): #this is for 0K #c_Fe=cubic(1.5187249951755375,0.9053185628093443,0.7249256807942608); #this is for 300K c_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241); #N0=np.array([80,46,5],dtype=np.int32) sim=md.atoms.import_cfg('configs/Fe_300K.cfg'); a=sim.H[0][0]; b_norm=0.5*a*np.sqrt(3.0); b=np.array([1.0,1.0,1.0]) s=np.array([1.0,-1.0,0.0])/np.sqrt(2.0) Q=np.array([np.cross(s,b)/np.linalg.norm(np.cross(s,b)),s/np.linalg.norm(s),b/np.linalg.norm(b)]) c0=rot(c_Fe,Q) hirth = 
HirthScrew(rot(c_Fe,Q),np.dot(Q,b)*0.5*a) sim.cell_change([[1,-1,0],[1,1,-2],[1,1,1]]) displace(sim,np.array([sim.H[0][0]/6.0,sim.H[1][1]/6.0,0.0])) max_natms=1000000 n_per_vol=sim.natms/sim.vol; _=np.power(max_natms/n_per_vol,1.0/3.0); N1=np.full((3),0,dtype=np.int32); for i in range(0,3): N1[i]=int(np.around(_/sim.H[i][i])); N0=np.array([N1[0],N1[1],1],dtype=np.int32); sim*=N0; sim.kB=8.617330350e-5 sim.create_temp(300.0,8569643); H=np.array(sim.H); H_new=np.array(sim.H); H_new[1][1]+=50.0 resize(sim, H_new, np.full((3),0.5) @ H) ctr=np.dot(np.full((3),0.5),H_new); s_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1]) s_vel=0.5-0.5*float(nlyrs_vel)/float(N0[1]) def _(x,x_d,x_dof): sy=(x[1]-ctr[1])/H[1][1]; x0=(x-ctr)/H[0][0]; if sy>s_fxd or sy<=-s_fxd: x_d[1]=0.0; x_dof[1]=x_dof[2]=False; x+=b_norm*hirth.ave_disp(x0) else: x+=b_norm*hirth.disp(x0) if sy<=-s_vel or sy>s_vel: x_d[2]=2.0*sy*vel; sim.do(_) H = np.array(sim.H); H_inv = np.array(sim.B); H_new = np.array(sim.H); H_new[0,0]=np.sqrt(H[0,0]**2+(0.5*b_norm)**2) H_new[2,0]=H[2,2]*0.5*b_norm/H_new[0,0] H_new[2,2]=np.sqrt(H[2,2]**2-H_new[2,0]**2) F = np.transpose(H_inv @ H_new); sim.strain(F - np.identity(3)) return N1[2],sim; ``` # Edge dislocation ``` sim=md.atoms.import_cfg('configs/Fe_300K.cfg'); nlyrs_fxd=2 a=sim.H[0][0]; b_norm=0.5*a*np.sqrt(3.0); b=np.array([1.0,1.0,1.0]) s=np.array([1.0,-1.0,0.0])/np.sqrt(2.0) sim.cell_change([[1,1,1],[1,-1,0],[1,1,-2]]) H=np.array(sim.H); def _(x): if x[0] > 0.5*H[0, 0] - 1.0e-8: return False; else: x[0]*=2.0; sim.do(_); _ = np.full((3,3), 0.0) _[0,0] = - 0.5 sim.strain(_) displace(sim,np.array([0.0,sim.H[1][1]/4.0,0.0])) max_natms=100000 H=np.array(sim.H); n_per_area=sim.natms/(H[0, 0] * H[1, 1]); _ =np.sqrt(max_natms/n_per_area); N0 = np.array([ np.around(_ / sim.H[0, 0]), np.around(_ / sim.H[1, 1]), 1], dtype=np.int32) sim *= N0; # remove one layer along ... 
direction H=np.array(sim.H); frac=H[0,0] /N0[0] def _(x): if x[0] < H[0, 0] /N0[0] and x[1] >0.5*H[1, 1]: return False; sim.do(_) H = np.array(sim.H); H_new = np.array(sim.H); H_new[1][1] += 50.0 resize(sim, H_new, np.full((3),0.5) @ H) C_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241); _ = np.cross(b,s) Q = np.array([b/np.linalg.norm(b), s/np.linalg.norm(s), _/np.linalg.norm(_)]) hirth = HirthEdge(rot(C_Fe,Q), rot(b*0.5*a,Q)) _ = (1.0+0.5*(N0[0]-1.0))/N0[0]; ctr = np.array([_,0.5,0.5]) @ H_new; frac = H[0][0]/N0[0] s_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1]) def _(x,x_d,x_dof): sy=(x[1]-ctr[1])/H[1, 1]; x0=(x-ctr); if(x0[1]>0.0): x0/=(H[0, 0]-frac) else: x0/= H[0, 0] if sy>s_fxd or sy<=-s_fxd: x+=b_norm*hirth.ave_disp(x0); x_dof[0]=x_dof[1]=False; else: x+=b_norm*hirth.disp(x0); x[0]-=0.25*b_norm; sim.do(_) H = np.array(sim.H) H_new = np.array(sim.H); H_new[0, 0] -= 0.5*b_norm; resize(sim, H_new, np.full((3),0.5) @ H) xprt(sim, "dumps/edge.cfg") ``` ## putting it all together ``` def make_edge(nlyrs_fxd,nlyrs_vel,vel): #this is for 0K #c_Fe=cubic(1.5187249951755375,0.9053185628093443,0.7249256807942608); #this is for 300K c_Fe=cubic(1.3967587463636366,0.787341583191591,0.609615090769241); #N0=np.array([80,46,5],dtype=np.int32) sim=md.atoms.import_cfg('configs/Fe_300K.cfg'); a=sim.H[0][0]; b_norm=0.5*a*np.sqrt(3.0); b=np.array([1.0,1.0,1.0]) s=np.array([1.0,-1.0,0.0])/np.sqrt(2.0) # create rotation matrix _ = np.cross(b,s) Q=np.array([b/np.linalg.norm(b), s/np.linalg.norm(s), _/np.linalg.norm(_)]) hirth = HirthEdge(rot(c_Fe,Q),np.dot(Q,b)*0.5*a) # create a unit cell sim.cell_change([[1,1,1],[1,-1,0],[1,1,-2]]) H=np.array(sim.H); def f0(x): if x[0]>0.5*H[0][0]-1.0e-8: return False; else: x[0]*=2.0; sim.do(f0); _ = np.full((3,3), 0.0) _[0,0] = - 0.5 sim.strain(_) displace(sim,np.array([0.0,sim.H[1][1]/4.0,0.0])) max_natms=1000000 n_per_vol=sim.natms/sim.vol; _=np.power(max_natms/n_per_vol,1.0/3.0); N1=np.full((3),0,dtype=np.int32); for i in 
range(0,3): N1[i]=int(np.around(_/sim.H[i][i])); N0=np.array([N1[0],N1[1],1],dtype=np.int32); N0[0]+=1; sim*=N0; # remove one layer along ... direction H=np.array(sim.H); frac=H[0][0]/N0[0] def _(x): if x[0] < H[0][0]/N0[0] and x[1]>0.5*H[1][1]: return False; sim.do(_) sim.kB=8.617330350e-5 sim.create_temp(300.0,8569643); H = np.array(sim.H); H_new = np.array(sim.H); H_new[1][1] += 50.0 ctr=np.dot(np.full((3),0.5),H); resize(sim,H_new, np.full((3),0.5) @ H) l=(1.0+0.5*(N0[0]-1.0))/N0[0]; ctr=np.dot(np.array([l,0.5,0.5]),H_new); frac=H[0][0]/N0[0] s_fxd=0.5-0.5*float(nlyrs_fxd)/float(N0[1]) s_vel=0.5-0.5*float(nlyrs_vel)/float(N0[1]) def f(x,x_d,x_dof): sy=(x[1]-ctr[1])/H[1][1]; x0=(x-ctr); if(x0[1]>0.0): x0/=(H[0][0]-frac) else: x0/= H[0][0] if sy>s_fxd or sy<=-s_fxd: x_d[1]=0.0; x_dof[0]=x_dof[1]=False; x+=b_norm*hirth.ave_disp(x0); else: x+=b_norm*hirth.disp(x0); if sy<=-s_vel or sy>s_vel: x_d[0]=2.0*sy*vel; x[0]-=0.25*b_norm; sim.do(f) H = np.array(sim.H) H_new = np.array(sim.H); H_new[0, 0] -= 0.5*b_norm; resize(sim, H_new, np.full((3),0.5) @ H) return N1[2], sim; nlyrs_fxd=2 nlyrs_vel=7; vel=-0.004; N,sim=make_edge(nlyrs_fxd,nlyrs_vel,vel) xprt(sim, "dumps/edge.cfg") _ = np.array([[-1,1,0],[1,1,1],[1,1,-2]], dtype=np.float); Q = np.linalg.inv(np.sqrt(_ @ _.T)) @ _; C = rot(cubic(1.3967587463636366,0.787341583191591,0.609615090769241),Q) B = np.linalg.inv( np.array([ [C[0, 0, 0, 0], C[0, 0, 1, 1], C[0, 0, 0, 1]], [C[0, 0, 1, 1], C[1, 1, 1, 1], C[1, 1, 0, 1]], [C[0, 0, 0, 1], C[1, 1, 0, 1], C[0, 1, 0, 1]] ] )) _ = np.roots([B[0, 0], -2.0*B[0, 2],2.0*B[0, 1]+B[2, 2], -2.0*B[1, 2], B[1, 1]]) mu = np.array([_[0],0.0]); if np.absolute(np.conjugate(mu[0]) - _[1]) > 1.0e-12: mu[1] = _[1]; else: mu[1] = _[2] alpha = np.real(mu); beta = np.imag(mu); p = B[0,0] * mu**2 - B[0,2] * mu + B[0, 1] q = B[0,1] * mu - B[0, 2] + B[1, 1]/ mu K = np.stack([p, q]) * np.array(mu[1], mu[0]) /(mu[1] - mu[0]) K_r = np.real(K) K_i = np.imag(K) Tr = np.stack([ np.array(np.array([[1.0, 
alpha[0]], [0.0, beta[0]]])), np.array([[1.0, alpha[1]], [0.0, beta[1]]]) ], axis=1) def u_f0(x): return np.sqrt(np.sqrt(x[0] * x[0] + x[1] * x[1]) + x[0]) def u_f1(x): return np.sqrt(np.sqrt(x[0] * x[0] + x[1] * x[1]) - x[0]) * np.sign(x[1]) def disp(x): _ = Tr @ x return K_r @ u_f0(_) + K_i @ u_f1(_) ``` ## Putting it all together ``` _ = np.array([[-1,1,0],[1,1,1],[1,1,-2]], dtype=np.float); Q = np.linalg.inv(np.sqrt(_ @ _.T)) @ _; C = rot(cubic(1.3967587463636366,0.787341583191591,0.609615090769241),Q) disp = crack(C) n = 300; r = 10; disp_scale = 0.3; n0 = int(np.round(n/ (1 +np.pi), )) n1 = n - n0 xs = np.concatenate(( np.stack([np.linspace(0, -r , n0), np.full((n0,), -1.e-8)]), r * np.stack([np.cos(np.linspace(-np.pi, np.pi , n1)),np.sin(np.linspace(-np.pi, np.pi , n1))]), np.stack([np.linspace(-r, 0 , n0), np.full((n0,), 1.e-8)]), ), axis =1) xs_def = xs + disp_scale * disp(xs) fig, ax = plt.subplots(figsize=(10.5,5), ncols = 2) ax[0].plot(xs[0], xs[1], "b-", label="non-deformed"); ax[1].plot(xs_def[0], xs_def[1], "r-.", label="deformed"); ```
github_jupyter
# Rolling Update Tests Check rolling updates function as expected. ``` import json import time !kubectl create namespace seldon !kubectl config set-context $(kubectl config current-context) --namespace=seldon ``` ## Change Image ``` !kubectl apply -f resources/fixed_v1.yaml !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \ -o jsonpath='{.items[0].metadata.name}') for i in range(60): state=!kubectl get sdep fixed -o jsonpath='{.status.state}' state=state[0] print(state) if state=="Available": break time.sleep(1) assert(state=="Available") !curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \ -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \ -H "Content-Type: application/json" !kubectl apply -f resources/fixed_v2.yaml time.sleep(5) # To allow operator to start the update for i in range(120): responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json" try: response = json.loads(responseRaw[0]) except: print("Failed to parse json",responseRaw) continue assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5) jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json data="".join(jsonRaw) resources = json.loads(data) numReplicas = int(resources["items"][0]["status"]["replicas"]) if numReplicas == 3: break time.sleep(1) print("Rollout Success") !kubectl delete -f resources/fixed_v1.yaml ``` ## Separate Service Orchestrator ``` !kubectl apply -f resources/fixed_v1_sep.yaml !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \ -o jsonpath='{.items[0].metadata.name}') for i in range(60): state=!kubectl get sdep fixed -o jsonpath='{.status.state}' state=state[0] print(state) if state=="Available": break time.sleep(1) assert(state=="Available") !curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \ -X POST 
http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \ -H "Content-Type: application/json" !kubectl apply -f resources/fixed_v2_sep.yaml time.sleep(5) # To allow operator to start the update for i in range(120): responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json" try: response = json.loads(responseRaw[0]) except: print("Failed to parse json",responseRaw) continue assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5) jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json data="".join(jsonRaw) resources = json.loads(data) numReplicas = int(resources["items"][0]["status"]["replicas"]) if numReplicas == 1: break time.sleep(1) print("Rollout Success") !kubectl delete -f resources/fixed_v1_sep.yaml ``` ## Two PodSpecs ``` !kubectl apply -f resources/fixed_v1_2podspecs.yaml !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \ -o jsonpath='{.items[0].metadata.name}') for i in range(60): state=!kubectl get sdep fixed -o jsonpath='{.status.state}' state=state[0] print(state) if state=="Available": break time.sleep(1) assert(state=="Available") !curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \ -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \ -H "Content-Type: application/json" !kubectl apply -f resources/fixed_v2_2podspecs.yaml time.sleep(5) # To allow operator to start the update for i in range(120): responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json" try: response = json.loads(responseRaw[0]) except: print("Failed to parse json",responseRaw) continue assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5) jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json data="".join(jsonRaw) resources = 
json.loads(data) numReplicas = int(resources["items"][0]["status"]["replicas"]) if numReplicas == 1: break time.sleep(1) print("Rollout Success") !kubectl delete -f resources/fixed_v1_2podspecs.yaml ``` ## Two Models ``` !kubectl apply -f resources/fixed_v1_2models.yaml !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \ -o jsonpath='{.items[0].metadata.name}') for i in range(60): state=!kubectl get sdep fixed -o jsonpath='{.status.state}' state=state[0] print(state) if state=="Available": break time.sleep(1) assert(state=="Available") !curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \ -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \ -H "Content-Type: application/json" !kubectl apply -f resources/fixed_v2_2models.yaml time.sleep(5) # To allow operator to start the update for i in range(120): responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json" try: response = json.loads(responseRaw[0]) except: print("Failed to parse json",responseRaw) continue assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5) jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json data="".join(jsonRaw) resources = json.loads(data) numReplicas = int(resources["items"][0]["status"]["replicas"]) if numReplicas == 3: break time.sleep(1) print("Rollout Success") !kubectl delete -f resources/fixed_v2_2models.yaml ``` ## Model name changes This will not do a rolling update but create a new deployment. 
``` !kubectl apply -f resources/fixed_v1.yaml !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \ -o jsonpath='{.items[0].metadata.name}') for i in range(60): state=!kubectl get sdep fixed -o jsonpath='{.status.state}' state=state[0] print(state) if state=="Available": break time.sleep(1) assert(state=="Available") !curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \ -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \ -H "Content-Type: application/json" !kubectl apply -f resources/fixed_v2_new_name.yaml time.sleep(5) for i in range(120): responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json" try: response = json.loads(responseRaw[0]) except: print("Failed to parse json",responseRaw) continue assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5) jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json data="".join(jsonRaw) resources = json.loads(data) numItems = len(resources["items"]) if numItems == 1: break time.sleep(1) print("Rollout Success") !kubectl delete -f resources/fixed_v2_new_name.yaml ```
github_jupyter
# # Hypothesis and Inference
# In this chapter, we test hypotheses. Firstly, let's test the hypothesis
# that a series of coin flips will be fair. It also builds upon previous
# functions found in earlier chapters.
#
# ### Assumptions:
# 1. each flip is a Bernoulli trial, meaning that `X` is a binomial `(n,p)`
#    random variable.
# 2. `X` can be approximated using the normal distribution.
# 3. The normal CDF is the probability that a var is below a threshold.
# 4. Anything not below the threshold is considered to be above it.
# 5. A var that's less than `hi` but not less than `lo` is between thresholds.
# 6. A var that is not between is considered outside.

import math


def normal_approximation_to_binomial(n, p):
    """Return (mu, sigma) of the normal approximation to Binomial(n, p). (#1)"""
    mu = p * n
    sigma = math.sqrt(p * (1 - p) * n)
    return mu, sigma


def normal_cdf(x, mu=0, sigma=1):
    """Probability that a Normal(mu, sigma) variable is below x. (#2, #3)"""
    return (1 + math.erf((x - mu) / math.sqrt(2) / sigma)) / 2


# being "below" a threshold is exactly the CDF
normal_probability_below = normal_cdf


def normal_probability_above(lo, mu=0, sigma=1):
    """Probability the variable is above `lo`. (#4)"""
    return 1 - normal_cdf(lo, mu, sigma)


def normal_probability_between(lo, hi, mu=0, sigma=1):
    """Probability the variable is between `lo` and `hi`. (#5)"""
    return normal_cdf(hi, mu, sigma) - normal_cdf(lo, mu, sigma)


def normal_probability_outside(lo, hi, mu=0, sigma=1):
    """Probability the variable is outside [lo, hi]. (#6)"""
    return 1 - normal_probability_between(lo, hi, mu, sigma)


# By creating functions that find the nontail region of our distribution, we
# can do the reverse of the above using the `inverse_normal_cdf`:

def inverse_normal_cdf(p, mu=0, sigma=1, tolerance=0.00001):
    """Binary-search for the z with normal_cdf(z, mu, sigma) ~= p."""
    if mu != 0 or sigma != 1:
        # standardize, solve for the standard normal, then rescale
        return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance)
    low_z, low_p = -10.0, 0    # normal_cdf(-10) is effectively 0
    hi_z, hi_p = 10.0, 1       # normal_cdf(10) is effectively 1
    while hi_z - low_z > tolerance:
        mid_z = (low_z + hi_z) / 2
        mid_p = normal_cdf(mid_z)
        if mid_p < p:
            low_z, low_p = mid_z, mid_p
        elif mid_p > p:
            hi_z, hi_p = mid_z, mid_p
        else:
            break
    return mid_z


def normal_upper_bound(probability, mu=0, sigma=1):
    """Return z with P(Z <= z) == probability."""
    return inverse_normal_cdf(probability, mu, sigma)


def normal_lower_bound(probability, mu=0, sigma=1):
    """Return z with P(Z >= z) == probability."""
    return inverse_normal_cdf(1 - probability, mu, sigma)


def normal_two_sided_bounds(probability, mu=0, sigma=1):
    """Return the symmetric (lo, hi) containing `probability` of the mass."""
    tail_probability = (1 - probability) / 2
    upper_bound = normal_lower_bound(tail_probability, mu, sigma)
    lower_bound = normal_upper_bound(tail_probability, mu, sigma)
    return lower_bound, upper_bound


# Since we've created our functions, let's begin testing: let n=1000 where n
# is the number of coin flips that will populate our event data. If our
# hypothesis is true, X should have a mean close to 500 (the original text
# said 50, but for n=1000 flips with p=0.5 the mean is 500, as printed below).
mu_0, sigma_0 = normal_approximation_to_binomial(1000, 0.5)
print(mu_0, sigma_0)

# So, we've gotten our `mu` (mean) and `sigma` (standard deviation) values.
# Next, we'll need to determine significance. This is done by setting our
# willingness to accept a false positive at 5%.
normal_two_sided_bounds(0.95, mu_0, sigma_0)

# The values 469 and 531 are now considered our lower and upper bounds,
# respectively. If Hsub0 (our hypothesis that a coin flips fairly one way or
# another) is true, and p=0.5 is true, our test gives the right answer
# 19 times out of 20.
#
# Our next goal is to determine the *power* of our test. While significance
# allows us to limit type 1 errors (false positives), power allows us to find
# type 2 errors (a failure to reject Hsub0 even though it is false). To
# determine this, we must derive a value that p should not be. In this
# instance, we'll determine that p=0.55.

# set vars for determining power of our test
lo, hi = normal_two_sided_bounds(0.95, mu_0, sigma_0)
print(lo, hi)

# set vars for determining power if p = 0.55
mu_1, sigma_1 = normal_approximation_to_binomial(1000, 0.55)
print(mu_1, sigma_1)

# And here we can determine our power value.
However, there's an issue with the logic of `Hsub1`'s lower bounds. It could potentially eliminate an `Hsub0` value if the mean falls below 500 since its lower bound is 469, and we know that's not going to happen. ``` type_2_probability = normal_probability_between(lo, hi, mu_1, sigma_1) power = 1 - type_2_probability print(power) ``` In order to get a better power value, we can introduce a one sided test to determine if `X` is larger than 50, but not when it's smaller. One sided tests are useful when conducting hypothesis tests where `Hsub1` is known to have a bias in one direction versus another. ``` hi = normal_upper_bound(0.95, mu_0, sigma_0) print(hi) type_2_probability = normal_probability_below(hi, mu_1, sigma_1) power = 1 - type_2_probability print(power) ``` Now that's a lot better. This new test now only rejects `Hsub0` when `X` is between 526 (derived from `hi`) and 531 (derived from `sigma_1`). Another way of deriving probability is through the use of *p-values*. Instead of deriving probability from using thresholds, you can derive the probability computationally. ``` def two_sided_p_value(x, mu=0, sigma=1): if x >= mu: return 2 * normal_probability_above(x, mu, sigma) else: return 2 * normal_probability_below(x, mu, sigma) # using 529.5 instead of 530 for continuity correction. Basically 529.5-530.5 as a range is a better estimate than # using 530 specifically. two_sided_p_value(529.5, mu_0, sigma_0) ``` A quick way to determine that continuity corrections are an accurate representation of 530 than directly calling 530 is to run a quick simulation: ``` import random extreme_value_count = 0 for _ in range(100000): num_heads = sum(1 if random.random() < 0.5 else 0 for _ in range(1000)) if num_heads >=530 or num_heads <=470: extreme_value_count += 1 print(extreme_value_count / 100000) ``` So what does this value mean? Since it's larger than 5%, we don't reject the null hypothesis. 
If it was just a bit larger, the outcome would be a bit different: ``` two_sided_p_value(531.5, mu_0, sigma_0) ``` Since this value falls below our 5% threshold, we would have to reject this null. For a one sided test, we would have the following new functions: ``` upper_p_value = normal_probability_above lower_p_value = normal_probability_below upper_p_value(524.5, mu_0, sigma_0) ``` This value wouldn't be rejected, but if the value were 527: ``` upper_p_value(526.5, mu_0, sigma_0) ``` Which would be rejected by the one sided test. Another way of determining p values would be through confidence intervals. By using central limit theorem, we can determine the average of the Bernoulli vars `X` should be normal, with mean `p` and standard deviation: `math.sqrt(p * (1 - p) / 1000)` We don't know `p`, so instead we use an estimate: ``` p_hat = 525 / 1000 mu = p_hat sigma = math.sqrt(p_hat * (1 - p_hat) / 1000) print(sigma) normal_two_sided_bounds(0.95, mu, sigma) ``` So, using the normal approximation, we can say that we are 95% confident that the interval contains `p`. Alternatively, a result that would not pass confidence would be: ``` p_hat = 540 / 1000 mu = p_hat sigma = math.sqrt(p_hat * (1 - p_hat) / 1000) print(sigma) normal_two_sided_bounds(0.95, mu, sigma) ``` And since this value doesn't pass `Hsub0` it fails confidence. A way to reduce erroneous rejections would be through *p-hacking*. P-hacking is a process by which a statistician would hack away a proposed null hypotheses, eliminating enough outliers to get a p-value below `0.05`. While this may be a viable way of determining the accuracy of your results, a good data scientist should have a hypothesis developed prior to reviewing data, and clean the data without consideration to hypothesis. Additionally, p-values shouldn't be a substitute for common sense. When attempting to compare two sets of data, it may be appropriate to use *A/B tests* to test those comparisons. 
In this example, we'll say that we are testing the popularity of two ads A and B. If `NsubA` people see ad A and `nsubA` people have clicked it, and `NsubB` people see ad B and `nsubB` people have clicked it, we know that `nsubA / NsubA` is approximately a normal random variable. ``` def estimated_parameters(N, n): p = n / N sigma = math.sqrt(p * (1 - p) / N) return p, sigma def a_b_test_statistic(N_A, n_A, N_B, n_B): p_A, sigma_A = estimated_parameters(N_A, n_A) p_B, sigma_B = estimated_parameters(N_B, n_B) return (p_B - p_A) / math.sqrt(sigma_A ** 2 + sigma_B ** 2) ``` So, if Ad A "Tastes Great" gets `200 clicks/1000 views` and Ad B "Less Bias" gets `180 clicks / 1000 views`: ``` z = a_b_test_statistic(1000, 200, 1000, 180) print(z) ``` The probability of seeing such a large difference if the means were actually equal would be: ``` two_sided_p_value(z) ``` Which is large enough that you can't conclude there's much of a difference. On the other hand, if "Less Bias" only got 150 clicks: ``` z = a_b_test_statistic(1000, 200, 1000, 150) print(z) two_sided_p_value(z) ``` Which means there's only a 0.003 probability that you'd see such a large difference if the ads were equally effective. A final method of determining the validity of a hypothesis is by treating the unknown parameters themselves as random variables. By using a *prior distribution* for the parameters and then using the observed data and *Bayes's Theorem* to get an updated *posterior distribution* for the parameters, you can make probability judgements about the parameters themselves instead of the tests.
For example, when the unknown parameter is a probability like in the coin flipping example, we often use a prior from the *Beta distribution*, which puts all its probability between 0 and 1:

```
def B(alpha, beta):
    """Normalizing constant of the Beta distribution.

    B(alpha, beta) = Gamma(alpha) * Gamma(beta) / Gamma(alpha + beta).
    Bug fix: the original used '+' between the first two gamma terms,
    which made the density integrate to the wrong total mass.
    """
    return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)

def beta_pdf(x, alpha, beta):
    """Probability density of Beta(alpha, beta) at x.

    The Beta distribution has support only on [0, 1]; everywhere
    else the density is 0.
    """
    if x < 0 or x > 1:
        return 0
    return x ** (alpha - 1) * (1 - x) ** (beta - 1) / B(alpha, beta)
```
github_jupyter
# Homework 8 ## Due Date: Tuesday, October 31st at 11:59 PM # Problem 1: BST Traversal This problem builds on Problem 1 of Homework 7 in which you wrote a binary search tree. ### Part 1 As discussed in lecture, three different types to do a depth-first traversal are: preorder, inorder, and postorder. Here is a reference: [Tree Traversal](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search). Write an iterator class called `DFSTraversal` with the following specifications: * `__init__(self, tree, traversalType)`: Constructor takes a `BinaryTree` object and one of the enums from `DFSTraversalTypes` ```python from enum import Enum class DFSTraversalTypes(Enum): PREORDER = 1 INORDER = 2 POSTORDER = 3 ``` * `changeTraversalType(self, traversalType)`: Change the traversal type * `__iter__(self)`: This is the initialization of an iterator * `__next__(self)`: This is called in the iterator for getting the next value Here's how you might use your `DFSTraversal` class: ```python input_array = [3, 9, 2, 11] bt = BinaryTree() for val in input_array: bt.insert(val) traversal = DFSTraversal(bt, DFSTraversalTypes.INORDER) for val in traversal: print(val) 2 3 9 11 ``` ### Part 2 Put your `BinaryTree` class (from homework 7) and your `DFSTraversal` class (from Part 1 of this homework) in a file titled `TreeTraversal.py`. 
``` import warnings # The BinaryNode class for nodes in the BinaryTree class BinaryNode: def __init__(self, val): self.val = val self.p = None self.left = None self.right = None def __repr__(self): return "BinaryNode({})".format(self.val) def count_child(self): # count the number of children of this node if self.left == None and self.right == None: return 0 elif self.left != None and self.right != None: return 2 else: return 1 # The BinaryTree class class BinaryTree: def __init__(self): self.root = None def __repr__(self): return "BinaryTree()" # The height of the BinaryTree def __len__(self): return self.maxDepth(self.root) # The height of the BinaryTree def maxDepth(self, root): if root == None: return 0 else: return max(self.maxDepth(root.left), self.maxDepth(root.right))+1 # Insert def insert(self, val): bi_node = BinaryNode(val) # create a new BinaryNode for the value to be inserted if self.root == None: # if the tree is empty, we just need to insert it at root self.root = bi_node return current_node = self.root # walk thru the tree to find the right position to insert while current_node != None: current_p = current_node if val > current_node.val: current_node = current_node.right else: current_node = current_node.left if val > current_p.val: current_p.right = bi_node # is a right child else: current_p.left = bi_node # is a left child bi_node.p = current_p # set parent def inOrderWalk(self, node, ordered_nodes): if node != None: self.inOrderWalk(node.left, ordered_nodes) ordered_nodes.append(node.val) self.inOrderWalk(node.right, ordered_nodes) return ordered_nodes def preOrderWalk(self, node, ordered_nodes): if node != None: ordered_nodes.append(node.val) self.preOrderWalk(node.left, ordered_nodes) self.preOrderWalk(node.right, ordered_nodes) return ordered_nodes def postOrderWalk(self, node, ordered_nodes): if node != None: self.postOrderWalk(node.left, ordered_nodes) self.postOrderWalk(node.right, ordered_nodes) ordered_nodes.append(node.val) return 
ordered_nodes # Delete the nodes with 'None' as value def clearNoneNodes(self, node): if node != None: if node.val == 'None': if node == node.p.right: node.p.right = None else: node.p.left = None self.clearNoneNodes(node.left) self.clearNoneNodes(node.right) # GetValues: calling getValuesNode(self.root, 0, depth, values) def getValues(self, depth): values = [] self.getValuesNode(self.root, 0, depth, values) self.clearNoneNodes(self.root) return values # GetValues from the subtree rooted at node, store in values def getValuesNode(self, node, current_depth, depth, values): if node != None: if current_depth == depth: values.append(node.val) else: if node.left == None: none_node = BinaryNode('None') none_node.p = node node.left = none_node if node.right == None: none_node = BinaryNode('None') none_node.p = node node.right = none_node self.getValuesNode(node.left, current_depth+1, depth, values) self.getValuesNode(node.right, current_depth+1, depth, values) # Return the right-most node from the subtree rooted at node def tree_max(self, node): while node.right != None: node = node.right return node # Replace the subtree rooted at u with the subtree rooted at v def transplant(self, u, v): if u.p == None: self.root = v elif u == u.p.left: u.p.left = v else: u.p.right = v if v != None: v.p = u.p # Search for the value=key thru the subtree rooted at node def search(self, node, key): while node != None and key != node.val: if key > node.val: node = node.right else: node = node.left return node # Remove def remove(self, val): rm_node = self.search(self.root, val) if rm_node == None: # invalid remove node warnings.warn('The value to be removed does not has a node associated.') return if rm_node.left == None: self.transplant(rm_node, rm_node.right) elif rm_node.right == None: self.transplant(rm_node, rm_node.left) else: left_max = self.tree_max(rm_node.left) if left_max.p != rm_node: self.transplant(left_max, left_max.left) left_max.left = rm_node.left left_max.left.p = left_max 
self.transplant(rm_node, left_max) left_max.right = rm_node.right left_max.right.p = left_max from enum import Enum class DFSTraversalTypes(Enum): PREORDER = 1 INORDER = 2 POSTORDER = 3 class DFSTraversal: # DFSTraversal Constructor def __init__(self, tree, traversalType): if traversalType == DFSTraversalTypes.INORDER: self.ordered_nodes = tree.inOrderWalk(tree.root, list()) elif traversalType == DFSTraversalTypes.PREORDER: self.ordered_nodes = tree.preOrderWalk(tree.root, list()) elif traversalType == DFSTraversalTypes.POSTORDER: self.ordered_nodes = tree.postOrderWalk(tree.root, list()) else: raise TypeError('TraversalType Wrong: must be DFSTraversalTypes.INORDER/PREORDER/POSTORDER') # set attributes self.tree = tree self.type = traversalType self.index = 0 # Change Traversal Type def changeTraversalType(self, traversalType): if self.type == traversalType: # nothing changed return else: if traversalType == DFSTraversalTypes.INORDER: # change to INORDER self.ordered_nodes = self.tree.inOrderWalk(self.tree.root, list()) elif traversalType == DFSTraversalTypes.PREORDER: # change to PREORDER self.ordered_nodes = self.tree.preOrderWalk(self.tree.root, list()) elif traversalType == DFSTraversalTypes.POSTORDER: # change to POSTORDER self.ordered_nodes = self.tree.postOrderWalk(self.tree.root, list()) else: raise TypeError('TraversalType Wrong: must be DFSTraversalTypes.INORDER/PREORDER/POSTORDER') print('Changed traversalType to be {}'.format(traversalType)) self.type = traversalType self.index = 0 # Initialize the iterator def __iter__(self): return self # Called by __iter__ to get the next value def __next__(self): try: node = self.ordered_nodes[self.index] except IndexError: raise StopIteration() self.index += 1 return node ``` ### Using codes from imported module `TreeTraversal.py` ``` # Using codes from imported modules from TreeTraversal import * tree1 = BinaryTree() arr1 = [20, 10, 17, 14, 3, 0] for a1 in arr1: tree1.insert(a1) tree1.postOrderWalk(tree1.root, 
list()) print('Height of tree1: ', len(tree1)) for i in range(len(tree1)): print('Level %d values: ' % i, tree1.getValues(i)) input_array = [20, 10, 17, 14, 3, 0] bt = BinaryTree() for val in input_array: bt.insert(val) traversal = DFSTraversal(bt, DFSTraversalTypes.INORDER) for val in traversal: print(val) traversal.changeTraversalType(DFSTraversalTypes.PREORDER) for val in traversal: print(val) traversal.changeTraversalType(DFSTraversalTypes.POSTORDER) for val in traversal: print(val) ``` --- ## Problem 2: Markov Chains [Markov Chains](https://en.wikipedia.org/wiki/Markov_chain) are widely used to model and predict discrete events. Underlying Markov chains are Markov processes which make the assumption that the outcome of a future event only depends on the event immediately preceeding it. In this exercise, we will be assuming that weather has Markov properties (e.g. today's weather is dependent only on yesterday's weather). We will use the Markov assumption to create a basic model for predicting weather. To begin, let's categorize weather into 7 types: ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing']. In the `weather.csv` file accompanying this homework, each row corresponds to one type of weather (in the order given above) and each column is the probability of one type of weather occurring the following day (also in the order given above). The $ij$th element is the probability that the $j$th weather type occurs after the $i$th weather type. So for example, (1,2) is the probability a cloudy day occurs after a sunny day. Take a look at the data. Make sure you see how if the previous day was sunny, the following day will have a 0.4 probability of being sunny as well. If the previous day was raining (index $i = 3$), then the following day (index $j$) has a 0.05 probability of being windy ($j = 5$). 
### Part 1: Parse the `.csv` file into a `Numpy` array ``` import numpy as np #Load CSV file -- hint: you can use np.genfromtxt() weather_arr = np.genfromtxt('weather.csv', delimiter=',') weather_arr ``` ### Part 2: Create a class called `Markov` that has the following methods: * `load_data(array)`: loads the Numpy 2D array and stores it as a class variable. * `get_prob(previous_day, following_day)`: returns the probability of `following_day` weather given `previous_day` weather. **Note:** `previous_day` and `following_day` should be passed in string form (e.g. "sunny"), as opposed to an index (e.g. 0). ``` class Markov: def __init__(self, state0='sunny'): # Initial state default to sunny self.data = None self.weather_types = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing'] self.weather_dict = {t : i for i, t in enumerate(self.weather_types)} self.index = self.weather_dict[state0] def load_data(self, array): self.data = array def get_prob(self, previous_day, following_day): try: p_i, f_i = self.weather_dict[previous_day], self.weather_dict[following_day] return float("{0:.4f}".format(self.data[p_i, f_i])) except KeyError as e: print('KeyError {}: Key must in set([sunny, cloudy, rainy, snowy, windy, hailing])'.format(e)) mk2 = Markov() mk2.load_data(weather_arr) mk2.get_prob('sunny', 's') mk2.get_prob('sunny', 'sunny') mk2.get_prob('rainy', 'windy') mk2.get_prob('hailing', 'sunny') ``` --- ## Problem 3: Iterators Iterators are a convenient way to walk along your Markov chain. #### Part 1: Using your `Markov` class from Problem 3, write `Markov` as an iterator by implementing the `__iter__()` and `__next__()` methods. Remember: * `__iter__()` should return the iterator object and should be implicitly called when the loop begins * The `__next()__` method should return the next value and is implicitly called at each step in the loop. Each 'next' step should be stochastic (i.e. 
randomly selected based on the relative probabilities of the following day weather types) and should return the next day's weather as a string (e.g. "sunny") rather than an index (e.g. 0). ``` # Class of Markov as an iterator class Markov: # Constructor of the Markov Iterator def __init__(self, state0='sunny'): # Initial state default to sunny self.data = None self.weather_types = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing'] self.weather_dict = {t : i for i, t in enumerate(self.weather_types)} self.index = self.weather_dict[state0] #print(self.weather_types, '\n') # Load weather.csv def load_data(self, array): self.data = array # Get probability of the following_day weather given the previous_day weather def get_prob(self, previous_day, following_day): try: p_i, f_i = self.weather_dict[previous_day], self.weather_dict[following_day] return float("{0:.4f}".format(self.data[p_i, f_i])) except KeyError as e: print('KeyError {}: Key must in set([sunny, cloudy, rainy, snowy, windy, hailing])'.format(e)) # Return the Markov iterator itself def __iter__(self): return self # Called by __iter__ to get the next value def __next__(self): next_probs = self.data[self.index, :] next_probs_int = (next_probs * 100).astype(np.int8) next_cum_int = np.zeros(next_probs_int.shape).astype(np.int8) # Randomly choosing the nextday's weather using cumulant probabilities as boundaries for i, next_prob in enumerate(next_probs_int): if i == 0: next_cum_int[i] = next_prob else: next_cum_int[i] = next_cum_int[i-1] + next_prob r = np.random.choice(100) print('------------------ r={}, next_cum_int={}'.format(r, next_cum_int)) if r < next_cum_int[0]: self.index = 0 else: idx = 1 while idx < len(next_cum_int): if r >= next_cum_int[idx-1] and r < next_cum_int[idx]: break idx += 1 self.index = idx print('------------------ the_next_index = {}, {}'.format(self.index, self.weather_types[self.index])) return self.weather_types[self.index] np.random.seed(12345) mk = Markov('sunny') 
mk.load_data(weather_arr) i = 0 for weather in mk: print(weather) i += 1 if i >= 10: break ``` ## Note of Discussion > After discussion with Michelle (Chia Chi Ho), I tried using > **`np.random.choice(list, size=1, p=specified_probs)[0]` ** > to directly implement the random choice by specified probabilities. The codes get shorter and cleaner. > Codes below this part use `__next__(self)` implemented with **`np.random.choice(list, size=1, p=specified_probs)[0]`** ``` # Class of Markov as an iterator class Markov: # Constructor of the Markov Iterator def __init__(self, state0='sunny'): # Initial state default to sunny self.data = None self.weather_types = ['sunny', 'cloudy', 'rainy', 'snowy', 'windy', 'hailing'] self.weather_dict = {t : i for i, t in enumerate(self.weather_types)} self.index = self.weather_dict[state0] #print(self.weather_types, '\n') # Load weather.csv def load_data(self, array): self.data = array # Get probability of the following_day weather given the previous_day weather def get_prob(self, previous_day, following_day): try: p_i, f_i = self.weather_dict[previous_day], self.weather_dict[following_day] return float("{0:.4f}".format(self.data[p_i, f_i])) except KeyError as e: print('KeyError {}: Key must in set([sunny, cloudy, rainy, snowy, windy, hailing])'.format(e)) # Return the Markov iterator itself def __iter__(self): return self # Called by __iter__ to get the next value, using np.random.choice def __next__(self): next_probs = self.data[self.index, :] next_weather = np.random.choice(self.weather_types, size=1, p=next_probs)[0] self.index = self.weather_dict[next_weather] return next_weather # Using __next__ implemented with np.random.choice(self.weather_types, size=1, p=next_probs)[0] np.random.seed(12345) mk = Markov('sunny') mk.load_data(weather_arr) i = 0 for weather in mk: print(weather) i += 1 if i >= 10: break ``` #### Part 2: We want to predict what weather will be like in a week for 5 different cities. 
Now that we have our `Markov` iterator, we can try to predict what the weather will be like in seven days from now. Given each city's current weather in the dictionary `city_weather` (see below), simulate what the weather will be like in 7 days from now. Rather than just producing one prediction per city, simulate 100 such predictions per city and store the most commonly occuring prediction. In your submission, print a dictionary `city_weather_predictions` that has each city as a key and the most commonly predicted weather as the corresponding value. **Note**: Don't worry if your values don't seem to make intuitive sense. We made up the weather probabilities. ``` city_weather = { 'New York': 'rainy', 'Chicago': 'snowy', 'Seattle': 'rainy', 'Boston': 'hailing', 'Miami': 'windy', 'Los Angeles': 'cloudy', 'San Fransisco': 'windy' } np.random.seed(12345) n_days = 7 n_sim = 100 city_weather_predictions = {} city_weather_predictions_sims = {} print('The weather in 7 days from now:\n') for city, w0 in city_weather.items(): sim_preds_count = np.zeros(6).astype(np.int8) for i in range(n_sim): # In each simulation, mk = Markov(w0) # Initialize the Markov Chain mk.load_data(weather_arr) # Load the transfer probs ii = 0 for weather in mk: # Call __next__() implicitly by __iter__() ci = mk.index # record the index of the current weather (state) ii += 1 if ii >= n_days: # iterate for 7 consecutive days break sim_preds_count[ci] += 1 predicted = mk.weather_types[np.argmax(sim_preds_count)] city_weather_predictions[city] = predicted city_weather_predictions_sims[city] = sim_preds_count # print('np.sum(sim_preds_count) = {}'.format(np.sum(sim_preds_count))) print('{}: {}'.format(city, predicted)) for (city, w_pred), (c, counts) in zip(city_weather_predictions.items(), city_weather_predictions_sims.items()): print('{}: {} {}'.format(city, w_pred, counts)) # Print the dictionary city_weather_predictions print(city_weather_predictions) ```
github_jupyter
# Managing pins ``` %load_ext autoreload %autoreload 2 import qiskit_metal as metal from qiskit_metal import designs, draw from qiskit_metal import MetalGUI, Dict, Headings Headings.h1('Welcome to Qiskit Metal') design = designs.DesignPlanar() gui = MetalGUI(design) ``` First we create some transmon pockets to have a number of pins generated for use. ``` from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket ## Custom options for all the transmons options = dict( # Some options we want to modify from the deafults # (see below for defaults) pad_width = '425 um', pocket_height = '650um', # Adding 4 connectors (see below for defaults) connection_pads=dict( a = dict(loc_W=+1,loc_H=+1), b = dict(loc_W=-1,loc_H=+1, pad_height='30um'), c = dict(loc_W=+1,loc_H=-1, pad_width='200um'), d = dict(loc_W=-1,loc_H=-1, pad_height='50um') ) ) ## Create 4 transmons q1 = TransmonPocket(design, 'Q1', options = dict( pos_x='+2.4mm', pos_y='+0.0mm', **options)) q2 = TransmonPocket(design, 'Q2', options = dict( pos_x='+0.0mm', pos_y='-0.9mm', orientation = '90', **options)) q3 = TransmonPocket(design, 'Q3', options = dict( pos_x='-2.4mm', pos_y='+0.0mm', **options)) q4 = TransmonPocket(design, 'Q4', options = dict( pos_x='+0.0mm', pos_y='+0.9mm', orientation = '90', **options)) ## Rebuild the design gui.rebuild() gui.autoscale() ``` Selecting the different components via the GUI shows the pins said component has. You can also see this via; ``` design.components.Q1.pins.keys() ``` Each pin contains a dictionary of information which can be used by other components or renderers. ``` design.components.Q1.pins.a ``` We can pass these pins into some components to auto generate connections, such as CPW lines. 
``` from qiskit_metal.qlibrary.tlines.straight_path import RouteStraight c1 = RouteStraight(design, 'c1', type="Route", options=dict(pin_inputs=dict(start_pin = dict(component = 'Q1', pin = 'd'), end_pin=dict(component = 'Q2', pin = 'c')))) gui.rebuild() gui.autoscale() ``` The example CPW also automatically generates it's own pins based on the pin inputs it was given. This is to allow for such a component to not be destroyed if the component it is attached to is deleted. ``` design.components.c1.pins ``` We can also see what active connections there are from the netlist. Pins that share the same net_id indicate they are connected. Pins that are not on the net list are currently open. ``` design.net_info ``` What happens if we try to pass in a component/pin combo that doesn't exist? ``` #A component that doesn't exist c2 = RouteStraight(design, 'c2', type="Route", options=dict(pin_inputs = dict(start_pin = dict(component = 'NotReallyHere', pin = 'd'), end_pin =dict(component = 'Q2', pin = 'a')))) #A pin that doesn't exist c3 = RouteStraight(design, 'c3', type="Route", options=dict(pin_inputs = dict(start_pin = dict(component = 'Q1', pin = 'NotReallyHere'), end_pin =dict(component = 'Q2', pin = 'a')))) ``` Or if try to pass in a pin that is already connected. ``` c4 = RouteStraight(design, 'c4', type="Route", options=dict(pin_inputs = dict(start_pin = dict(component = 'Q1', pin = 'b'), end_pin =dict(component = 'Q2', pin = 'c')))) ``` pin_inputs is the default dictionary for passing pins into a component, **BUT** how the dictionary is structured is component dependent. Using the above structure (eg. start_pin, end_pin) is suggested for any 2 port type connection, but you should always check the documentation for the specific component you are wanting to use. 
``` Headings.h1('CPW Examples') ``` An example set showing some current functional CPW components, including both simple auto-routing and meandering ``` design.delete_all_components() from qiskit_metal.qlibrary.terminations.open_to_ground import OpenToGround from qiskit_metal.qlibrary.tlines.framed_path import RouteFramed from qiskit_metal.qlibrary.tlines.straight_path import RouteStraight from qiskit_metal.qlibrary.tlines.meandered import RouteMeander open_start_straight = OpenToGround(design,'Open_straight_start',options=Dict(pos_x='0um',pos_y='0um',orientation = '-90')) open_end_straight = OpenToGround(design,'Open_straight_end',options=Dict(pos_x='0um',pos_y='1500um',orientation = '90')) open_start_auto = OpenToGround(design,'Open_auto_start',options=Dict(pos_x='250um',pos_y='0um',orientation = '-90')) open_end_auto = OpenToGround(design,'Open_auto_end',options=Dict(pos_x='250um',pos_y='1500um',orientation = '0')) open_start_meander = OpenToGround(design,'Open_meander_start',options=Dict(pos_x='1000um',pos_y='0um',orientation = '-90')) open_end_meander = OpenToGround(design,'Open_meander_end',options=Dict(pos_x='1000um',pos_y='1500um',orientation = '90')) testStraight = RouteStraight(design,'straightTest',options=Dict(pin_inputs=Dict( start_pin=Dict( component = 'Open_straight_start', pin = 'open'), end_pin=Dict( component = 'Open_straight_end', pin = 'open') ))) testAuto = RouteFramed(design,'autoTest',options=Dict(pin_inputs=Dict( start_pin=Dict( component = 'Open_auto_start', pin = 'open'), end_pin=Dict( component = 'Open_auto_end', pin = 'open') ))) testMeander = RouteMeander(design,'meanderTest',options=Dict(pin_inputs=Dict( start_pin=Dict( component = 'Open_meander_start', pin = 'open'), end_pin=Dict( component = 'Open_meander_end', pin = 'open') ))) gui.rebuild() gui.autoscale() gui.screenshot() ```
github_jupyter
# Deep Convolutional Neural Networks In this assignment, we will be using the Keras library to build, train, and evaluate some *relatively simple* Convolutional Neural Networks to demonstrate how adding layers to a network can improve accuracy, yet are more computationally expensive. The purpose of this assignment is for you to demonstrate understanding of the appropriate structure of a convolutional neural network and to give you an opportunity to research any parameters or elements of CNNs that you don't fully understand. We will be using the cifar10 dataset for this assignment, however, in order to keep the dataset size small enough to be trained in a reasonable amount of time in a Google Colab, we will only be looking at two classes from the dataset - cats and dogs. ![CNN Structure Diagram](http://www.ryanleeallred.com/wp-content/uploads/2018/06/CNN-diagram.jpeg) ``` # Import important libraries and methods import matplotlib.pyplot as plt import numpy as np import keras from keras.datasets import cifar10 from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Activation from keras.layers.convolutional import Conv2D, MaxPooling2D from keras import backend as K if K.backend()=='tensorflow': K.set_image_dim_ordering("th") # input image dimensions img_rows, img_cols = 32, 32 # the data, shuffled and split between train and test sets (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Important Hyperparameters batch_size = 32 num_classes = 2 epochs = 100 # Plot sample image from each cifar10 class.
class_names = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']  # bug fix: CIFAR-10 class 8 is 'ship', not 'shop'

# Plot one random training example per class, labeled with its class name.
fig = plt.figure(figsize=(8,3))
for i in range(10):
    ax = fig.add_subplot(2, 5, 1 + i, xticks=[], yticks=[])
    idx = np.where(y_train[:]==i)[0]
    features_idx = x_train[idx,::]
    img_num = np.random.randint(features_idx.shape[0])
    # Data is channels-first (3, 32, 32); transpose to (32, 32, 3) for imshow.
    im = np.transpose(features_idx[img_num,::],(1,2,0))
    ax.set_title(class_names[i])
    plt.imshow(im)
plt.show()

# Only look at cats [=3] and dogs [=5]; relabel as binary: dog -> 1, cat -> 0.
train_picks = np.ravel(np.logical_or(y_train==3,y_train==5))
test_picks = np.ravel(np.logical_or(y_test==3,y_test==5))
y_train = np.array(y_train[train_picks]==5,dtype=int)
y_test = np.array(y_test[test_picks]==5,dtype=int)
x_train = x_train[train_picks]
x_test = x_test[test_picks]

# check for image_data format and format image shape accordingly
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
    input_shape = (3, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
    input_shape = (img_rows, img_cols, 3)

# Normalize pixel values between 0 and 1
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# Convert class vectors to binary class matrices (one-hot, 2 classes)
y_train = keras.utils.to_categorical(np.ravel(y_train), num_classes)
y_test = keras.utils.to_categorical(np.ravel(y_test), num_classes)

# Check train and test lengths
print('y_train length:', len(y_train))
print('x_train length:', len(x_train))
print('y_test length:', len(y_test))
print('x_test length:', len(x_test))
```
# Model #1 This model will be almost as simple as we can make it.
It should look something like: * Conv2D - kernel_size = (3,3) * Relu Activation * Conv2D - kernel_size = (3,3) * Relu Activation * Max Pooling - pool_size = (2,2) * Dropout - use .25 for all layers but the final dropout layer --- * Flatten * Fully-Connected (Dense) * Dropout - use .5 this time * Fully-Connected (Dense layer where # neurons = # final classes/labels) Then compile the model using categorical_crossentropy as your loss metric. Use the Adam optimizer, and accuracy as your overall scoring metric. If you're lost when you get to this point, make sure you look at the lecture colab for somewhat similar sample code. ``` x_train.shape model1 = Sequential() model1.add(Conv2D(8, (3,3), activation='relu', input_shape=(3, 32, 32))) model1.add(Dropout(.25)) model1.add(Conv2D(16, (3,3), activation='relu')) model1.add(Dropout(.25)) model1.add(MaxPooling2D((2,2))) model1.add(Flatten()) model1.add(Dense(64, activation='relu')) model1.add(Dropout(0.5)) model1.add(Dense(2, activation='softmax')) model1.summary() model1.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) ``` ## Fit your model Fit your model and save it to a new variable so that we can access the .history value to make a plot of our training and validation accuracies by epoch. ``` model1_training = model1.fit(x_train, y_train, epochs=50, batch_size=128, validation_split=0.1) ``` ## Plot Training and Validation Accuracies Use your matplotlib skills to give us a nice line graph of both training and validation accuracies as the number of epochs increases. Don't forget your legend, axis and plot title. 
``` def train_val_metrics(epochs, model_training): epochs = range(1, epochs+1) metrics = model_training.history train_loss = metrics['loss'] train_acc = metrics['acc'] val_loss = metrics['val_loss'] val_acc = metrics['val_acc'] ax = plt.subplot(211) train, = ax.plot(epochs, train_loss) val, = ax.plot(epochs, val_loss) ax.legend([train, val], ['training', 'validation']) ax.set(xlabel='epochs', ylabel='categorical cross-entropy loss') ax2 = plt.subplot(212) train2, = ax2.plot(epochs, train_acc) val2, = ax2.plot(epochs, val_acc) ax2.legend([train2, val2], ['training', 'validation']) ax2.set(xlabel='epochs', ylabel='accuracy') train_val_metrics(50, model1_training) ``` The model begins to overfit around epoch 20 or so. Early stopping would be useful here. ![something a little deeper](http://www.ryanleeallred.com/wp-content/uploads/2018/06/a-little-deeper.gif) # Model #2 Lets add an additional set of convolutional->activation->pooling to this model: * Conv2D - kernel_size = (3,3) * Relu Activation * Conv2D - kernel_size = (3,3) * Relu Activation * Max Pooling - pool_size = (2,2) * Dropout - use .25 for all layers but the final layer --- * Conv2D - kernel_size = (3,3) * Relu Activation * Conv2D - kernel_size = (3,3) * Relu Activation * Max Pooling - pool_size = (2,2) * Dropout - use .25 for all layers but the final layer --- * Flatten * Fully-Connected (Dense) * Dropout - use .5 this time * Fully-Connected (Dense layer where # neurons = # final classes/labels) Again, compile the model using categorical_crossentropy as your loss metric and use the Adam optimizer, and accuracy as your overall scoring metric. 
``` model2 = Sequential() model2.add(Conv2D(8, (3,3), activation='relu', input_shape=(3, 32, 32))) model2.add(Dropout(.25)) model2.add(Conv2D(16, (3,3), activation='relu')) model2.add(Dropout(.25)) model2.add(MaxPooling2D((2,2))) model2.add(Conv2D(16, (3,3), activation='relu', input_shape=(3, 32, 32))) model2.add(Dropout(.25)) model2.add(Conv2D(32, (3,3), activation='relu')) model2.add(Dropout(.25)) model2.add(MaxPooling2D((2,2))) model2.add(Flatten()) model2.add(Dense(64, activation='relu')) model2.add(Dropout(0.5)) model2.add(Dense(2, activation='softmax')) model2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) model2.summary() ``` ## Fit your model Fit your model and save it to a new variable so that we can access the .history value to make a plot of our training and validation accuracies by epoch. ``` model2_training = model2.fit(x_train, y_train, epochs=50, batch_size=128, validation_split=0.1) ``` ## Plot Training and Validation Accuracies Use your matplotlib skills to give us a nice line graph of both training and validation accuracies as the number of epochs increases. Don't forget your legend, axis and plot title. ``` train_val_metrics(50, model2_training) ``` The model continues to find loss and accuracy improvements, suggesting that it could be trained for more epochs. 
![We Need To Go Deeper](http://www.ryanleeallred.com/wp-content/uploads/2018/06/go-deeper.gif) # Model #3 Finally, one more set of convolutional/activation/pooling: * Conv2D - kernel_size = (3,3) * Relu Activation * Conv2D - kernel_size = (3,3) * Relu Activation * Max Pooling - pool_size = (2,2) * Dropout - use .25 for all layers but the final layer --- * Conv2D - kernel_size = (3,3) * Relu Activation * Conv2D - kernel_size = (3,3) * Relu Activation * Max Pooling - pool_size = (2,2) * Dropout - use .25 for all layers but the final layer --- * Conv2D - kernel_size = (3,3) * Relu Activation * Conv2D - kernel_size = (3,3) * Relu Activation * Max Pooling - pool_size = (2,2) * Dropout - use .25 for all layers but the final layer --- * Flatten * Fully-Connected (Dense) * Dropout - use .5 this time * Fully-Connected (Dense layer where # neurons = # final classes/labels) Again, compile the model using categorical_crossentropy as your loss metric and use the Adam optimizer, and accuracy as your overall scoring metric. 
``` model3 = Sequential() model3.add(Conv2D(8, (3,3), activation='relu', input_shape=(3, 32, 32))) model3.add(Dropout(.25)) model3.add(Conv2D(16, (3,3), activation='relu')) model3.add(Dropout(.25)) model3.add(MaxPooling2D((2,2), strides=1)) model3.add(Conv2D(16, (3,3), activation='relu', input_shape=(3, 32, 32))) model3.add(Dropout(.25)) model3.add(Conv2D(32, (3,3), activation='relu')) model3.add(Dropout(.25)) model3.add(MaxPooling2D((2,2), strides=1)) model3.add(Conv2D(32, (3,3), activation='relu', input_shape=(3, 32, 32))) model3.add(Dropout(.25)) model3.add(Conv2D(64, (3,3), activation='relu')) model3.add(Dropout(.25)) model3.add(MaxPooling2D(2,2)) model3.add(Flatten()) model3.add(Dense(128, activation='relu')) model3.add(Dropout(0.5)) model3.add(Dense(2, activation='softmax')) model3.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) model3.summary() ``` ## Fit your model Fit your model and save it to a new variable so that we can access the .history value to make a plot of our training and validation accuracies by epoch. ``` model3_training = model3.fit(x_train, y_train, epochs=50, batch_size=128, validation_split=0.1) ``` ## Plot Training and Validation Accuracies Use your matplotlib skills to give us a nice line graph of both training and validation accuracies as the number of epochs increases. Don't forget your legend, axis and plot title. ``` train_val_metrics(50, model3_training) ``` # Stretch Goal: ## Use other classes from Cifar10 Try using different classes from the Cifar10 dataset or use all 10. You might need to sample the training data or limit the number of epochs if you decide to use the entire dataset due to processing constraints. 
## Hyperparameter Tune Your Model If you have successfully shown how increasing the depth of a neural network can improve its accuracy, and you feel like you have a solid understanding of all of the different parts of CNNs, try hyperparameter tuning your strongest model to see how much additional accuracy you can squeeze out of it. This will also give you a chance to research the different hyperparameters as well as their significance/purpose. (There are lots and lots) --- Here's a helpful article that will show you how to get started using GridSearch to hyperparameter tune your CNN. (should you desire to use that method): [Grid Search Hyperparameters for Deep Learning Models in Python With Keras](https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/)
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # The Keras Functional API in TensorFlow <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/alpha/guide/keras/functional"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/keras/functional.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/keras/functional.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> ## Setup ``` !pip install pydot !apt-get install graphviz from __future__ import absolute_import, division, print_function !pip install tensorflow-gpu==2.0.0-alpha0 import tensorflow as tf tf.keras.backend.clear_session() # For easy reset of notebook state. ``` ## Introduction You're already familiar with the use of `keras.Sequential()` to create models. The Functional API is a way to create models that is more flexible than `Sequential`: it can handle models with non-linear topology, models with shared layers, and models with multiple inputs or outputs. 
It's based on the idea that a deep learning model is usually a directed acyclic graph (DAG) of layers. The Functional API is a set of tools for **building graphs of layers**. Consider the following model: ``` (input: 784-dimensional vectors) ↧ [Dense (64 units, relu activation)] ↧ [Dense (64 units, relu activation)] ↧ [Dense (10 units, softmax activation)] ↧ (output: probability distribution over 10 classes) ``` It's a simple graph of 3 layers. To build this model with the functional API, you would start by creating an input node: ``` from tensorflow import keras inputs = keras.Input(shape=(784,)) ``` Here we just specify the shape of our data: 784-dimensional vectors. Note that the batch size is always omitted; we only specify the shape of each sample. For an input meant for images of shape `(32, 32, 3)`, we would have used: ``` img_inputs = keras.Input(shape=(32, 32, 3)) ``` What gets returned, `inputs`, contains information about the shape and dtype of the input data that you expect to feed to your model: ``` inputs.shape inputs.dtype ``` You create a new node in the graph of layers by calling a layer on this `inputs` object: ``` from tensorflow.keras import layers dense = layers.Dense(64, activation='relu') x = dense(inputs) ``` The "layer call" action is like drawing an arrow from "inputs" to this layer we created. We're "passing" the inputs to the `dense` layer, and out we get `x`. 
Let's add a few more layers to our graph of layers: ``` x = layers.Dense(64, activation='relu')(x) outputs = layers.Dense(10, activation='softmax')(x) ``` At this point, we can create a `Model` by specifying its inputs and outputs in the graph of layers: ``` model = keras.Model(inputs=inputs, outputs=outputs) ``` To recap, here is our full model definition process: ``` inputs = keras.Input(shape=(784,), name='img') x = layers.Dense(64, activation='relu')(inputs) x = layers.Dense(64, activation='relu')(x) outputs = layers.Dense(10, activation='softmax')(x) model = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model') ``` Let's check out what the model summary looks like: ``` model.summary() ``` We can also plot the model as a graph: ``` keras.utils.plot_model(model, 'my_first_model.png') ``` And optionally display the input and output shapes of each layer in the plotted graph: ``` keras.utils.plot_model(model, 'my_first_model_with_shape_info.png', show_shapes=True) ``` This figure and the code we wrote are virtually identical. In the code version, the connection arrows are simply replaced by the call operation. A "graph of layers" is a very intuitive mental image for a deep learning model, and the functional API is a way to create models that closely mirrors this mental image. ## Training, evaluation, and inference Training, evaluation, and inference work exactly in the same way for models built using the Functional API as for Sequential models. Here is a quick demonstration. 
Here we load MNIST image data, reshape it into vectors, fit the model on the data (while monitoring performance on a validation split), and finally we evaluate our model on the test data: ``` (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() x_train = x_train.reshape(60000, 784).astype('float32') / 255 x_test = x_test.reshape(10000, 784).astype('float32') / 255 model.compile(loss='sparse_categorical_crossentropy', optimizer=keras.optimizers.RMSprop(), metrics=['accuracy']) history = model.fit(x_train, y_train, batch_size=64, epochs=5, validation_split=0.2) test_scores = model.evaluate(x_test, y_test, verbose=0) print('Test loss:', test_scores[0]) print('Test accuracy:', test_scores[1]) ``` For a complete guide about model training and evaluation, see [Guide to Training & Evaluation](./training_and_evaluation.ipynb). ## Saving and serialization Saving and serialization work exactly in the same way for models built using the Functional API as for Sequential models. The standard way to save a Functional model is to call `model.save()` to save the whole model into a single file. You can later recreate the same model from this file, even if you no longer have access to the code that created the model. This file includes: - The model's architecture - The model's weight values (which were learned during training) - The model's training config (what you passed to `compile`), if any - The optimizer and its state, if any (this enables you to restart training where you left off) ``` model.save('path_to_my_model.h5') del model # Recreate the exact same model purely from the file: model = keras.models.load_model('path_to_my_model.h5') ``` For a complete guide about model saving, see [Guide to Saving and Serializing Models](./saving_and_serializing.ipynb). ## Using the same graph of layers to define multiple models In the functional API, models are created by specifying their inputs and outputs in a graph of layers. 
That means that a single graph of layers can be used to generate multiple models. In the example below, we use the same stack of layers to instantiate two models: an `encoder` model that turns image inputs into 16-dimensional vectors, and an end-to-end `autoencoder` model for training. ``` encoder_input = keras.Input(shape=(28, 28, 1), name='img') x = layers.Conv2D(16, 3, activation='relu')(encoder_input) x = layers.Conv2D(32, 3, activation='relu')(x) x = layers.MaxPooling2D(3)(x) x = layers.Conv2D(32, 3, activation='relu')(x) x = layers.Conv2D(16, 3, activation='relu')(x) encoder_output = layers.GlobalMaxPooling2D()(x) encoder = keras.Model(encoder_input, encoder_output, name='encoder') encoder.summary() x = layers.Reshape((4, 4, 1))(encoder_output) x = layers.Conv2DTranspose(16, 3, activation='relu')(x) x = layers.Conv2DTranspose(32, 3, activation='relu')(x) x = layers.UpSampling2D(3)(x) x = layers.Conv2DTranspose(16, 3, activation='relu')(x) decoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x) autoencoder = keras.Model(encoder_input, decoder_output, name='autoencoder') autoencoder.summary() ``` Note that we make the decoding architecture strictly symmetrical to the encoding architecture, so that we get an output shape that is the same as the input shape `(28, 28, 1)`. The reverse of a `Conv2D` layer is a `Conv2DTranspose` layer, and the reverse of a `MaxPooling2D` layer is an `UpSampling2D` layer. ## All models are callable, just like layers You can treat any model as if it were a layer, by calling it on an `Input` or on the output of another layer. Note that by calling a model you aren't just reusing the architecture of the model, you're also reusing its weights. Let's see this in action. 
Here's a different take on the autoencoder example that creates an encoder model, a decoder model, and chain them in two calls to obtain the autoencoder model: ``` encoder_input = keras.Input(shape=(28, 28, 1), name='original_img') x = layers.Conv2D(16, 3, activation='relu')(encoder_input) x = layers.Conv2D(32, 3, activation='relu')(x) x = layers.MaxPooling2D(3)(x) x = layers.Conv2D(32, 3, activation='relu')(x) x = layers.Conv2D(16, 3, activation='relu')(x) encoder_output = layers.GlobalMaxPooling2D()(x) encoder = keras.Model(encoder_input, encoder_output, name='encoder') encoder.summary() decoder_input = keras.Input(shape=(16,), name='encoded_img') x = layers.Reshape((4, 4, 1))(decoder_input) x = layers.Conv2DTranspose(16, 3, activation='relu')(x) x = layers.Conv2DTranspose(32, 3, activation='relu')(x) x = layers.UpSampling2D(3)(x) x = layers.Conv2DTranspose(16, 3, activation='relu')(x) decoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x) decoder = keras.Model(decoder_input, decoder_output, name='decoder') decoder.summary() autoencoder_input = keras.Input(shape=(28, 28, 1), name='img') encoded_img = encoder(autoencoder_input) decoded_img = decoder(encoded_img) autoencoder = keras.Model(autoencoder_input, decoded_img, name='autoencoder') autoencoder.summary() ``` As you can see, model can be nested: a model can contain submodels (since a model is just like a layer). A common use case for model nesting is *ensembling*. 
As an example, here's how to ensemble a set of models into a single model that averages their predictions: ``` def get_model(): inputs = keras.Input(shape=(128,)) outputs = layers.Dense(1, activation='sigmoid')(inputs) return keras.Model(inputs, outputs) model1 = get_model() model2 = get_model() model3 = get_model() inputs = keras.Input(shape=(128,)) y1 = model1(inputs) y2 = model2(inputs) y3 = model3(inputs) outputs = layers.average([y1, y2, y3]) ensemble_model = keras.Model(inputs=inputs, outputs=outputs) ``` ## Manipulating complex graph topologies ### Models with multiple inputs and outputs The functional API makes it easy to manipulate multiple inputs and outputs. This cannot be handled with the Sequential API. Here's a simple example. Let's say you're building a system for ranking custom issue tickets by priority and routing them to the right department. Your model will have 3 inputs: - Title of the ticket (text input) - Text body of the ticket (text input) - Any tags added by the user (categorical input) It will have two outputs: - Priority score between 0 and 1 (scalar sigmoid output) - The department that should handle the ticket (softmax output over the set of departments) Let's build this model in a few lines with the Functional API. 
``` num_tags = 12 # Number of unique issue tags num_words = 10000 # Size of vocabulary obtained when preprocessing text data num_departments = 4 # Number of departments for predictions title_input = keras.Input(shape=(None,), name='title') # Variable-length sequence of ints body_input = keras.Input(shape=(None,), name='body') # Variable-length sequence of ints tags_input = keras.Input(shape=(num_tags,), name='tags') # Binary vectors of size `num_tags` # Embed each word in the title into a 64-dimensional vector title_features = layers.Embedding(num_words, 64)(title_input) # Embed each word in the text into a 64-dimensional vector body_features = layers.Embedding(num_words, 64)(body_input) # Reduce sequence of embedded words in the title into a single 128-dimensional vector title_features = layers.LSTM(128)(title_features) # Reduce sequence of embedded words in the body into a single 32-dimensional vector body_features = layers.LSTM(32)(body_features) # Merge all available features into a single large vector via concatenation x = layers.concatenate([title_features, body_features, tags_input]) # Stick a logistic regression for priority prediction on top of the features priority_pred = layers.Dense(1, activation='sigmoid', name='priority')(x) # Stick a department classifier on top of the features department_pred = layers.Dense(num_departments, activation='softmax', name='department')(x) # Instantiate an end-to-end model predicting both priority and department model = keras.Model(inputs=[title_input, body_input, tags_input], outputs=[priority_pred, department_pred]) ``` Let's plot the model: ``` keras.utils.plot_model(model, 'multi_input_and_output_model.png', show_shapes=True) ``` When compiling this model, we can assign different losses to each output. You can even assign different weights to each loss, to modulate their contribution to the total training loss. 
``` model.compile(optimizer=keras.optimizers.RMSprop(1e-3), loss=['binary_crossentropy', 'categorical_crossentropy'], loss_weights=[1., 0.2]) ``` Since we gave names to our output layers, we could also specify the loss like this: ``` model.compile(optimizer=keras.optimizers.RMSprop(1e-3), loss={'priority': 'binary_crossentropy', 'department': 'categorical_crossentropy'}, loss_weights=[1., 0.2]) ``` We can train the model by passing lists of Numpy arrays of inputs and targets: ``` import numpy as np # Dummy input data title_data = np.random.randint(num_words, size=(1280, 10)) body_data = np.random.randint(num_words, size=(1280, 100)) tags_data = np.random.randint(2, size=(1280, num_tags)).astype('float32') # Dummy target data priority_targets = np.random.random(size=(1280, 1)) dept_targets = np.random.randint(2, size=(1280, num_departments)) model.fit({'title': title_data, 'body': body_data, 'tags': tags_data}, {'priority': priority_targets, 'department': dept_targets}, epochs=2, batch_size=32) ``` When calling fit with a `Dataset` object, it should yield either a tuple of lists like `([title_data, body_data, tags_data], [priority_targets, dept_targets])` or a tuple of dictionaries like `({'title': title_data, 'body': body_data, 'tags': tags_data}, {'priority': priority_targets, 'department': dept_targets})`. For more detailed explanation, refer to the complete guide [Guide to Training & Evaluation](./training_and_evaluation.ipynb). ### A toy resnet model In addition to models with multiple inputs and outputs, the Functional API makes it easy to manipulate non-linear connectivity topologies, that is to say, models where layers are not connected sequentially. This also cannot be handled with the Sequential API (as the name indicates). A common use case for this is residual connections. Let's build a toy ResNet model for CIFAR10 to demonstrate this. 
``` inputs = keras.Input(shape=(32, 32, 3), name='img') x = layers.Conv2D(32, 3, activation='relu')(inputs) x = layers.Conv2D(64, 3, activation='relu')(x) block_1_output = layers.MaxPooling2D(3)(x) x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_1_output) x = layers.Conv2D(64, 3, activation='relu', padding='same')(x) block_2_output = layers.add([x, block_1_output]) x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_2_output) x = layers.Conv2D(64, 3, activation='relu', padding='same')(x) block_3_output = layers.add([x, block_2_output]) x = layers.Conv2D(64, 3, activation='relu')(block_3_output) x = layers.GlobalAveragePooling2D()(x) x = layers.Dense(256, activation='relu')(x) x = layers.Dropout(0.5)(x) outputs = layers.Dense(10, activation='softmax')(x) model = keras.Model(inputs, outputs, name='toy_resnet') model.summary() ``` Let's plot the model: ``` keras.utils.plot_model(model, 'mini_resnet.png', show_shapes=True) ``` Let's train it: ``` (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. y_train = keras.utils.to_categorical(y_train, 10) y_test = keras.utils.to_categorical(y_test, 10) model.compile(optimizer=keras.optimizers.RMSprop(1e-3), loss='categorical_crossentropy', metrics=['acc']) model.fit(x_train, y_train, batch_size=64, epochs=1, validation_split=0.2) ``` ## Sharing layers Another good use for the functional API are models that use shared layers. Shared layers are layer instances that get reused multiple times in a same model: they learn features that correspond to multiple paths in the graph-of-layers. Shared layers are often used to encode inputs that come from similar spaces (say, two different pieces of text that feature similar vocabulary), since they enable sharing of information across these different inputs, and they make it possible to train such a model on less data. 
If a given word is seen in one of the inputs, that will benefit the processing of all inputs that go through the shared layer. To share a layer in the Functional API, just call the same layer instance multiple times. For instance, here's an `Embedding` layer shared across two different text inputs: ``` # Embedding for 1000 unique words mapped to 128-dimensional vectors shared_embedding = layers.Embedding(1000, 128) # Variable-length sequence of integers text_input_a = keras.Input(shape=(None,), dtype='int32') # Variable-length sequence of integers text_input_b = keras.Input(shape=(None,), dtype='int32') # We reuse the same layer to encode both inputs encoded_input_a = shared_embedding(text_input_a) encoded_input_b = shared_embedding(text_input_b) ``` ## Extracting and reusing nodes in the graph of layers Because the graph of layers you are manipulating in the Functional API is a static datastructure, it can be accessed and inspected. This is how we are able to plot Functional models as images, for instance. This also means that we can access the activations of intermediate layers ("nodes" in the graph) and reuse them elsewhere. This is extremely useful for feature extraction, for example! Let's look at an example. This is a VGG19 model with weights pre-trained on ImageNet: ``` from tensorflow.keras.applications import VGG19 vgg19 = VGG19() ``` And these are the intermediate activations of the model, obtained by querying the graph datastructure: ``` features_list = [layer.output for layer in vgg19.layers] ``` We can use these features to create a new feature-extraction model, that returns the values of the intermediate layer activations -- and we can do all of this in 3 lines. 
``` feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list) img = np.random.random((1, 224, 224, 3)).astype('float32') extracted_features = feat_extraction_model(img) ``` This comes in handy when [implementing neural style transfer](https://medium.com/tensorflow/neural-style-transfer-creating-art-with-deep-learning-using-tf-keras-and-eager-execution-7d541ac31398), among other things. ## Extending the API by writing custom layers tf.keras has a wide range of built-in layers. Here are a few examples: - Convolutional layers: `Conv1D`, `Conv2D`, `Conv3D`, `Conv2DTranspose`, etc. - Pooling layers: `MaxPooling1D`, `MaxPooling2D`, `MaxPooling3D`, `AveragePooling1D`, etc. - RNN layers: `GRU`, `LSTM`, `ConvLSTM2D`, etc. - `BatchNormalization`, `Dropout`, `Embedding`, etc. If you don't find what you need, it's easy to extend the API by creating your own layers. All layers subclass the `Layer` class and implement: - A `call` method, that specifies the computation done by the layer. - A `build` method, that creates the weights of the layer (note that this is just a style convention; you could create weights in `__init__` as well). To learn more about creating layers from scratch, check out the guide [Guide to writing layers and models from scratch](./custom_layers_and_models.ipynb). 
Here's a simple implementation of a `Dense` layer: ``` class CustomDense(layers.Layer): def __init__(self, units=32): super(CustomDense, self).__init__() self.units = units def build(self, input_shape): self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b inputs = keras.Input((4,)) outputs = CustomDense(10)(inputs) model = keras.Model(inputs, outputs) ``` If you want your custom layer to support serialization, you should also define a `get_config` method, that returns the constructor arguments of the layer instance: ``` class CustomDense(layers.Layer): def __init__(self, units=32): super(CustomDense, self).__init__() self.units = units def build(self, input_shape): self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b def get_config(self): return {'units': self.units} inputs = keras.Input((4,)) outputs = CustomDense(10)(inputs) model = keras.Model(inputs, outputs) config = model.get_config() new_model = keras.Model.from_config( config, custom_objects={'CustomDense': CustomDense}) ``` Optionally, you could also implement the classmethod `from_config(cls, config)`, which is in charge of recreating a layer instance given its config dictionary. The default implementation of `from_config` is: ```python def from_config(cls, config): return cls(**config) ``` ## When to use the Functional API How to decide whether to use the Functional API to create a new model, or just subclass the `Model` class directly? In general, the Functional API is higher-level, easier & safer to use, and has a number of features that subclassed Models do not support. 
However, Model subclassing gives you greater flexibility when creating models that are not easily expressible as directed acyclic graphs of layers (for instance, you could not implement a Tree-RNN with the Functional API, you would have to subclass `Model` directly). ### Here are the strengths of the Functional API: The properties listed below are all true for Sequential models as well (which are also data structures), but they aren't true for subclassed models (which are Python bytecode, not data structures). #### It is less verbose. No `super(MyClass, self).__init__(...)`, no `def call(self, ...):`, etc. Compare: ```python inputs = keras.Input(shape=(32,)) x = layers.Dense(64, activation='relu')(inputs) outputs = layers.Dense(10)(x) mlp = keras.Model(inputs, outputs) ``` With the subclassed version: ```python class MLP(keras.Model): def __init__(self, **kwargs): super(MLP, self).__init__(**kwargs) self.dense_1 = layers.Dense(64, activation='relu') self.dense_2 = layers.Dense(10) def call(self, inputs): x = self.dense_1(inputs) return self.dense_2(x) # Instantiate the model. mlp = MLP() # Necessary to create the model's state. # The model doesn't have a state until it's called at least once. _ = mlp(tf.zeros((1, 32))) ``` #### It validates your model while you're defining it. In the Functional API, your input specification (shape and dtype) is created in advance (via `Input`), and every time you call a layer, the layer checks that the specification passed to it matches its assumptions, and it will raise a helpful error message if not. This guarantees that any model you can build with the Functional API will run. All debugging (other than convergence-related debugging) will happen statically during the model construction, and not at execution time. This is similar to typechecking in a compiler. #### Your Functional model is plottable and inspectable. 
You can plot the model as a graph, and you can easily access intermediate nodes in this graph -- for instance, to extract and reuse the activations of intermediate layers, as we saw in a previous example: ```python features_list = [layer.output for layer in vgg19.layers] feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list) ``` #### Your Functional model can be serialized or cloned. Because a Functional model is a data structure rather than a piece of code, it is safely serializable and can be saved as a single file that allows you to recreate the exact same model without having access to any of the original code. See our [saving and serialization guide](./saving_and_serializing.ipynb) for more details. ### Here are the weaknesses of the Functional API: #### It does not support dynamic architectures. The Functional API treats models as DAGs of layers. This is true for most deep learning architectures, but not all: for instance, recursive networks or Tree RNNs do not follow this assumption and cannot be implemented in the Functional API. #### Sometimes, you just need to write everything from scratch. When writing advanced achitectures, you may want to do things that are outside the scope of "defining a DAG of layers": for instance, you may want to expose multiple custom training and inference methods on your model instance. This requires subclassing. --- To dive more in-depth into the differences between the Functional API and Model subclassing, you can read [What are Symbolic and Imperative APIs in TensorFlow 2.0?](https://medium.com/tensorflow/what-are-symbolic-and-imperative-apis-in-tensorflow-2-0-dfccecb01021). ## Mix-and-matching different API styles Importantly, choosing between the Functional API or Model subclassing isn't a binary decision that restricts you to one category of models. 
All models in the tf.keras API can interact with each, whether they're Sequential models, Functional models, or subclassed Models/Layers written from scratch. You can always use a Functional model or Sequential model as part of a subclassed Model/Layer: ``` units = 32 timesteps = 10 input_dim = 5 # Define a Functional model inputs = keras.Input((None, units)) x = layers.GlobalAveragePooling1D()(inputs) outputs = layers.Dense(1, activation='sigmoid')(x) model = keras.Model(inputs, outputs) class CustomRNN(layers.Layer): def __init__(self): super(CustomRNN, self).__init__() self.units = units self.projection_1 = layers.Dense(units=units, activation='tanh') self.projection_2 = layers.Dense(units=units, activation='tanh') # Our previously-defined Functional model self.classifier = model def call(self, inputs): outputs = [] state = tf.zeros(shape=(inputs.shape[0], self.units)) for t in range(inputs.shape[1]): x = inputs[:, t, :] h = self.projection_1(x) y = h + self.projection_2(state) state = y outputs.append(y) features = tf.stack(outputs, axis=1) print(features.shape) return self.classifier(features) rnn_model = CustomRNN() _ = rnn_model(tf.zeros((1, timesteps, input_dim))) ``` Inversely, you can use any subclassed Layer or Model in the Functional API as long as it implements a `call` method that follows one of the following patterns: - `call(self, inputs, **kwargs)` where `inputs` is a tensor or a nested structure of tensors (e.g. a list of tensors), and where `**kwargs` are non-tensor arguments (non-inputs). - `call(self, inputs, training=None, **kwargs)` where `training` is a boolean indicating whether the layer should behave in training mode and inference mode. - `call(self, inputs, mask=None, **kwargs)` where `mask` is a boolean mask tensor (useful for RNNs, for instance). - `call(self, inputs, training=None, mask=None, **kwargs)` -- of course you can have both masking and training-specific behavior at the same time. 
In addition, if you implement the `get_config` method on your custom Layer or Model, the Functional models you create with it will still be serializable and clonable. Here's a quick example where we use a custom RNN written from scratch in a Functional model: ``` units = 32 timesteps = 10 input_dim = 5 batch_size = 16 class CustomRNN(layers.Layer): def __init__(self): super(CustomRNN, self).__init__() self.units = units self.projection_1 = layers.Dense(units=units, activation='tanh') self.projection_2 = layers.Dense(units=units, activation='tanh') self.classifier = layers.Dense(1, activation='sigmoid') def call(self, inputs): outputs = [] state = tf.zeros(shape=(inputs.shape[0], self.units)) for t in range(inputs.shape[1]): x = inputs[:, t, :] h = self.projection_1(x) y = h + self.projection_2(state) state = y outputs.append(y) features = tf.stack(outputs, axis=1) return self.classifier(features) # Note that we specify a static batch size for the inputs with the `batch_shape` # arg, because the inner computation of `CustomRNN` requires a static batch size # (when we create the `state` zeros tensor). inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim)) x = layers.Conv1D(32, 3)(inputs) outputs = CustomRNN()(x) model = keras.Model(inputs, outputs) rnn_model = CustomRNN() _ = rnn_model(tf.zeros((1, 10, 5))) ``` This concludes our guide on the Functional API! Now you have at your fingertips a powerful set of tools for building deep learning models.
github_jupyter
# Various Routines to Harvest CRIM Metadata from Production Server ### Just the basics here, allowing interaction with "request" as a way to retrieve individual Observations and Relationships ``` import requests import pandas as pd ``` # Variables Now we can set a variable, in this case the URL of a single Observation in CRIM ``` Obs_url = "https://crimproject.org/data/observations/2/" ``` And if we call for that variable, it will tell us what it is: ``` Obs_url ``` # Requests Now defining a new variable, which itself is a "get request" for our first variable: ``` response = requests.get(Obs_url) type(response) ``` And now the json representation of that variable: ``` Obs_json = response.json() Obs_json ``` # Json, Dictionaries, Keys and Values Json is in fact an elaborate dictionary, with items nested in an order. ``` type(Obs_json) ``` We can list the fixed "keys" for that JSON, which are in turn paired with "values". ``` Obs_json.keys() ``` And here we are after the value of just ONE key ``` Obs_ema = Obs_json["ema"] Obs_ema ``` It has a data type: string ``` type(Obs_ema) ``` Now calling for various other values for other keys: ``` Obs_json["musical_type"] Obs_mt = Obs_json["musical_type"] Obs_mt ``` The piece key actually is a dictionary within a dictionary, so it has LOTS of keys and values within it. ``` Obs_piece = Obs_json["piece"] Obs_piece ``` And to interact with the items there, we need to call for a key *within* that key. ``` Obs_mei = Obs_piece["mei_links"] Obs_mei ``` Various ways of calling for items according to their position. Note: Zero-based indexing! 
``` len(Obs_mei) Obs_mei[0] Obs_json["piece"]["mei_links"][0] Obs_json["ema"] def get_ema_for_observation_id(obs_id): # get Obs_url url = "https://crimproject.org/data/observations/{}/".format(obs_id) return url def get_ema_for_observation_id(obs_id): # get Obs_ema my_ema_mei_dictionary = dict() url = "https://crimproject.org/data/observations/{}/".format(obs_id) response = requests.get(url) Obs_json = response.json() # Obs_ema = Obs_json["ema"] my_ema_mei_dictionary["id"]=Obs_json["id"] my_ema_mei_dictionary["musical type"]=Obs_json["musical_type"] my_ema_mei_dictionary["int"]=Obs_json["mt_fg_int"] my_ema_mei_dictionary["tint"]=Obs_json["mt_fg_tint"] my_ema_mei_dictionary["ema"]=Obs_json["ema"] my_ema_mei_dictionary["mei"]=Obs_json["piece"]["mei_links"][0] my_ema_mei_dictionary["pdf"]=Obs_json["piece"]["pdf_links"][0] # Obs_piece = Obs_json["piece"] # Obs_mei = Obs_piece["mei_links"] print(f'Got: {obs_id}') # return {"ema":Obs_ema,"mei":Obs_mei} return my_ema_mei_dictionary ``` Now we get a _particular_ observation. ``` get_ema_for_observation_id(20) ``` A new variable that contains the "get_ema" routine. We will pass a series of numbers to it. ``` output = get_ema_for_observation_id(20) # this holds the output as a LIST of DICTS obs_data_list = [] # this is the list of Observation IDs to call obs_call_list = [1,3,5,17,21] # this is the LOOP that runs through the list above # for observ in obs_call_list: for observ in range(1,11): call_list_output = get_ema_for_observation_id(observ) # the print command simply puts the output in the notebook terminal. #Later we will put it in the List of Dicts. # print(call_list_output) # The APPEND function adds one item after each loop. obs_data_list.append(call_list_output) # list includes APPEND function that will allow us to add one item after each loop. 
# EX blank_list = [1,5,6] (note that these are in square brackets as LIST) # blank_list.append(89) # range would in parenths as in: range(1,11) # here we make a LIST object that contains the Range. # This allows it to iterate over the range # since the range could be HUGE We can ONLY append a number to a LIST! Obs_range = list(range(1,11)) ``` Now we call up the list of observations we created above, after appending one at a time to the "[]" ``` obs_data_list ``` # Pandas as Data Frame or CSV ``` pd.Series(obs_data_list).to_csv("obs_data_list.csv") # Pandas DataFrame interprets the series of items in each Dict # as separate 'cells' (a tab structure) DF_output = pd.DataFrame(obs_data_list) DF_output DF_output.to_csv("obs_data_list.csv") # two "==" means check for equality # for 'contains' use str.contains("letter") # can also use regex in this (for EMA range) # Filter_by_Type = (DF_output["musical type"]=="Fuga") & (DF_output["id"]==8) Filter_by_Type = DF_output["musical type"].str.contains("Fuga") # DF_output[Filter_by_Type] ```
github_jupyter
#Sheet Copy Copy tab from a sheet to a sheet. #License Copyright 2020 Google LLC, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. #Disclaimer This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team. This code generated (see starthinker/scripts for possible source): - **Command**: "python starthinker_ui/manage.py colab" - **Command**: "python starthinker/tools/colab.py [JSON RECIPE]" #1. Install Dependencies First install the libraries needed to execute recipes, this only needs to be done once, then click play. ``` !pip install git+https://github.com/google/starthinker ``` #2. Set Configuration This code is required to initialize the project. Fill in required fields and press play. 1. If the recipe uses a Google Cloud Project: - Set the configuration **project** value to the project identifier from [these instructions](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md). 1. If the recipe has **auth** set to **user**: - If you have user credentials: - Set the configuration **user** value to your user credentials JSON. - If you DO NOT have user credentials: - Set the configuration **client** value to [downloaded client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md). 1. 
If the recipe has **auth** set to **service**: - Set the configuration **service** value to [downloaded service credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md). ``` from starthinker.util.configuration import Configuration CONFIG = Configuration( project="", client={}, service={}, user="/content/user.json", verbose=True ) ``` #3. Enter Sheet Copy Recipe Parameters 1. Provide the full edit URL for both sheets. 1. Provide the tab name for both sheets. 1. The tab will only be copied if it does not already exist. Modify the values below for your use case, can be done multiple times, then click play. ``` FIELDS = { 'auth_read': 'user', # Credentials used for reading data. 'from_sheet': '', 'from_tab': '', 'to_sheet': '', 'to_tab': '', } print("Parameters Set To: %s" % FIELDS) ``` #4. Execute Sheet Copy This does NOT need to be modified unless you are changing the recipe, click play. ``` from starthinker.util.configuration import execute from starthinker.util.recipe import json_set_fields TASKS = [ { 'sheets': { 'auth': 'user', 'template': { 'sheet': {'field': {'name': 'from_sheet', 'kind': 'string', 'order': 1, 'default': ''}}, 'tab': {'field': {'name': 'from_tab', 'kind': 'string', 'order': 2, 'default': ''}} }, 'sheet': {'field': {'name': 'to_sheet', 'kind': 'string', 'order': 3, 'default': ''}}, 'tab': {'field': {'name': 'to_tab', 'kind': 'string', 'order': 4, 'default': ''}} } } ] json_set_fields(TASKS, FIELDS) execute(CONFIG, TASKS, force=True) ```
github_jupyter
# 数学函数、字符串和对象 ## 本章介绍Python函数来执行常见的数学运算 - 函数是完成一个特殊任务的一组语句,可以理解为一个函数相当于一个小功能,但是在开发中,需要注意一个函数的长度最好不要超过一屏 - Python中的内置函数是不需要Import导入的 <img src="../Photo/15.png"></img> ``` #绝对值 print(abs(-10)) #MAX max(1,2,3) #max('abc') #MIN min(-1,0,1) #POW(幂) pow(5,8) #ROUND(X)(返回与X最接近的整数) round(3.8) #round(x,n) 保留浮点小数 round(3.99999988875622332,8) #赌博 import random random.randint() #石头 = 0 #剪刀 = 1 #布 = 2 import random number = random.randint(0,2) count = eval(input('please input count only 0、1、2:')) if abs(number - count) == 1: min(number,count) print('你赢啦!') elif abs(number - count) == 0: print('平局,再来一次吧!') else: max(number,count) print('输了,再来一次吧!') import os import random a = eval(input('please input number')) b = a = -10 print(abs(a)) b = -10.1 print(abs(b)) c = 0 print(abs(c)) max(1, 2, 3, 4, 5) min(1, 2, 3, 4, 5) min(1, 2, 3, -4, 5) for i in range(10): print(i) pow(2, 4, 2) # 幂指数运算,第三个参数是取模运算 round(10.67, 1) # 一个参数就是四舍五入,保留小数位数 ``` ## 尝试练习Python内置函数 ## Python中的math模块提供了许多数学函数 <img src="../Photo/16.png"></img> <img src="../Photo/17.png"></img> ``` #圆的实现 import random y = random.randint(0,1) a = random.randint(0,1) L = -(y*log(a)+(1-y)*(log(1-a))) #sin()使用弧度 import math math.sin(math.radians(90)) #FABS import math math.fabs(-4) #ceil 向上取整 import math math.ceil(-3.1) import math math.exp(100) import time start = time.time()#返回时间戳 num = 0 for i in range (1000000): num +=i end = time.time() print(end - start) import math # 导入数学包 a1 = math.fabs(-2) print(a1) print(math.log(2.71828)) print(math.asin(1.0)) b1 = math.cos(math.radians(90)) # cos代入的是弧度值,very important! 
print(b1) c1 = 3.1415926 print(math.degrees(c1)) math.sqrt(9) math.sin(2 * math.pi) math.cos(2 * math.pi) min(2, 2, 1) math.log(math.e ** 2) math.exp(1) max(2, 3, 4) math.ceil(-2.5) # 验证码系统 first_num, second_num = 3, 4 print('验证码', first_num ,'+', second_num, '= ?') answer = eval(input('写出结果: ')) if answer == first_num + second_num: print('验证码正确') else: print('验证码错误') import random import math first_num, second_num = 3, 4 list = ['+', '-', '*', '/'] randl = random.randint(0, 3) if list[randl]=='+': print('验证码', first_num ,'+', second_num, '= ?') right_answer = first_num + second_num elif list[randl]=='-': print('验证码', first_num ,'-', second_num, '= ?') right_answer = first_num - second_num elif list[randl]=='*': print('验证码', first_num ,'*', second_num, '= ?') right_answer = first_num * second_num else: print('验证码', first_num ,'/', second_num, '= ?') right_answer = first_num / second_num answer = eval(input('写出结果: ')) if answer == right_answer: print('验证码正确') else: print('验证码错误') # 验证码系统 import random first_num = random.randint(0, 9) second_num = random.randint(0, 9) fuhao = random.randint(0, 3) if fuhao==0: print('验证码', first_num ,'+', second_num, '= ?') right_answer = first_num + second_num elif fuhao==1: print('验证码', first_num ,'-', second_num, '= ?') right_answer = first_num - second_num elif fuhao==2: print('验证码', first_num ,'*', second_num, '= ?') right_answer = first_num * second_num else: print('验证码', first_num ,'/', second_num, '= ?') right_answer = first_num / second_num answer = eval(input('写出结果: ')) if answer == right_answer: print('验证码正确') else: print('验证码错误') import random list = ['+', '-', '*', '/'] c = random.sample(list, 1) print(c) import random import math first_num = random.randint(0, 9) second_num = random.randint(0, 9) list = ['+', '-', '*', '/'] fuhao = random.sample(list, 1) if fuhao=='+': print('验证码', first_num ,'+', second_num, '= ?') right_answer = first_num + second_num elif fuhao=='-': print('验证码', first_num ,'-', second_num, '= ?') 
right_answer = first_num - second_num elif fuhao=='*': print('验证码', first_num ,'*', second_num, '= ?') right_answer = first_num * second_num else: print('验证码', first_num ,'/', second_num, '= ?') right_answer = first_num / second_num answer = eval(input('写出结果: ')) if answer == right_answer: print('验证码正确') else: print('验证码错误') import PIL ``` ## 两个数学常量PI和e,可以通过使用math.pi 和math.e调用 ``` import math print(math.pi) print(math.e) ``` ## EP: - 通过math库,写一个程序,使得用户输入三个顶点(x,y)返回三个角度 - 注意:Python计算角度为弧度制,需要将其转换为角度 <img src="../Photo/18.png"> ``` import math x1,y1 = eval(input('输入A点坐标:')) x2,y2 = eval(input('输入B点坐标:')) x3,y3 = eval(input('输入C点坐标:')) a = math.sqrt(pow((x2-x3),2)+pow((y2-y3),2)) b = math.sqrt(pow((x3-x1),2)+pow((y3-y1),2)) c = math.sqrt(pow((x2-x1),2)+pow((y2-y1),2)) A = math.degrees(math.acos((a*a-b*b-c*c)/(-2*b*c))) B = math.degrees(math.acos((b*b-a*a-c*c)/(-2*a*c))) C = math.degrees(math.acos((c*c-b*b-a*a)/(-2*b*a))) print('三角形的三个角分别是:',A,B,C) c = '''她说:“你真是一个小天才” 你真是一个小煞笔 你真是一个大煞笔 你真是一个小机灵鬼''' num = 0 for i in c: if i=='\n': print('ok') continue else: num+=1 print(num) c = '''她说:“你真是一个小天才” 你真是一个小煞笔 你真是一个大煞笔 你真是一个小机灵鬼''' bytes(c.encode('utf-8')) #统计煞笔 c = ''' 她说:“你真是一个小天才” 你真是一个小煞笔 你真是一个大煞笔 你真是一个小机灵鬼 ''' num = 0 for i in c: num +=1 print(num) import math x1, y1 = eval(input('输入A点坐标:')) x2, y2 = eval(input('输入B点坐标:')) x3, y3 = eval(input('输入C点坐标:')) a = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) b = math.sqrt((x1 - x3) ** 2 + (y1 - y3) ** 2) c = math.sqrt((x2 - x3) ** 2 + (y2 - y3) ** 2) A = math.degrees(math.acos((a * a - b * b - c * c) / (-2 * b * c))) B = math.degrees(math.acos((b * b - a * a - c * c) / (-2 * a * c))) C = math.degrees(math.acos((c * c - b * b - a * a) / (-2 * a * b))) print('三角形的三个角分别为', A, B, C) ``` ## 字符串和字符 - 在Python中,字符串必须是在单引号或者双引号内,在多段换行的字符串中可以使用“”“ - 在使用”“”时,给予其变量则变为字符串,否则当多行注释使用 ``` #练习 a = 'a' b = 'b' ''' 你真是 ''' a = 'joker' b = "Kate" c = """在Python中,字符串必须是在单引号或者双引号内,在多段换行的字符串中可以使用“”“ 在使用”“”时,给予其变量则变为字符串,否则当多行注释使用""" 
#字符串有多行时,添加三个单引号或者三个双引号 """在Python中,字符串必须是在单引号或者双引号内,在多段换行的字符串中可以使用“”“ 在使用”“”时,给予其变量则变为字符串,否则当多行注释使用""" #三引号可以表示多行注释 # 当6个引号没有赋值时,那么它是注释的作用 # 6个引号的作用,多行文本 print(type(a), type(b), type(c)) ``` ## ASCII码与Unicode码 - <img src="../Photo/19.png"></img> - <img src="../Photo/20.png"></img> - <img src="../Photo/21.png"></img> ## 函数ord、chr - ord 返回ASCII码值 - chr 返回字符 ``` ord('b') chr(98) #邮箱 a = '1575771603@qq.com' for i in a: #print(ord(i),end=' ') print(chr(ord(i)),end='') import random fh = '' a = '1575771603@qq.com' b = random.randint(0,4) for i in a: if b == 1: fh = '+b' elif b == 2: fh = '-b' elif b == 3: fh = '*b' elif b == 4: fh = '/b' else: fh = '%b' s = ord(i)fh print(s) #print(chr(ord(i)),end='') joker = 'A' ord(joker) print(ord('q'), ord('Z')) print(chr(65)) print(chr(90)) import numpy as np np.nonzero(1) ``` ## EP: - 利用ord与chr进行简单邮箱加密 ``` email = 'maomaochong@163.com' # 邮箱加密过程 j = 0 for i in email: text = ord(i) + 1 re_text = chr(text) print(re_text) import hashlib str1 = 'this is a test.' h1 = hashlib.md5() h1.update(str1.encode(encoding = 'utf-8')) print('MD5加密之后为:', h1.hexdigest()) ``` ## 转义序列 \ - a = "He said,"Johon's program is easy to read"" - 转掉原来的意思 - 一般情况下,只有当语句与默认语句相撞的时候,就需要转义 ``` #JOIN 以。。。进行拼接 %time ''.join(('a','b')) %time 'a'+'100' a = "He said,\"Johon's program is easy to read\"" #z正则表达式中常用转义字符\ print(a) ``` ## 高级print - 参数 end: 以什么方式结束打印 - 默认换行打印 ``` email = 'maomaochong@163.com' # 邮箱加密过程 j = 0 for i in email: text = ord(i) + 1 re_text = chr(text) print(re_text, end = '') ``` ## 函数str - 将类型强制转换成字符串类型 - 其他一些以后会学到(list,set,tuple...) 
``` a = 100.12 type(str(a)) ``` ## 字符串连接操作 - 直接使用 “+” - join() 函数 ``` a1 = 'www.baidu.com/image.page=' a2 = '1' for i in range(0, 10): a2 = a1 + str(i) print(a2) joint = '^' %time joint.join(('a', 'b', 'c', 'd')) # join的参数需要在一个元组之中 %time '*'.join(('a', 'b', 'c', 'd')) # join的参数需要在一个元组之中 %time 'A' + 'B' + 'C' ``` ## EP: - 将 “Welcome” “to” "Python" 拼接 - 将int型 100 与 “joker is a bad man” 拼接 - 从控制台读取字符串 > 输入一个名字返回夸奖此人是一个帅哥 ``` ' '.join(('Welcome','to','Python')) ''.join((str(100),'joker is a bad man')) name = input('please input name:') a = ''.join((name,',you are very beautiful!')) print(a) text1 = ' '.join(('Welcome', 'to', 'Python')) i = 100 text2 = str(i) text3 = ' '.join((text2, 'Joker is a bad man')) print(text1, '\n', text2 ,'\n', text3) name = input('输入名字:') text = ' '.join((name, 'is a good boy.')) print(text) ``` ## 实例研究:最小数量硬币 - 开发一个程序,让用户输入总金额,这是一个用美元和美分表示的浮点值,返回一个由美元、两角五分的硬币、一角的硬币、五分硬币、以及美分个数 <img src="../Photo/22.png"></img> ``` amount = eval(input('Enter an amount, for example 11.56: ')) fenshuAmount = int(amount * 100) dollorAmount = fenshuAmount // 100 remainDollorAmount = fenshuAmount % 100 jiaoAmount = remainDollorAmount // 25 remainJiaoAmount = remainDollorAmount % 25 fenAmount = remainJiaoAmount // 10 remainFenAmount = remainJiaoAmount % 10 fenAmount2 = remainFenAmount // 5 remainFenAmount2 = remainFenAmount % 5 fenFinalAmount = remainFenAmount2 print('美元个数为',dollorAmount,'\n', '两角五分硬币个数为', jiaoAmount, '\n','一角个数为', fenAmount, '\n','五美分个数为', fenAmount2,'\n', '一美分个数为',fenFinalAmount) amount = eval(input('Ennter an amount,for example 11.56:')) remainingAmount = int(amount * 100) print(remainingAmount) numberOfOneDollars = remainingAmount //100 remainingAmount = remainingAmount % 100 numberOfQuarters = remainingAmount // 25 remainingAmount = remainingAmount % 25 numberOfDimes = remainingAmount // 10 remainingAmount = remainingAmount % 10 numberOfNickls = remainingAmount // 5 remainingAmount = remainingAmount % 5 numberOfPenies = remainingAmount 
print(numberOfOneDollars,numberOfQuarters,numberOfDimes,numberOfNickls,numberOfPenies) ``` - Python弱项,对于浮点型的处理并不是很好,但是处理数据的时候使用的是Numpy类型 <img src="../Photo/23.png"></img> ``` remainingAmount = eval(input('Enter an amount,for example 11.56:')) print(remainingAmount) numberOfOneDollars = remainingAmount //100 remainingAmount = remainingAmount % 100 numberOfQuarters = remainingAmount // 25 remainingAmount = remainingAmount % 25 numberOfDimes = remainingAmount // 10 remainingAmount = remainingAmount % 10 numberOfNickls = remainingAmount // 5 remainingAmount = remainingAmount % 5 numberOfPenies = remainingAmount print(numberOfOneDollars,numberOfQuarters,numberOfDimes,numberOfNickls,numberOfPenies) ``` ## id与type - id 查看内存地址,在判断语句中将会使用 - type 查看元素类型 ``` # id(262) is id(262) a = 100 id(a) id(True) 100 == 100 112345678800000000 is '112345678800000000' 112345678800000000 is 112345678800000000 a = True b = False print(id(a), id(b)) a is b ``` ## 其他格式化语句见书 # Homework - 1 <img src="../Photo/24.png"><img> <img src="../Photo/25.png"><img> ``` import math r = eval(input('Enter the length from the center to a vertex:')) s = 2*r*math.sin(math.pi / 5) area = 5 * s * s / (4 * math.tan(math.pi / 5)) print('The area of the pentagon is %.2f' %(area) ) ``` - 2 <img src="../Photo/26.png"><img> ``` import math x1,y1 = eval(input('Enter point 1 (latitude and longitude) in degrees:')) x2,y2 = eval(input('Enter point 2 (latitude and longitude) in degrees:')) x1 = math.radians(x1) x2 = math.radians(x2) y1 = math.radians(y1) y2 = math.radians(y2) radius = 6371.01 d = radius * math.acos(math.sin(x1) * math.sin(x2) + math.cos(x1) * math.cos(x2) * math.cos(y1-y2)) print('The distance between the two points is %f' %(d)) ``` - 3 <img src="../Photo/27.png"><img> ``` import math side = eval(input('Enter the side:')) area = 5 * side * side / (4 * math.tan(math.pi / 5)) print('The area of the pentagon is %.4f' %(area)) ``` - 4 <img src="../Photo/28.png"><img> ``` import math number = eval(input('Enter the 
number of sides:')) s = eval(input('Enter the sides:')) area = number * s * s /(4 * math.tan(math.pi / 5)) print('The area of the polyon is %f' %(area)) ``` - 5 <img src="../Photo/29.png"><img> <img src="../Photo/30.png"><img> ``` number = eval(input('Enter an ASCII code :')) a = chr(number) print('The character is ', a) ``` - 6 <img src="../Photo/31.png"><img> ``` name = input("Enter employee's name :") weektime = eval(input('Enter number of hours worked in a week:')) hourly = eval(input('Enter hourly pay rate:')) federal = eval(input('Enter federal tax withholding rate:')) state = eval(input('Enter state tax withholding rate :')) GrossPay = weektime * hourly FeferalWithholding = GrossPay * federal StateWithholding = GrossPay * state TotalDeduction = FeferalWithholding + StateWithholding NetPay = GrossPay - TotalDeduction print('Employee Name:', name ,'\n', 'Hours Worked:', weektime , '\n','Pay rate: $', hourly , '\n','Gross pay: 4',GrossPay , '\n','Feferal Withholding(20.0%): $',FeferalWithholding , '\n','State Withholding(9.0%): $',StateWithholding , '\n','Employee Name:', name ,'\n', 'Total Deduction: $',TotalDeduction , '\n','Net Pay:', NetPay ) ``` - 7 <img src="../Photo/32.png"><img> ``` num = eval(input('Enter an integer:') ) a = num // 1000 b = num % 1000 // 100 c = num %100 // 10 d = num %10 print(d,end='') print(c,end='') print(b,end='') print (a,end='') #print('The reversed number is %d' %(num)) ``` - 8 进阶: > 加密一串文本,并将解密后的文件写入本地保存 ``` #邮箱加密 a = '1575771603@qq.com' res3 = '' for i in a: res = ord(i) + 1 res2 = chr(res) res3 = res3 + res2 print('加密信息:',res3) res4 = '' for i in res3: res = ord(i)-1 res2 = chr(res) res4 = res4 + res2 print('解密信息:',res4) ```
github_jupyter
<a href="https://colab.research.google.com/github/spyrosviz/Injury_Prediction_MidLong_Distance_Runners/blob/main/ML%20models/Models_Runners_Injury_Prediction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` # Import Libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.ensemble import GradientBoostingClassifier, BaggingClassifier from xgboost.sklearn import XGBClassifier from sklearn.calibration import CalibratedClassifierCV from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, StratifiedKFold from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score from sklearn.model_selection import cross_val_score from sklearn.preprocessing import MinMaxScaler import itertools from collections import Counter !pip install imbalanced-learn from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN from imblearn.under_sampling import RandomUnderSampler, TomekLinks import tensorflow as tf ``` **Use the following split if you want to hold out a specified number of athletes for train and test set. The last 10 athletes instances were kept for test set.** ``` '''Import data and hold out a specified test set''' # Import data from excel, select the first 63 athletes events for train set and the last 10 athletes for test set df = pd.read_excel(r'/content/drive/MyDrive/Runners_Injury_MLproject/Daily_Injury_Clean.xlsx',index_col = [0]) df_train = df[df['Athlete ID'] <= 63] df_train.drop(['Date','Athlete ID'],axis=1,inplace=True) df_test = df[df['Athlete ID'] > 63] df_test.drop(['Date','Athlete ID'],axis=1,inplace=True) # Check if df_train has any equal instances with df_test. 
We expect to return an empty dataframe if they do not share common instances print(df_train[df_test.eq(df_train).all(axis=1)==True]) ''' Set y ''' y_train = df_train['injury'].values y_test = df_test['injury'].values ''' Set all columns for X except injury which is the target''' X_train = df_train.drop(['injury'],axis=1).values X_test = df_test.drop(['injury'],axis=1).values column_names = df_train.drop(['injury'],axis=1).columns #selected_features = ['Total Weekly Distance','Acute Load','Strain','Monotony','injury'] ''' Set X after dropping selected features ''' #X_test = df_test.drop(selected_features,axis=1).values #X_train = df_train.drop(selected_features,axis=1).values #column_names = df_train.drop(selected_features,axis=1).columns ''' Set selected features as X ''' #X_train = df_train.loc[:,selected_features].values #X_test = df_test.loc[:,selected_features].values #column_names = df_train.loc[:,selected_features].columns # Print dataframes shapes and respective number of healthy and injury events print(column_names) print(Counter(df_train['injury'].values)) print(Counter(df_test['injury'].values)) ``` **Use the following dataset split if you want to hold out 2000 random healthy instancies and 50 random injury instancies** ``` '''Import data and holdout a random test set''' # Import data from excel and drop Date and Athlete ID column df = pd.read_excel(r'/content/drive/MyDrive/Runners_Injury_MLproject/run_injur_with_acuteloads.xlsx',index_col = [0]) # Hold out a test set with 100 random injury events and 100 random healthy events df_copy = df.copy() df_copy.drop(['Date','Athlete ID'],axis=1,inplace=True) df_inj = df_copy[df_copy['injury']==1].sample(50,random_state=42) df_uninj = df_copy[df_copy['injury']==0].sample(2000,random_state=42) df_test = pd.concat([df_inj,df_uninj],ignore_index=True) # Drop the test set from the original dataframe df_train = pd.concat([df_copy,df_test],ignore_index=True).drop_duplicates(keep=False) # Set X and y y_train = 
df_train['injury'].values y_test = df_test['injury'].values selected_features = ['Total Weekly Distance','Acute Load','Strain','Monotony','injury'] X_test = df_test.drop(selected_features,axis=1).values X_train = df_train.drop(selected_features,axis=1).values #X_train = df_train.loc[:,selected_features].values #X_test = df_test.loc[:,selected_features].values # Check if df_train has any equal instances with df_test. We expect to return an empty dataframe if they do not share common instances # Print dataframe shapes and respective number of healthy and injury events print(df_train[df_test.eq(df_train).all(axis=1)==True]) #print(df_train.drop(['Acute Load','Total Weekly Distance','Monotony','Strain','injury'],axis=1).columns) print(df_train.shape) print(Counter(df_train['injury'].values)) print(df_test.shape) print(Counter(df_test['injury'].values)) class_imbalance = len(df_train[df_train['injury']==1].values)/len(df_train[df_train['injury']==0].values) print(f'Class imbalance is {class_imbalance}') ``` **Write a function to pretiffy confusion matrix results. 
The function was found from Daniel Bourke's Tensorflow course** ``` def plot_confusion_matrix(y_true,y_pred,class_names,figsize=(10,10),text_size=15): # create the confusion matrix cm = confusion_matrix(y_true,y_pred) cm_norm = cm.astype('float') / cm.sum(axis=1)[:,np.newaxis] # normalize confusion matrix n_classes = cm.shape[0] fig, ax = plt.subplots(figsize=figsize) matrix_plot = ax.matshow(cm, cmap=plt.cm.Blues) fig.colorbar(matrix_plot) # Set labels to be classes if class_names: labels = class_names else: labels = np.arange(cm.shape[0]) # Label the axes ax.set(title='Confusion Matrix', xlabel = 'Predicted Label', ylabel = 'True Label', xticks = np.arange(n_classes), yticks = np.arange(n_classes), xticklabels = labels, yticklabels = labels) # Set x axis labels to bottom ax.xaxis.set_label_position('bottom') ax.xaxis.tick_bottom() # Adjust label size ax.yaxis.label.set_size(text_size) ax.xaxis.label.set_size(text_size) ax.title.set_size(text_size) # Set threshold for different colors threshold = (cm.max() + cm.min()) / 2 # Plot the text on each cell for i, j in itertools.product(range(cm.shape[0]),range(cm.shape[1])): plt.text(j,i,f'{cm[i,j]} ({cm_norm[i,j] * 100:.1f}%)', horizontalalignment='center', color='white' if cm[i,j] > threshold else 'black', size = text_size) ``` Because there is very high class imbalance in the injury variable that we want to predict, we will try the following techniques to overcome this problem and see what works best: * **Weighted XGBoost** * **XGBoost with Smote algorithm for Resampling** * **XGBoost model with Random Resampling** * **Bagging XGBoost model with Random Resampling** * **Neural Networks model with Random Undersampling** ``` # Set X and y with different resampling methods '''SMOTE algorithm for oversampling 15% ratio and random undersampling 1-1 ratio''' # Oversample the minority class to have number of instances equal with the 15% of the majority class smote = SMOTE(sampling_strategy=0.15,random_state=1) X_sm,y_sm = 
smote.fit_resample(X_train,y_train) # Downsample the majority class to have number of instances equal with the minority class undersamp = RandomUnderSampler(sampling_strategy=1,random_state=1) X_smus,y_smus = undersamp.fit_resample(X_sm,y_sm) '''Random oversampling 10% ratio and random undersampling 1-1 ratio''' # Random over sampler for minority class to 1:10 class ratio ros = RandomOverSampler(sampling_strategy=0.1,random_state=21) X_ros,y_ros = ros.fit_resample(X_train,y_train) # Undersample the majority class to have number of instances equal with the minority class undersamp = RandomUnderSampler(sampling_strategy=1,random_state=21) X_rosus,y_rosus = undersamp.fit_resample(X_ros,y_ros) '''Random undersampling 1-1 ratio''' # Random under sampler for majority class to 1:1 class ratio rus = RandomUnderSampler(sampling_strategy=1,random_state=21) X_rus,y_rus = rus.fit_resample(X_train,y_train) '''Tomek Links Undersampling''' tmkl = TomekLinks() X_tmk, y_tmk = tmkl.fit_resample(X_train,y_train) '''ADASYN for oversampling 15% ratio and random undersampler 1-1 ratio''' # ADASYN oversample minority class to 15% of the majority class adasyn = ADASYN(sampling_strategy=0.15,random_state=21) X_ada, y_ada = adasyn.fit_resample(X_train,y_train) # Random undersample the majority class to have equal instances with minority class adarus = RandomUnderSampler(sampling_strategy=1,random_state=21) X_adarus,y_adarus = adarus.fit_resample(X_ada,y_ada) # Stratify crossvalidation cv = StratifiedKFold(n_splits=5,shuffle=True,random_state=21) ``` ## 1) Weighted XGBoost Model ``` '''Weighted XGBoost''' # We will use scale_pos_weight argument in xgboost algorithm which increases the error for wrong positive class prediction. # From xgboost documentation it's suggested that the optimal value for scale_pos_weight argument is usually around the # sum(negative instances)/sum(positive instances). 
We will use randomizedsearchcv to find optimal value xgb_weight = XGBClassifier() param_grid_weight = {"gamma":[0.01,0.1,1,10,50,100,1000],'reg_lambda':[1,5,10,20], 'learning_rate':np.arange(0.01,1,0.01),'eta':np.arange(0.1,1,0.1),'scale_pos_weight':[60,70,80,90,100]} gscv_weight = RandomizedSearchCV(xgb_weight,param_distributions=param_grid_weight,cv=cv,scoring='roc_auc') gscv_weight.fit(X_train,y_train) print("Best param is {}".format(gscv_weight.best_params_)) print("Best score is {}".format(gscv_weight.best_score_)) optimal_gamma = gscv_weight.best_params_['gamma'] optimal_reg_lambda = gscv_weight.best_params_['reg_lambda'] optim_lr = gscv_weight.best_params_['learning_rate'] optimal_eta = gscv_weight.best_params_['eta'] optimal_scale_pos_weight = gscv_weight.best_params_['scale_pos_weight'] tuned_xgb_weight = XGBClassifier(gamma=optimal_gamma,learning_rate=optim_lr,eta=optimal_eta,reg_lambda=optimal_reg_lambda,scale_pos_weight=optimal_scale_pos_weight, colsample_bytree=0.5,min_child_weight=90,objective='binary:logistic',subsample=0.5) tuned_xgb_weight.fit(X_train,y_train,early_stopping_rounds=10,eval_metric='auc',eval_set=[(X_test,y_test)]) # Evaluate model's performance on the test set, with AUC, confusion matrix, sensitivity and specificity y_pred = tuned_xgb_weight.predict(X_test) print(f'Area under curve score is {roc_auc_score(y_test,tuned_xgb_weight.predict_proba(X_test)[:,1])}') # Compute true positives, true neagatives, false negatives and false positives tp = confusion_matrix(y_test,y_pred)[1,1] tn = confusion_matrix(y_test,y_pred)[0,0] fn = confusion_matrix(y_test,y_pred)[1,0] fp = confusion_matrix(y_test,y_pred)[0,1] # Compute sensitivity and specificity sensitivity = tp / (tp + fn) specificity = tn / (tn + fp) print(f'Sensitivity is {sensitivity*100}% and specificity is {specificity*100}%') plot_confusion_matrix(y_true=y_test, y_pred=y_pred, class_names=['Healthy events','Injury events']) ``` ##2) XGBoost Model with SMOTE combined with Random 
Undersampling ``` '''XGBoost Classifier and SMOTE (Synthetic Minority Oversampling Technique) combined with Random Undersampling''' # Check the number of instances for each class before and after resampling print(Counter(y_train)) print(Counter(y_smus)) xgb_sm = XGBClassifier() param_grid_sm = {"gamma":[0.01,0.1,1,10,50,100,1000],'learning_rate':np.arange(0.01,1,0.01),'eta':np.arange(0.1,1,0.1),'reg_lambda':[1,5,10,20]} gscv_sm = RandomizedSearchCV(xgb_sm,param_distributions=param_grid_sm,cv=5,scoring='roc_auc') gscv_sm.fit(X_smus,y_smus) print("Best param is {}".format(gscv_sm.best_params_)) print("Best score is {}".format(gscv_sm.best_score_)) optimal_gamma = gscv_sm.best_params_['gamma'] optim_lr = gscv_sm.best_params_['learning_rate'] optimal_eta = gscv_sm.best_params_['eta'] optimal_lambda = gscv_sm.best_params_['reg_lambda'] tuned_xgb_sm = XGBClassifier(gamma=optimal_gamma,learning_rate=optim_lr,eta=optimal_eta,reg_lambda=optimal_lambda,subsample=0.4, colsample_bytree=0.6,min_child_weight=90,objective='binary:logistic') tuned_xgb_sm.fit(X_smus,y_smus,early_stopping_rounds=10,eval_metric='auc',eval_set=[(X_test,y_test)]) # Evaluate model's performance on the test set, with AUC, confusion matrix, sensitivity and specificity y_pred = tuned_xgb_sm.predict(X_test) print(f'Area under curve score is {roc_auc_score(y_test,tuned_xgb_sm.predict_proba(X_test)[:,1])}') # Compute true positives, true neagatives, false negatives and false positives tp = confusion_matrix(y_test,y_pred)[1,1] tn = confusion_matrix(y_test,y_pred)[0,0] fn = confusion_matrix(y_test,y_pred)[1,0] fp = confusion_matrix(y_test,y_pred)[0,1] # Compute sensitivity and specificity sensitivity = tp / (tp + fn) specificity = tn / (tn + fp) print(f'Sensitivity is {sensitivity*100}% and specificity is {specificity*100}%') plot_confusion_matrix(y_true=y_test, y_pred=y_pred, class_names=['Healthy events','Injury events']) ``` ## 3) XGBoost Model with Random Resampling ``` '''XGBoost Classifier and Random 
Undersampling''' # Check the number of instances for each class before and after resampling print(Counter(y_train)) print(Counter(y_rosus)) xgb_rus = XGBClassifier() param_grid_rus = {"gamma":[0.01,0.1,1,10,50,100,1000],'reg_lambda':[1,5,10,20],'learning_rate':np.arange(0.01,1,0.01),'eta':np.arange(0.1,1,0.1)} gscv_rus = RandomizedSearchCV(xgb_rus,param_distributions=param_grid_rus,cv=5,scoring='roc_auc') gscv_rus.fit(X_rosus,y_rosus) print("Best param is {}".format(gscv_rus.best_params_)) print("Best score is {}".format(gscv_rus.best_score_)) optimal_gamma = gscv_rus.best_params_['gamma'] optimal_reg_lambda = gscv_rus.best_params_['reg_lambda'] optim_lr = gscv_rus.best_params_['learning_rate'] optimal_eta = gscv_rus.best_params_['eta'] tuned_xgb_rus = XGBClassifier(gamma=optimal_gamma,reg_lambda=optimal_reg_lambda,learning_rate=optim_lr,eta=optimal_eta, colsample_bytree=0.7,min_child_weight=9,objective='binary:logistic',subsample=0.8) tuned_xgb_rus.fit(X_rosus,y_rosus,early_stopping_rounds=10,eval_metric='auc',eval_set=[(X_test,y_test)]) # Evaluate model's performance on the test set, with AUC, confusion matrix, sensitivity and specificity y_pred = tuned_xgb_rus.predict(X_test) print(f'Area under curve score is {roc_auc_score(y_test,tuned_xgb_rus.predict_proba(X_test)[:,1])}') # Compute true positives, true neagatives, false negatives and false positives tp = confusion_matrix(y_test,y_pred)[1,1] tn = confusion_matrix(y_test,y_pred)[0,0] fn = confusion_matrix(y_test,y_pred)[1,0] fp = confusion_matrix(y_test,y_pred)[0,1] # Compute sensitivity and specificity sensitivity = tp / (tp + fn) specificity = tn / (tn + fp) print(f'Sensitivity is {sensitivity*100}% and specificity is {specificity*100}%') plot_confusion_matrix(y_true=y_test, y_pred=y_pred, class_names=['Healthy events','Injury events']) ``` ## 4) Bagging Model with XGBoost base estimators and Random Resampling ``` '''Bagging Classifier with XGBoost base estimators and Random Undersampling with combined 
Oversampling''' # Check the number of instances for each class before and after resampling print(Counter(y_train)) print(Counter(y_rosus)) base_est = XGBClassifier(gamma=optimal_gamma,reg_lambda=optimal_reg_lambda,learning_rate=optim_lr,eta=optimal_eta, colsample_bytree=0.6,min_child_weight=90,objective='binary:logistic',subsample=0.8,n_estimators=11) # XGBoost base classifier #base_est = XGBClassifier(n_estimators=512,learning_rate=0.01,max_depth=3) # Bagging XGBoost Classifier bagg = BaggingClassifier(base_estimator=base_est,n_estimators=9,max_samples=2048,random_state=21) # Platt's Scaling to get probabilities outputs calib_clf = CalibratedClassifierCV(bagg,cv=5) # Evaluate model's performance on the test set, with AUC, confusion matrix, sensitivity and specificity # You can switch threshold prob in order to bias sensitivity at the cost of specificity. It is set to default 0.5 calib_clf.fit(X_rosus,y_rosus) y_pred_calib = calib_clf.predict_proba(X_test) threshold_prob = 0.5 y_pred = [] for y_hat in y_pred_calib: if y_hat[1] > threshold_prob: y_pred.append(1) else: y_pred.append(0) print(f'Area under curve score is {roc_auc_score(y_test,calib_clf.predict_proba(X_test)[:,1])}') # Compute true positives, true neagatives, false negatives and false positives tp = confusion_matrix(y_test,np.array(y_pred))[1,1] tn = confusion_matrix(y_test,np.array(y_pred))[0,0] fn = confusion_matrix(y_test,np.array(y_pred))[1,0] fp = confusion_matrix(y_test,np.array(y_pred))[0,1] # Compute sensitivity and specificity sensitivity = tp / (tp + fn) specificity = tn / (tn + fp) print(f'Sensitivity is {sensitivity*100}% and specificity is {specificity*100}%') # Plot confusion matrix plot_confusion_matrix(y_true=y_test, y_pred=np.array(y_pred), class_names=['Healthy events','Injury events']) ``` ## 5) Neural Networks Model ``` '''Neural Networks Model''' # Check the number of instances for each class before and after resampling print(Counter(y_train)) print(Counter(y_rus)) # Scale X data 
# Scale features to [0, 1].
# FIX: the original fit a *separate* MinMaxScaler on the test set, which leaks
# test-set statistics and maps train/test onto inconsistent feature ranges.
# Fit on the (resampled) training data only and reuse that scaler for the test data.
_minmax = MinMaxScaler().fit(X_rus)
X_scaled_rus = _minmax.transform(X_rus)
X_scaled_test = _minmax.transform(X_test)

# set random seed for reproducibility
tf.random.set_seed(24)

# Binary classifier: 9 hidden ReLU layers (128/128/128/128/64/64/64/32/32
# units, dropout after the first) feeding a single sigmoid output unit.
nn_model = tf.keras.Sequential([
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(32, activation="relu"),
    tf.keras.layers.Dense(32, activation="relu"),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])

# compile model (AUC is the metric of interest under heavy class imbalance)
nn_model.compile(loss="binary_crossentropy",
                 optimizer=tf.keras.optimizers.Adam(learning_rate=0.002),
                 metrics=['AUC'])

# stop training if the loss has not improved for 3 consecutive epochs
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
history = nn_model.fit(X_scaled_rus, y_rus, epochs=10, batch_size=32,
                       callbacks=[callback])

# Evaluate model performance on the test set: threshold the predicted
# probabilities at 0.5 to obtain hard class labels (vectorized instead of a
# per-row append loop; behavior unchanged).
y_prob_pred = nn_model.predict(X_scaled_test)
y_pred = (y_prob_pred.ravel() > 0.5).astype(int)
print(y_pred[y_pred > 1])  # sanity check: must always print an empty array

# Compute true positives, true negatives, false negatives and false positives
# (one confusion-matrix call instead of four identical ones)
_cm = confusion_matrix(y_test, y_pred)
tn, fp, fn, tp = _cm.ravel()
# Compute sensitivity and specificity
sensitivity = tp / (tp + fn) specificity = tn / (tn + fp) print(f'Sensitivity is {sensitivity*100}% and specificity is {specificity*100}%') # Plot confusion matrix plot_confusion_matrix(y_true=y_test, y_pred=np.array(y_pred), class_names=['Healthy events','Injury events']) # evaluate the model print(f'Area Under Curve is {nn_model.evaluate(X_scaled_test,y_test)[1]}') '''Find optimal Learning Rate for nn_model''' # set random seed for reproducibility tf.random.set_seed(24) # create model with 2 hidden layers and 1 output layer nn_model = tf.keras.Sequential([tf.keras.layers.Dense(128,activation="relu"), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(128,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(128,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(128,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(32,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(32,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(1,activation="sigmoid") ]) # compile model nn_model.compile(loss="binary_crossentropy", optimizer=tf.keras.optimizers.Adam(), metrics=["AUC"]) # set callback to stop after 5 epochs if model doesn't improve and fit training data lr_scheduler = tf.keras.callbacks.LearningRateScheduler(lambda epoch: 1e-4 * 10 ** (epoch/20)) history = nn_model.fit(X_scaled_rus,y_rus,epochs=30,callbacks=[lr_scheduler]) # plot accuracy vs learning rate to find optimal learning rate plt.figure(figsize=[10,10]) plt.semilogx(1e-4 * (10 ** (tf.range(30)/20)),history.history["loss"]) plt.ylabel("Loss") plt.title("Learning Rate vs Loss") plt.show() '''Crossvalidation on nn_model''' from keras.wrappers.scikit_learn import KerasClassifier 
tf.random.set_seed(24) def create_nn_model(): # create model with 2 hidden layers and 1 output layer nn_model = tf.keras.Sequential([tf.keras.layers.Dense(128,activation="relu"), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(128,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(128,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(128,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(32,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(32,activation="relu"), #tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(1,activation="sigmoid") ]) # compile model nn_model.compile(loss="binary_crossentropy", optimizer=tf.keras.optimizers.Adam(learning_rate=0.002), metrics=["AUC"]) return nn_model neural_network = KerasClassifier(build_fn=create_nn_model, epochs=10) # Evaluate neural network using 5-fold cross-validation cv = StratifiedKFold(n_splits=5,shuffle=True,random_state=1) cross_val_score(neural_network, X_scaled_rus, y_rus, scoring='roc_auc', cv=cv) ```
github_jupyter
# Esercitazione 1 ## Esercizio ### Analisi del segnale Apriamo il segnale da analizzare con [Audacity](https://www.audacityteam.org/). Ascoltandolo possiamo chiaramente riconoscere una sequenza di tasti premuti su un tastierino telefonico, anche conosciuto come [DTMF](https://en.wikipedia.org/wiki/Dual-tone_multi-frequency_signaling) #### Analisi dello spettro di frequenze Analizzandone lo spettro di frequenze, possiamo notare come la sequenza appare in modo relativamente chiaro. Possiamo altresì notare che il segnale contiene molto rumore. ![Spettro di Frequenze](spectrogram-1.png) #### Pulizia del segnale Possiamo ora procedere alla rimozione del rumore di fondo, prendendo un campione di rumore ed utilizzando la funzionalità di *Noise Reduction* offerta da Audacity. ![Riduzione del rumore - campionamento](noise-reduction-1.png) Prendiamo dapprima un campione del rumore, selezioniamo `Effect => Noise Reduction => Get Noise Profile` per ottenere un profilo del rumore. Selezioniamo ora la traccia intera, riapriamo il menù di riduzione del rumore ed impostiamo i parametri in modo da ottenere solo i toni DTMF quando clicchiamo su "Preview" (nel mio caso, Noise Reduction impostata a 48dB, Sensitivity impostata a 19.50 e frequency smoothing a 0) Otteniamo così uno spettrogramma decisamente più pulito, e possiamo quindi procedere all'analisi: ![Spettrogramma "pulito"](spectrogram-2.png) #### Analisi dei toni Possiamo ora procedere all'analisi dei toni nel modo seguente: selezioniamo una parte del segnale nella quale è visibilmente presente un picco di una o più frequenze (parte più rossa nello spettrogramma precedente). Dopodiché usiamo lo strumento "Plot Spectrum" in "Analyze" e ricaviamo le frequenze analizzando i picchi: ![Plot Spectrum](plot-spectrum.png) In questo caso, possiamo ricavare le due frequenze dominanti, che risultano essere 853 Hz (-19.9 dB) e 1477 Hz (-18.2 dB). 
Confrontandole con la tabella DTMF, possiamo dire con certezza che il primo tono rappresenta un 9. | | 1209 Hz | 1336 Hz | 1477 Hz | 1633 Hz | |-|------------|------------|------------|-------------| |697 Hz | 1 | 2 | 3 | A | |770 Hz | 4 | 5 | 6 | B | |852 Hz | 7 | 8 | **9** | C | |941 Hz | * | 0 | # | D | Possiamo ricavare allo stesso modo il resto della sequenza, che risulta essere: 9, 3, 3, 5, 5, 5, 5, 5, 5, ?, 3, 6, 6, 6, 6, 6, 3, 3 Se però consideriamo singoli i numeri, allora la sequenza diventa: 9, 3, 5, 5, ?, 3, 6, 6, 3 ### Ipotesi di risoluzione del problema Per risolvere il problema, si potrebbe analizzare il file wav tramite un software, effettuare l'FFT e ricavare il picco di frequenze, confrontandolo con la lista di frequenze ammesse dal DTMF. Se risulta una combinazione valida, possiamo salvarla, altrimenti la ignoriamo.
github_jupyter