blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e033462e31782d612e32cd9b6d92178973cb1203
|
32da81b5bff3273a3eb50375203479baf721e570
|
/Energy_emiss_accounting.R
|
d99ad9547a3265fc0941eba679af4917a01f8b8b
|
[] |
no_license
|
labyseson/Waste-LCA
|
a98dca8faddf68f5b51af503cf831dffa8c26bd9
|
dcf32905231627a447518350281da7e2a699b39f
|
refs/heads/master
| 2021-07-13T21:30:56.432761
| 2020-08-10T18:47:23
| 2020-08-10T18:47:23
| 187,103,631
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 198,875
|
r
|
Energy_emiss_accounting.R
|
#Author: Bo Liu, UCLA
#County-level calculation
# Build the analysis frame: keep six columns of waste_all (positions are set
# by the upstream load — col 4 is the waste-type column; presumably the others
# are geography, Feedstock, Prod, and MC — TODO confirm against waste_all),
# then attach technology coefficients and state GHG emission factors.
# merge() joins on all shared column names by default.
waste <- waste_all[c(4,5,6,15,23,24)]
colnames(waste)[4] <- "Waste_type"
coefficients <- merge(coefficients, tech)
waste_1 <- merge(waste, coefficients)
waste_2 <- merge(waste_1, state_GHG_ef)
#Collection & Transportation_1 (energy - GJ, emiss - kg CO2e)
# Convert production to wet weight using moisture content MC (in percent),
# then compute diesel energy and CO2e for collection and the first transport
# leg (haul distance * per-km diesel intensity). These totals are reused by
# every conversion pathway below.
waste_2$Prod_ww <- waste_2$Prod/(1-waste_2$MC/100)
waste_2$collection_en <- waste_2$Prod_ww * waste_2$Collection_Diesel
waste_2$collection_emiss <- waste_2$collection_en * waste_2$Diesel_GHG
waste_2$transport1_en <- waste_2$Prod_ww * waste_2$Transport_km_1 * waste_2$Transport_Diesel
waste_2$transport1_emiss <- waste_2$transport1_en * waste_2$Diesel_GHG
### Accounting method 1: excluding biogenic CO2
#E1
#electricity T&D loss - 6.5%, heat loss - 20%
# Pathway E1: combined electricity + heat generation.
# Gross outputs are derated for delivery losses; "main" product is
# electricity, "co" product is heat.
waste_2$E1_elec <- waste_2$Prod_ww * waste_2$E1.e.out * (1-0.065)
waste_2$E1_heat <- waste_2$Prod_ww * waste_2$E1.h.out * (1-0.2)
waste_2$E1_energy <- waste_2$E1_elec + waste_2$E1_heat
waste_2$E1_energymain <- waste_2$E1_elec
waste_2$E1_energyco <- waste_2$E1_heat
# Net energy = gross output minus process inputs (electricity/heat/diesel per
# unit feedstock) and collection + first-leg transport, scaled by E1_tech
# (presumably the share of feedstock routed to this pathway — TODO confirm).
waste_2$E1_netenergy <- (waste_2$E1_energy - waste_2$Prod_ww * (waste_2$E1.e.in + waste_2$E1.h.in + waste_2$E1.d.in) - waste_2$collection_en - waste_2$transport1_en) * waste_2$E1_tech
waste_2$E1_collectionemiss <- waste_2$collection_emiss * waste_2$E1_tech
waste_2$E1_transport1emiss <- waste_2$transport1_emiss * waste_2$E1_tech
# Process emissions: grid/heat/diesel inputs plus non-biogenic process
# emissions (biogenic CO2 excluded under accounting method 1).
waste_2$E1_processemiss <- waste_2$Prod_ww * (waste_2$E1.e.in * waste_2$Powergen_GHG
+ waste_2$E1.h.in * waste_2$Heatgen_GHG
+ waste_2$E1.d.in * waste_2$Diesel_GHG
+ waste_2$Nonbio_emiss1) * waste_2$E1_tech
waste_2$E1_enduseemiss <- 0
# Displacement credit (negative): generated power/heat offsets grid
# electricity and conventional heat.
# NOTE(review): dispemiss is not scaled by E1_tech, unlike the other emission
# stages — confirm whether that is intentional.
waste_2$E1_dispemiss <- 0 - waste_2$E1_elec* waste_2$Powergen_GHG - waste_2$E1_heat* waste_2$Heatgen_GHG
waste_2$E1_dispemissmain <- 0 - waste_2$E1_elec* waste_2$Powergen_GHG
waste_2$E1_dispemissco <- 0 - waste_2$E1_heat* waste_2$Heatgen_GHG
waste_2$E1_netemiss <- waste_2$E1_collectionemiss + waste_2$E1_transport1emiss + waste_2$E1_processemiss + waste_2$E1_enduseemiss + waste_2$E1_dispemiss
#E2
#electricity T&D loss - 6.5%, heat loss - 20%
# Pathway E2: same structure as E1 (electricity + heat) with the E2
# coefficient set and adoption share E2_tech.
waste_2$E2_elec <- waste_2$Prod_ww * waste_2$E2.e.out * (1-0.065)
waste_2$E2_heat <- waste_2$Prod_ww * waste_2$E2.h.out * (1-0.2)
waste_2$E2_energy <- waste_2$E2_elec + waste_2$E2_heat
waste_2$E2_energymain <- waste_2$E2_elec
waste_2$E2_energyco <- waste_2$E2_heat
waste_2$E2_netenergy <- (waste_2$E2_energy - waste_2$Prod_ww * (waste_2$E2.e.in + waste_2$E2.h.in + waste_2$E2.d.in) - waste_2$collection_en - waste_2$transport1_en) * waste_2$E2_tech
waste_2$E2_collectionemiss <- waste_2$collection_emiss * waste_2$E2_tech
waste_2$E2_transport1emiss <- waste_2$transport1_emiss * waste_2$E2_tech
waste_2$E2_processemiss <- waste_2$Prod_ww * (waste_2$E2.e.in * waste_2$Powergen_GHG
+ waste_2$E2.h.in * waste_2$Heatgen_GHG
+ waste_2$E2.d.in * waste_2$Diesel_GHG
+ waste_2$Nonbio_emiss1) * waste_2$E2_tech
waste_2$E2_enduseemiss <- 0
# Displacement credit for avoided grid power and conventional heat.
waste_2$E2_dispemiss <- 0 - waste_2$E2_elec* waste_2$Powergen_GHG - waste_2$E2_heat* waste_2$Heatgen_GHG
waste_2$E2_dispemissmain <- 0 - waste_2$E2_elec* waste_2$Powergen_GHG
waste_2$E2_dispemissco <- 0 - waste_2$E2_heat* waste_2$Heatgen_GHG
waste_2$E2_netemiss <- waste_2$E2_collectionemiss + waste_2$E2_transport1emiss + waste_2$E2_processemiss + waste_2$E2_enduseemiss + waste_2$E2_dispemiss
#E3
#electricity T&D loss - 6.5%
# Pathway E3: electricity-only generation — no heat co-product, so this
# pathway has no *_energyco / *_dispemissco columns.
waste_2$E3_energy <- waste_2$Prod_ww * waste_2$E3.e.out * (1-0.065)
waste_2$E3_energymain <- waste_2$E3_energy
waste_2$E3_netenergy <- (waste_2$E3_energy - waste_2$Prod_ww * (waste_2$E3.e.in + waste_2$E3.h.in + waste_2$E3.d.in) - waste_2$collection_en - waste_2$transport1_en) * waste_2$E3_tech
waste_2$E3_collectionemiss <- waste_2$collection_emiss * waste_2$E3_tech
waste_2$E3_transport1emiss <- waste_2$transport1_emiss * waste_2$E3_tech
waste_2$E3_processemiss <- waste_2$Prod_ww * (waste_2$E3.e.in * waste_2$Powergen_GHG
+ waste_2$E3.h.in * waste_2$Heatgen_GHG
+ waste_2$E3.d.in * waste_2$Diesel_GHG
+ waste_2$Nonbio_emiss1) * waste_2$E3_tech
waste_2$E3_enduseemiss <- 0
# Displacement credit: all output offsets grid electricity, so dispemiss and
# dispemissmain are identical here.
waste_2$E3_dispemiss <- 0 - waste_2$E3_energy* waste_2$Powergen_GHG
waste_2$E3_dispemissmain <- 0 - waste_2$E3_energy* waste_2$Powergen_GHG
waste_2$E3_netemiss <- waste_2$E3_collectionemiss + waste_2$E3_transport1emiss + waste_2$E3_processemiss + waste_2$E3_enduseemiss + waste_2$E3_dispemiss
#E4
#electricity T&D loss - 6.5%, heat loss - 20%
# Pathway E4: electricity + heat, but its process inputs are natural gas and
# diesel (E4.ng.in / E4.d.in) rather than grid power/heat.
waste_2$E4_elec <- waste_2$Prod_ww * waste_2$E4.e.out * (1-0.065)
waste_2$E4_heat <- waste_2$Prod_ww * waste_2$E4.h.out * (1-0.2)
waste_2$E4_energy <- waste_2$E4_elec + waste_2$E4_heat
waste_2$E4_energymain <- waste_2$E4_elec
waste_2$E4_energyco <- waste_2$E4_heat
waste_2$E4_netenergy <- (waste_2$E4_energy - waste_2$Prod_ww * (waste_2$E4.ng.in + waste_2$E4.d.in) - waste_2$collection_en - waste_2$transport1_en) * waste_2$E4_tech
waste_2$E4_collectionemiss <- waste_2$collection_emiss * waste_2$E4_tech
waste_2$E4_transport1emiss <- waste_2$transport1_emiss * waste_2$E4_tech
# Process emissions from NG and diesel inputs plus non-biogenic process CO2e.
waste_2$E4_processemiss <- waste_2$Prod_ww * (waste_2$E4.ng.in * waste_2$NG_GHG
+ waste_2$E4.d.in * waste_2$Diesel_GHG
+ waste_2$Nonbio_emiss1) * waste_2$E4_tech
waste_2$E4_enduseemiss <- 0
# Displacement credit for avoided grid power and conventional heat.
waste_2$E4_dispemiss <- 0 - waste_2$E4_elec* waste_2$Powergen_GHG - waste_2$E4_heat* waste_2$Heatgen_GHG
waste_2$E4_dispemissmain <- 0 - waste_2$E4_elec* waste_2$Powergen_GHG
waste_2$E4_dispemissco <- 0 - waste_2$E4_heat* waste_2$Heatgen_GHG
waste_2$E4_netemiss <- waste_2$E4_collectionemiss + waste_2$E4_transport1emiss + waste_2$E4_processemiss + waste_2$E4_enduseemiss + waste_2$E4_dispemiss
#M1
#methane leakage - 2%
# Pathway M1: methane (RNG) production; 2% of produced methane is assumed
# lost as leakage before delivery.
waste_2$M1_energy <- waste_2$Prod_ww * waste_2$M1.m.out * (1-0.02)
waste_2$M1_energymain <- waste_2$M1_energy
waste_2$M1_netenergy <- (waste_2$M1_energy - waste_2$Prod_ww * (waste_2$M1.e.in + waste_2$M1.h.in) - waste_2$collection_en - waste_2$transport1_en) * waste_2$M1_tech
waste_2$M1_collectionemiss <- waste_2$collection_emiss * waste_2$M1_tech
waste_2$M1_transport1emiss <- waste_2$transport1_emiss * waste_2$M1_tech
waste_2$M1_processemiss <- waste_2$Prod_ww * (waste_2$M1.e.in * waste_2$Powergen_GHG + waste_2$M1.h.in * waste_2$Heatgen_GHG)* waste_2$M1_tech
# Leaked methane counted as GHG: 0.02 = leaked fraction; /50 *28 presumably
# converts leaked energy to kg CH4 (LHV ~50 MJ/kg) and applies a CH4 GWP100
# of 28 — TODO confirm units (GJ vs MJ) against the coefficient tables.
waste_2$M1_transport2emiss <- waste_2$Prod_ww * waste_2$M1.m.out *0.02 /50 *28 * waste_2$M1_tech
waste_2$M1_enduseemiss <- waste_2$Prod_ww * waste_2$Nonbio_emiss2 * waste_2$M1_tech
# Displacement credit: delivered RNG offsets fossil natural gas.
waste_2$M1_dispemiss <- 0 - waste_2$M1_energy* waste_2$NG_GHG
waste_2$M1_dispemissmain <- waste_2$M1_dispemiss
waste_2$M1_netemiss <- waste_2$M1_collectionemiss + waste_2$M1_transport1emiss + waste_2$M1_processemiss + waste_2$M1_transport2emiss + waste_2$M1_enduseemiss + waste_2$M1_dispemiss
#M2
#methane leakage - 2%
# Pathway M2: methane (RNG) production, same structure as M1 but with
# electricity/heat/diesel process inputs and the M2 coefficient set.
waste_2$M2_energy <- waste_2$Prod_ww * waste_2$M2.m.out * (1-0.02)
waste_2$M2_energymain <- waste_2$M2_energy
# Net energy: gross RNG minus process inputs and collection + first-leg
# transport, scaled by the M2 adoption share.
waste_2$M2_netenergy <- (waste_2$M2_energy - waste_2$Prod_ww * (waste_2$M2.e.in + waste_2$M2.h.in + waste_2$M2.d.in) - waste_2$collection_en - waste_2$transport1_en) * waste_2$M2_tech
waste_2$M2_collectionemiss <- waste_2$collection_emiss * waste_2$M2_tech
waste_2$M2_transport1emiss <- waste_2$transport1_emiss * waste_2$M2_tech
waste_2$M2_processemiss <- waste_2$Prod_ww * (waste_2$M2.e.in * waste_2$Powergen_GHG +
waste_2$M2.h.in * waste_2$Heatgen_GHG +
waste_2$M2.d.in * waste_2$Diesel_GHG) * waste_2$M2_tech
# BUG FIX: this leakage term was scaled by M1_tech (copy-paste from the M1
# block); it must use this pathway's own share, M2_tech.
# 0.02 = leaked fraction; /50 *28 presumably converts leaked energy to kg CH4
# (LHV ~50 MJ/kg) and applies a CH4 GWP100 of 28 — TODO confirm units.
waste_2$M2_transport2emiss <- waste_2$Prod_ww * waste_2$M2.m.out * 0.02 /50 *28 * waste_2$M2_tech
waste_2$M2_enduseemiss <- waste_2$Prod_ww * waste_2$Nonbio_emiss2 * waste_2$M2_tech
# Displacement credit: delivered RNG offsets fossil natural gas.
waste_2$M2_dispemiss <- 0 - waste_2$M2_energy* waste_2$NG_GHG
waste_2$M2_dispemissmain <- waste_2$M2_dispemiss
waste_2$M2_netemiss <- waste_2$M2_collectionemiss + waste_2$M2_transport1emiss + waste_2$M2_processemiss + waste_2$M2_transport2emiss + waste_2$M2_enduseemiss + waste_2$M2_dispemiss
#Eth1
#energy intensity of ethanol - 26.95 MJ/kg
# Pathway Eth1: ethanol production with an electricity co-product.
# Main product is ethanol; Eth1_eth / 26.95 converts ethanol energy to mass
# for the second (product distribution) transport leg.
waste_2$Eth1_elec <- waste_2$Prod_ww * waste_2$Eth1.e.out * (1-0.065)
waste_2$Eth1_eth <- waste_2$Prod_ww * waste_2$Eth1.eth.out
waste_2$Eth1_energy <- waste_2$Eth1_elec + waste_2$Eth1_eth
waste_2$Eth1_energymain <- waste_2$Eth1_eth
waste_2$Eth1_energyco <- waste_2$Eth1_elec
# BUG FIX: collection + first-leg transport energy was additionally
# multiplied by M1_tech (copy-paste from the M1 block). The whole expression
# is already scaled by Eth1_tech, matching every other pathway's net-energy
# formula, so the stray factor is removed.
waste_2$Eth1_netenergy <- (waste_2$Eth1_energy - waste_2$Prod_ww * (waste_2$Eth1.ng.in + waste_2$Eth1.d.in) -
(waste_2$collection_en + waste_2$transport1_en) -
waste_2$Eth1_eth / 26.95 * waste_2$Transport_km_2 * waste_2$Transport_Diesel) * waste_2$Eth1_tech
waste_2$Eth1_collectionemiss <- waste_2$collection_emiss * waste_2$Eth1_tech
waste_2$Eth1_transport1emiss <- waste_2$transport1_emiss * waste_2$Eth1_tech
waste_2$Eth1_processemiss <- waste_2$Prod_ww * (waste_2$Eth1.ng.in * waste_2$Heatgen_GHG + waste_2$Eth1.d.in * waste_2$Diesel_GHG) * waste_2$Eth1_tech
# NOTE(review): transport2emiss is not scaled by Eth1_tech, unlike the other
# emission stages of this pathway — confirm whether that is intentional.
waste_2$Eth1_transport2emiss <- waste_2$Eth1_eth / 26.95 * waste_2$Transport_km_2 * waste_2$Transport_Diesel * waste_2$Diesel_GHG
waste_2$Eth1_enduseemiss <- waste_2$Prod_ww * waste_2$Nonbio_emiss2 * waste_2$Eth1_tech
# Displacement credits: ethanol offsets gasoline; co-product power offsets
# grid electricity.
waste_2$Eth1_dispemiss <- 0 - waste_2$Eth1_eth* waste_2$Gasoline_GHG - waste_2$Eth1_elec * waste_2$Powergen_GHG
waste_2$Eth1_dispemissmain <- 0 - waste_2$Eth1_eth* waste_2$Gasoline_GHG
waste_2$Eth1_dispemissco <- 0 - waste_2$Eth1_elec * waste_2$Powergen_GHG
waste_2$Eth1_netemiss <- waste_2$Eth1_collectionemiss + waste_2$Eth1_transport1emiss + waste_2$Eth1_processemiss + waste_2$Eth1_transport2emiss + waste_2$Eth1_enduseemiss + waste_2$Eth1_dispemiss
#Rd1
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
#electricity T&D loss - 6.5%, methane leakage - 2%
# Pathway Rd1: renewable diesel (main product) with gasoline, jet fuel,
# methane, and electricity co-products. The /42.79, /41.74, /43.10 divisors
# convert fuel energy to mass for the product-distribution transport leg.
waste_2$Rd1_d <- waste_2$Prod_ww * waste_2$Rd1.d.out
waste_2$Rd1_g <- waste_2$Prod_ww * waste_2$Rd1.g.out
waste_2$Rd1_j <- waste_2$Prod_ww * waste_2$Rd1.j.out
waste_2$Rd1_m <- waste_2$Prod_ww * waste_2$Rd1.m.out * (1-0.02)
waste_2$Rd1_elec <- waste_2$Prod_ww * waste_2$Rd1.e.out * (1-0.065)
waste_2$Rd1_energy <- waste_2$Rd1_d + waste_2$Rd1_g + waste_2$Rd1_j + waste_2$Rd1_m + waste_2$Rd1_elec
waste_2$Rd1_energymain <- waste_2$Rd1_d
waste_2$Rd1_energyco <- waste_2$Rd1_g + waste_2$Rd1_j + waste_2$Rd1_m + waste_2$Rd1_elec
# Net energy subtracts electricity input, collection + first-leg transport,
# and the distribution transport of each liquid fuel; scaled by Rd1_tech.
waste_2$Rd1_netenergy <-(waste_2$Rd1_energy - waste_2$Prod_ww * waste_2$Rd1.e.in -
(waste_2$collection_en + waste_2$transport1_en) -
waste_2$Rd1_d / 42.79 * waste_2$Transport_km_2 * waste_2$Transport_Diesel -
waste_2$Rd1_g / 41.74 * waste_2$Transport_km_2 * waste_2$Transport_Diesel -
waste_2$Rd1_j / 43.10 * waste_2$Transport_km_2 * waste_2$Transport_Diesel ) * waste_2$Rd1_tech
waste_2$Rd1_collectionemiss <- waste_2$collection_emiss * waste_2$Rd1_tech
waste_2$Rd1_transport1emiss <- waste_2$transport1_emiss * waste_2$Rd1_tech
waste_2$Rd1_processemiss <- waste_2$Prod_ww * waste_2$Rd1.e.in * waste_2$Powergen_GHG * waste_2$Rd1_tech
waste_2$Rd1_transport2emiss <- (waste_2$Rd1_d / 42.79 + waste_2$Rd1_g / 41.74 + waste_2$Rd1_j / 43.10) * waste_2$Transport_km_2 * waste_2$Transport_Diesel * waste_2$Diesel_GHG
waste_2$Rd1_enduseemiss <- waste_2$Prod_ww * waste_2$Nonbio_emiss2 * waste_2$Rd1_tech
# Displacement credits: each product offsets its fossil counterpart.
waste_2$Rd1_dispemiss <- 0 - waste_2$Rd1_d *waste_2$Diesel_GHG - waste_2$Rd1_g * waste_2$Gasoline_GHG - waste_2$Rd1_j * waste_2$Jet_GHG -
waste_2$Rd1_m * waste_2$NG_GHG - waste_2$Rd1_elec * waste_2$Powergen_GHG
waste_2$Rd1_dispemissmain <- 0 - waste_2$Rd1_d *waste_2$Diesel_GHG
waste_2$Rd1_dispemissco <- 0 - waste_2$Rd1_g * waste_2$Gasoline_GHG - waste_2$Rd1_j * waste_2$Jet_GHG -
waste_2$Rd1_m * waste_2$NG_GHG - waste_2$Rd1_elec * waste_2$Powergen_GHG
waste_2$Rd1_netemiss <- waste_2$Rd1_collectionemiss + waste_2$Rd1_transport1emiss + waste_2$Rd1_processemiss + waste_2$Rd1_enduseemiss + waste_2$Rd1_transport2emiss + waste_2$Rd1_dispemiss
#Rd2
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74
# Pathway Rd2: renewable diesel (main) with a gasoline co-product; inputs
# are electricity and natural gas.
waste_2$Rd2_d <- waste_2$Prod_ww * waste_2$Rd2.d.out
waste_2$Rd2_g <- waste_2$Prod_ww * waste_2$Rd2.g.out
waste_2$Rd2_energy <- waste_2$Rd2_d + waste_2$Rd2_g
waste_2$Rd2_energymain <- waste_2$Rd2_d
waste_2$Rd2_energyco <- waste_2$Rd2_g
waste_2$Rd2_netenergy <-(waste_2$Rd2_energy - waste_2$Prod_ww * (waste_2$Rd2.e.in + waste_2$Rd2.ng.in) -
(waste_2$collection_en + waste_2$transport1_en) -
waste_2$Rd2_d / 42.79 * waste_2$Transport_km_2 * waste_2$Transport_Diesel -
waste_2$Rd2_g / 41.74 * waste_2$Transport_km_2 * waste_2$Transport_Diesel ) * waste_2$Rd2_tech
waste_2$Rd2_collectionemiss <- waste_2$collection_emiss * waste_2$Rd2_tech
waste_2$Rd2_transport1emiss <- waste_2$transport1_emiss * waste_2$Rd2_tech
# NOTE(review): the natural-gas input Rd2.ng.in is multiplied by H2_GHG —
# possibly because the NG is reformed to hydrogen for hydrotreating, but
# confirm this pairing is intentional (contrast E4, which uses NG_GHG).
waste_2$Rd2_processemiss <- waste_2$Prod_ww * (waste_2$Rd2.e.in * waste_2$Powergen_GHG + waste_2$Rd2.ng.in * waste_2$H2_GHG) * waste_2$Rd2_tech
waste_2$Rd2_transport2emiss <- (waste_2$Rd2_d / 42.79 + waste_2$Rd2_g / 41.74) * waste_2$Transport_km_2 * waste_2$Transport_Diesel * waste_2$Diesel_GHG
waste_2$Rd2_enduseemiss <- waste_2$Prod_ww * waste_2$Nonbio_emiss2 * waste_2$Rd2_tech
# Displacement credits: renewable diesel offsets diesel, co-product offsets
# gasoline.
waste_2$Rd2_dispemiss <- 0 - waste_2$Rd2_d *waste_2$Diesel_GHG - waste_2$Rd2_g* waste_2$Gasoline_GHG
waste_2$Rd2_dispemissmain <- 0 - waste_2$Rd2_d *waste_2$Diesel_GHG
waste_2$Rd2_dispemissco <- 0 - waste_2$Rd2_g* waste_2$Gasoline_GHG
waste_2$Rd2_netemiss <- waste_2$Rd2_collectionemiss + waste_2$Rd2_transport1emiss + waste_2$Rd2_processemiss + waste_2$Rd2_transport2emiss + waste_2$Rd2_enduseemiss + waste_2$Rd2_dispemiss
#Bj1
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Pathway Bj1: hydrogen-input fuel pathway with diesel as the main product
# and gasoline + jet fuel co-products.
waste_2$Bj1_d <- waste_2$Prod_ww * waste_2$Bj1.d.out
waste_2$Bj1_g <- waste_2$Prod_ww * waste_2$Bj1.g.out
waste_2$Bj1_j <- waste_2$Prod_ww * waste_2$Bj1.j.out
waste_2$Bj1_energy <- waste_2$Bj1_d + waste_2$Bj1_g + waste_2$Bj1_j
waste_2$Bj1_energymain <- waste_2$Bj1_d
waste_2$Bj1_energyco <- waste_2$Bj1_g + waste_2$Bj1_j
waste_2$Bj1_netenergy <-(waste_2$Bj1_energy - waste_2$Prod_ww * waste_2$Bj1.h2.in -
(waste_2$collection_en + waste_2$transport1_en) -
(waste_2$Bj1_d / 42.79 + waste_2$Bj1_g / 41.74 +waste_2$Bj1_j / 43.10) * waste_2$Transport_km_2 * waste_2$Transport_Diesel) * waste_2$Bj1_tech
waste_2$Bj1_collectionemiss <- waste_2$collection_emiss * waste_2$Bj1_tech
waste_2$Bj1_transport1emiss <- waste_2$transport1_emiss * waste_2$Bj1_tech
waste_2$Bj1_processemiss <- waste_2$Prod_ww * waste_2$Bj1.h2.in * waste_2$H2_GHG * waste_2$Bj1_tech
waste_2$Bj1_transport2emiss <- (waste_2$Bj1_d / 42.79 + waste_2$Bj1_g / 41.74 + waste_2$Bj1_j / 43.10) * waste_2$Transport_km_2 * waste_2$Transport_Diesel * waste_2$Diesel_GHG
waste_2$Bj1_enduseemiss <- waste_2$Prod_ww * waste_2$Nonbio_emiss2 * waste_2$Bj1_tech
# Displacement credits: each fuel offsets its fossil counterpart.
waste_2$Bj1_dispemiss <- 0 - waste_2$Bj1_d *waste_2$Diesel_GHG - waste_2$Bj1_g* waste_2$Gasoline_GHG - waste_2$Bj1_j * waste_2$Jet_GHG
waste_2$Bj1_dispemissmain <- 0 - waste_2$Bj1_d *waste_2$Diesel_GHG
waste_2$Bj1_dispemissco <- 0 - waste_2$Bj1_g* waste_2$Gasoline_GHG - waste_2$Bj1_j * waste_2$Jet_GHG
waste_2$Bj1_netemiss <- waste_2$Bj1_collectionemiss + waste_2$Bj1_transport1emiss + waste_2$Bj1_processemiss + waste_2$Bj1_transport2emiss + waste_2$Bj1_enduseemiss + waste_2$Bj1_dispemiss
#Bj2
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Pathway Bj2: jet-fuel-only pathway (no diesel/gasoline co-products),
# hydrogen process input.
waste_2$Bj2_energy <- waste_2$Prod_ww * waste_2$Bj2.j.out
# BUG FIX: energymain previously referenced Bj1_energy (copy-paste from the
# Bj1 block); the main product here is this pathway's own jet-fuel output.
waste_2$Bj2_energymain <- waste_2$Bj2_energy
waste_2$Bj2_netenergy <-(waste_2$Bj2_energy - waste_2$Prod_ww * waste_2$Bj2.h2.in -
(waste_2$collection_en + waste_2$transport1_en) -
waste_2$Bj2_energy / 43.10 * waste_2$Transport_km_2 * waste_2$Transport_Diesel ) * waste_2$Bj2_tech
waste_2$Bj2_collectionemiss <- waste_2$collection_emiss * waste_2$Bj2_tech
waste_2$Bj2_transport1emiss <- waste_2$transport1_emiss * waste_2$Bj2_tech
# BUG FIX: process emissions were not scaled by the Bj2 adoption share,
# unlike every other pathway's processemiss term; Bj2_tech factor added.
waste_2$Bj2_processemiss <- waste_2$Prod_ww * waste_2$Bj2.h2.in * waste_2$H2_GHG * waste_2$Bj2_tech
waste_2$Bj2_transport2emiss <- waste_2$Bj2_energy / 43.10 * waste_2$Transport_km_2 * waste_2$Transport_Diesel * waste_2$Diesel_GHG
waste_2$Bj2_enduseemiss <- waste_2$Prod_ww * waste_2$Nonbio_emiss2 * waste_2$Bj2_tech
# Displacement credit: renewable jet fuel offsets fossil jet fuel.
waste_2$Bj2_dispemiss <- 0 - waste_2$Bj2_energy * waste_2$Jet_GHG
waste_2$Bj2_dispemissmain <- waste_2$Bj2_dispemiss
waste_2$Bj2_netemiss <- waste_2$Bj2_collectionemiss + waste_2$Bj2_transport1emiss + waste_2$Bj2_processemiss + waste_2$Bj2_transport2emiss + waste_2$Bj2_enduseemiss + waste_2$Bj2_dispemiss
#Bj3
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Pathway Bj3: jet fuel as main product, diesel + gasoline co-products;
# electricity is the process input.
waste_2$Bj3_d <- waste_2$Prod_ww * waste_2$Bj3.d.out
waste_2$Bj3_g <- waste_2$Prod_ww * waste_2$Bj3.g.out
waste_2$Bj3_j <- waste_2$Prod_ww * waste_2$Bj3.j.out
waste_2$Bj3_energy <- waste_2$Bj3_d + waste_2$Bj3_g + waste_2$Bj3_j
waste_2$Bj3_energymain <- waste_2$Bj3_j
waste_2$Bj3_energyco <- waste_2$Bj3_d + waste_2$Bj3_g
waste_2$Bj3_netenergy <-(waste_2$Bj3_energy - waste_2$Prod_ww * waste_2$Bj3.e.in -
(waste_2$collection_en + waste_2$transport1_en) -
(waste_2$Bj3_d / 42.79 + waste_2$Bj3_g / 41.74 +waste_2$Bj3_j / 43.10) * waste_2$Transport_km_2 * waste_2$Transport_Diesel) * waste_2$Bj3_tech
waste_2$Bj3_collectionemiss <- waste_2$collection_emiss * waste_2$Bj3_tech
waste_2$Bj3_transport1emiss <- waste_2$transport1_emiss * waste_2$Bj3_tech
waste_2$Bj3_processemiss <- waste_2$Prod_ww * waste_2$Bj3.e.in * waste_2$Powergen_GHG * waste_2$Bj3_tech
waste_2$Bj3_transport2emiss <- (waste_2$Bj3_d / 42.79 + waste_2$Bj3_g / 41.74 + waste_2$Bj3_j / 43.10) * waste_2$Transport_km_2 * waste_2$Transport_Diesel * waste_2$Diesel_GHG
waste_2$Bj3_enduseemiss <- waste_2$Prod_ww * waste_2$Nonbio_emiss2 * waste_2$Bj3_tech
# Displacement credits: each fuel offsets its fossil counterpart.
waste_2$Bj3_dispemiss <- 0 - waste_2$Bj3_d *waste_2$Diesel_GHG - waste_2$Bj3_g* waste_2$Gasoline_GHG - waste_2$Bj3_j * waste_2$Jet_GHG
waste_2$Bj3_dispemissmain <- 0 - waste_2$Bj3_j * waste_2$Jet_GHG
waste_2$Bj3_dispemissco <- 0 - waste_2$Bj3_d * waste_2$Diesel_GHG - waste_2$Bj3_g * waste_2$Gasoline_GHG
waste_2$Bj3_netemiss <- waste_2$Bj3_collectionemiss + waste_2$Bj3_transport1emiss + waste_2$Bj3_processemiss + waste_2$Bj3_transport2emiss + waste_2$Bj3_enduseemiss + waste_2$Bj3_dispemiss
#Bj4
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Pathway Bj4: jet fuel as main product, diesel + gasoline co-products;
# hydrogen is the process input.
waste_2$Bj4_d <- waste_2$Prod_ww * waste_2$Bj4.d.out
waste_2$Bj4_g <- waste_2$Prod_ww * waste_2$Bj4.g.out
waste_2$Bj4_j <- waste_2$Prod_ww * waste_2$Bj4.j.out
waste_2$Bj4_energy <- waste_2$Bj4_d + waste_2$Bj4_g + waste_2$Bj4_j
waste_2$Bj4_energymain <- waste_2$Bj4_j
waste_2$Bj4_energyco <- waste_2$Bj4_d + waste_2$Bj4_g
waste_2$Bj4_netenergy <-(waste_2$Bj4_energy - waste_2$Prod_ww * waste_2$Bj4.h2.in -
(waste_2$collection_en + waste_2$transport1_en) -
(waste_2$Bj4_d / 42.79 + waste_2$Bj4_g / 41.74 +waste_2$Bj4_j / 43.10) * waste_2$Transport_km_2 * waste_2$Transport_Diesel) * waste_2$Bj4_tech
waste_2$Bj4_collectionemiss <- waste_2$collection_emiss * waste_2$Bj4_tech
waste_2$Bj4_transport1emiss <- waste_2$transport1_emiss * waste_2$Bj4_tech
waste_2$Bj4_processemiss <- waste_2$Prod_ww * waste_2$Bj4.h2.in * waste_2$H2_GHG * waste_2$Bj4_tech
waste_2$Bj4_transport2emiss <- (waste_2$Bj4_d / 42.79 + waste_2$Bj4_g / 41.74 + waste_2$Bj4_j / 43.10) * waste_2$Transport_km_2 * waste_2$Transport_Diesel * waste_2$Diesel_GHG
waste_2$Bj4_enduseemiss <- waste_2$Prod_ww * waste_2$Nonbio_emiss2 * waste_2$Bj4_tech
# Displacement credits: each fuel offsets its fossil counterpart.
waste_2$Bj4_dispemiss <- 0 - waste_2$Bj4_d *waste_2$Diesel_GHG - waste_2$Bj4_g* waste_2$Gasoline_GHG - waste_2$Bj4_j * waste_2$Jet_GHG
waste_2$Bj4_dispemissmain <- 0 - waste_2$Bj4_j * waste_2$Jet_GHG
waste_2$Bj4_dispemissco <- 0 - waste_2$Bj4_d * waste_2$Diesel_GHG - waste_2$Bj4_g * waste_2$Gasoline_GHG
waste_2$Bj4_netemiss <- waste_2$Bj4_collectionemiss + waste_2$Bj4_transport1emiss + waste_2$Bj4_processemiss + waste_2$Bj4_transport2emiss+ waste_2$Bj4_enduseemiss + waste_2$Bj4_dispemiss
#Bj5
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Pathway Bj5: jet fuel as main product, diesel + gasoline co-products;
# electricity is the process input.
waste_2$Bj5_d <- waste_2$Prod_ww * waste_2$Bj5.d.out
waste_2$Bj5_g <- waste_2$Prod_ww * waste_2$Bj5.g.out
waste_2$Bj5_j <- waste_2$Prod_ww * waste_2$Bj5.j.out
waste_2$Bj5_energy <- waste_2$Bj5_d + waste_2$Bj5_g + waste_2$Bj5_j
waste_2$Bj5_energymain <- waste_2$Bj5_j
waste_2$Bj5_energyco <- waste_2$Bj5_d + waste_2$Bj5_g
waste_2$Bj5_netenergy <-(waste_2$Bj5_energy - waste_2$Prod_ww * waste_2$Bj5.e.in -
(waste_2$collection_en + waste_2$transport1_en) -
(waste_2$Bj5_d / 42.79 + waste_2$Bj5_g / 41.74 +waste_2$Bj5_j / 43.10) * waste_2$Transport_km_2 * waste_2$Transport_Diesel) * waste_2$Bj5_tech
waste_2$Bj5_collectionemiss <- waste_2$collection_emiss * waste_2$Bj5_tech
waste_2$Bj5_transport1emiss <- waste_2$transport1_emiss * waste_2$Bj5_tech
waste_2$Bj5_processemiss <- waste_2$Prod_ww * waste_2$Bj5.e.in * waste_2$Powergen_GHG * waste_2$Bj5_tech
waste_2$Bj5_transport2emiss <- (waste_2$Bj5_d / 42.79 + waste_2$Bj5_g / 41.74 + waste_2$Bj5_j / 43.10) * waste_2$Transport_km_2 * waste_2$Transport_Diesel * waste_2$Diesel_GHG
waste_2$Bj5_enduseemiss <- waste_2$Prod_ww * waste_2$Nonbio_emiss2 * waste_2$Bj5_tech
# Displacement credits: each fuel offsets its fossil counterpart.
waste_2$Bj5_dispemiss <- 0 - waste_2$Bj5_d *waste_2$Diesel_GHG - waste_2$Bj5_g* waste_2$Gasoline_GHG - waste_2$Bj5_j * waste_2$Jet_GHG
waste_2$Bj5_dispemissmain <- 0 - waste_2$Bj5_j * waste_2$Jet_GHG
waste_2$Bj5_dispemissco <- 0 - waste_2$Bj5_d * waste_2$Diesel_GHG - waste_2$Bj5_g * waste_2$Gasoline_GHG
waste_2$Bj5_netemiss <- waste_2$Bj5_collectionemiss + waste_2$Bj5_transport1emiss + waste_2$Bj5_processemiss + waste_2$Bj5_transport2emiss + waste_2$Bj5_enduseemiss + waste_2$Bj5_dispemiss
#Bj6
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Pathway Bj6: jet fuel as main product, diesel + gasoline co-products;
# this pathway uses both hydrogen and electricity process inputs.
waste_2$Bj6_d <- waste_2$Prod_ww * waste_2$Bj6.d.out
waste_2$Bj6_g <- waste_2$Prod_ww * waste_2$Bj6.g.out
waste_2$Bj6_j <- waste_2$Prod_ww * waste_2$Bj6.j.out
waste_2$Bj6_energy <- waste_2$Bj6_d + waste_2$Bj6_g + waste_2$Bj6_j
waste_2$Bj6_energymain <- waste_2$Bj6_j
waste_2$Bj6_energyco <- waste_2$Bj6_d + waste_2$Bj6_g
waste_2$Bj6_netenergy <-(waste_2$Bj6_energy - waste_2$Prod_ww * waste_2$Bj6.h2.in - waste_2$Prod_ww * waste_2$Bj6.e.in -
(waste_2$collection_en + waste_2$transport1_en) -
(waste_2$Bj6_d / 42.79 + waste_2$Bj6_g / 41.74 +waste_2$Bj6_j / 43.10) * waste_2$Transport_km_2 * waste_2$Transport_Diesel) * waste_2$Bj6_tech
waste_2$Bj6_collectionemiss <- waste_2$collection_emiss * waste_2$Bj6_tech
waste_2$Bj6_transport1emiss <- waste_2$transport1_emiss * waste_2$Bj6_tech
waste_2$Bj6_processemiss <- waste_2$Prod_ww * (waste_2$Bj6.h2.in * waste_2$H2_GHG + waste_2$Bj6.e.in * waste_2$Powergen_GHG) * waste_2$Bj6_tech
waste_2$Bj6_transport2emiss <- (waste_2$Bj6_d / 42.79 + waste_2$Bj6_g / 41.74 + waste_2$Bj6_j / 43.10) * waste_2$Transport_km_2 * waste_2$Transport_Diesel * waste_2$Diesel_GHG
waste_2$Bj6_enduseemiss <- waste_2$Prod_ww * waste_2$Nonbio_emiss2 * waste_2$Bj6_tech
# Displacement credits: each fuel offsets its fossil counterpart.
waste_2$Bj6_dispemiss <- 0 - waste_2$Bj6_d *waste_2$Diesel_GHG - waste_2$Bj6_g* waste_2$Gasoline_GHG - waste_2$Bj6_j * waste_2$Jet_GHG
waste_2$Bj6_dispemissmain <- 0 - waste_2$Bj6_j * waste_2$Jet_GHG
waste_2$Bj6_dispemissco <- 0 - waste_2$Bj6_d * waste_2$Diesel_GHG - waste_2$Bj6_g * waste_2$Gasoline_GHG
waste_2$Bj6_netemiss <- waste_2$Bj6_collectionemiss + waste_2$Bj6_transport1emiss + waste_2$Bj6_processemiss + waste_2$Bj6_transport2emiss + waste_2$Bj6_enduseemiss + waste_2$Bj6_dispemiss
#US results by waste type
#energy main & co-products
# Aggregate main/co-product energy to national totals per waste type, then
# reshape to long form. The gather() range E1_energymain:Bj6_energyco relies
# on the column order set by this subset() — keep the selection order intact.
# (gather/separate are superseded tidyr verbs; pivot_longer/separate_wider
# would be the modern equivalents.)
waste_energy_US <- subset(waste_2, select = c( Waste_type, Prod_ww,
                                               E1_energymain, E1_energyco,
                                               E2_energymain, E2_energyco,
                                               E3_energymain,
                                               E4_energymain, E4_energyco,
                                               M1_energymain,
                                               M2_energymain,
                                               Eth1_energymain, Eth1_energyco,
                                               Rd1_energymain, Rd1_energyco,
                                               Rd2_energymain, Rd2_energyco,
                                               Bj1_energymain, Bj1_energyco,
                                               Bj2_energymain,
                                               Bj3_energymain, Bj3_energyco,
                                               Bj4_energymain, Bj4_energyco,
                                               Bj5_energymain, Bj5_energyco,
                                               Bj6_energymain, Bj6_energyco))
# aggregate() sums every column within each waste type (rows with NA in any
# column are dropped by the formula interface).
waste_energy_US <- aggregate(.~Waste_type, waste_energy_US, sum)
waste_energy_US_1 <- gather(waste_energy_US, category, Energy_GJ, E1_energymain:Bj6_energyco)
# Per-unit energy: summed GJ divided by summed wet-weight production.
waste_energy_US_1$Energy_GJ_per <- waste_energy_US_1$Energy_GJ / waste_energy_US_1$Prod_ww
# Split "E1_energymain" into Tech = "E1", Category = "energymain".
waste_energy_US_1 <- separate(data = waste_energy_US_1, col = category, into = c("Tech", "Category"), sep = "\\_")
# Fix the technology display order for plotting.
waste_energy_US_1$Tech <- factor(waste_energy_US_1$Tech,
                                 levels = c("E1", "E2", "E3", "E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                            "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6"))
#netenergy
# Same aggregation/reshape as the gross-energy table above, but for the
# tech-share-scaled net energy of each pathway.
waste_netenergy_US <- subset(waste_2, select = c(Waste_type, Prod_ww,
                                                 E1_netenergy,
                                                 E2_netenergy,
                                                 E3_netenergy,
                                                 E4_netenergy,
                                                 M1_netenergy,
                                                 M2_netenergy,
                                                 Eth1_netenergy,
                                                 Rd1_netenergy,
                                                 Rd2_netenergy,
                                                 Bj1_netenergy,
                                                 Bj2_netenergy,
                                                 Bj3_netenergy,
                                                 Bj4_netenergy,
                                                 Bj5_netenergy,
                                                 Bj6_netenergy))
waste_netenergy_US <- aggregate(.~Waste_type, waste_netenergy_US, sum)
waste_netenergy_US_1 <- gather(waste_netenergy_US, category, Netenergy_GJ, E1_netenergy:Bj6_netenergy)
waste_netenergy_US_1$Netenergy_GJ_per <- waste_netenergy_US_1$Netenergy_GJ / waste_netenergy_US_1$Prod_ww
# Split "E1_netenergy" into Tech = "E1", Category = "netenergy".
waste_netenergy_US_1 <- separate(data = waste_netenergy_US_1, col = category, into = c("Tech", "Category"), sep = "\\_")
waste_netenergy_US_1$Tech <- factor(waste_netenergy_US_1$Tech,
                                    levels = c("E1", "E2", "E3", "E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                               "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6"))
#EROI
# Total (gross) energy per waste type and tech, used together with net
# energy to compute EROI = output / input = E / (E - netE).
waste_energy_tot <- subset(waste_2, select = c(Waste_type, Prod_ww,
                                               E1_energy,
                                               E2_energy,
                                               E3_energy,
                                               E4_energy,
                                               M1_energy,
                                               M2_energy,
                                               Eth1_energy,
                                               Rd1_energy,
                                               Rd2_energy,
                                               Bj1_energy,
                                               Bj2_energy,
                                               Bj3_energy,
                                               Bj4_energy,
                                               Bj5_energy,
                                               Bj6_energy))
waste_energy_tot <- aggregate(.~Waste_type, waste_energy_tot, sum)
waste_energy_tot_1 <- gather(waste_energy_tot, category, Energy_GJ, E1_energy:Bj6_energy)
waste_energy_tot_1$Energy_GJ_per <- waste_energy_tot_1$Energy_GJ / waste_energy_tot_1$Prod_ww
waste_energy_tot_1 <- separate(data = waste_energy_tot_1, col = category, into = c("Tech", "Category"), sep = "\\_")
waste_energy_tot_1$Tech <- factor(waste_energy_tot_1$Tech,
                                  levels = c("E1", "E2", "E3", "E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                             "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6"))
# Join gross and net energy, keep rows with positive gross output, and
# compute EROI. NOTE(review): *_netenergy is scaled by the *_tech share while
# *_energy is not — confirm the denominator (E - netE) is the intended
# "energy invested" under that scaling.
waste_eroi <- merge(subset(waste_energy_tot_1, select = -c(Category)), subset(waste_netenergy_US_1, select = -c(Category)), by=c("Waste_type", "Tech", "Prod_ww"))
waste_eroi_1 <- waste_eroi[which(waste_eroi$Energy_GJ_per>0),]
waste_eroi_1$EROI <- waste_eroi_1$Energy_GJ_per / (waste_eroi_1$Energy_GJ_per - waste_eroi_1$Netenergy_GJ_per)
#emissions by stage
# National emissions per waste type, broken out by life-cycle stage for each
# technology pathway (collection, transport legs, process, displacement
# credits, end use, and the pre-computed net total).
waste_emiss_US <- subset(waste_2, select = c(Waste_type, Prod_ww,
                                             E1_collectionemiss, E1_transport1emiss, E1_processemiss, E1_dispemissmain, E1_dispemissco, E1_netemiss, E1_enduseemiss,
                                             E2_collectionemiss, E2_transport1emiss, E2_processemiss, E2_dispemissmain, E2_dispemissco, E2_netemiss, E2_enduseemiss,
                                             E3_collectionemiss, E3_transport1emiss, E3_processemiss, E3_dispemissmain, E3_netemiss, E3_enduseemiss,
                                             E4_collectionemiss, E4_transport1emiss, E4_processemiss, E4_dispemissmain, E4_dispemissco, E4_netemiss, E4_enduseemiss,
                                             M1_collectionemiss, M1_transport1emiss, M1_processemiss, M1_dispemissmain, M1_netemiss, M1_enduseemiss,
                                             M2_collectionemiss, M2_transport1emiss, M2_processemiss, M2_dispemissmain, M2_netemiss, M2_enduseemiss,
                                             Eth1_collectionemiss, Eth1_transport1emiss, Eth1_processemiss, Eth1_transport2emiss, Eth1_dispemissmain, Eth1_dispemissco, Eth1_netemiss, Eth1_enduseemiss,
                                             Rd1_collectionemiss, Rd1_transport1emiss, Rd1_processemiss, Rd1_transport2emiss, Rd1_dispemissmain, Rd1_dispemissco, Rd1_netemiss, Rd1_enduseemiss,
                                             Rd2_collectionemiss, Rd2_transport1emiss, Rd2_processemiss, Rd2_transport2emiss, Rd2_dispemissmain, Rd2_dispemissco, Rd2_netemiss, Rd2_enduseemiss,
                                             Bj1_collectionemiss, Bj1_transport1emiss, Bj1_processemiss, Bj1_transport2emiss, Bj1_dispemissmain, Bj1_dispemissco, Bj1_netemiss, Bj1_enduseemiss,
                                             Bj2_collectionemiss, Bj2_transport1emiss, Bj2_processemiss, Bj2_transport2emiss, Bj2_dispemissmain, Bj2_netemiss, Bj2_enduseemiss,
                                             Bj3_collectionemiss, Bj3_transport1emiss, Bj3_processemiss, Bj3_transport2emiss, Bj3_dispemissmain, Bj3_dispemissco, Bj3_netemiss, Bj3_enduseemiss,
                                             Bj4_collectionemiss, Bj4_transport1emiss, Bj4_processemiss, Bj4_transport2emiss, Bj4_dispemissmain, Bj4_dispemissco, Bj4_netemiss, Bj4_enduseemiss,
                                             Bj5_collectionemiss, Bj5_transport1emiss, Bj5_processemiss, Bj5_transport2emiss, Bj5_dispemissmain, Bj5_dispemissco, Bj5_netemiss, Bj5_enduseemiss,
                                             Bj6_collectionemiss, Bj6_transport1emiss, Bj6_processemiss, Bj6_transport2emiss, Bj6_dispemissmain, Bj6_dispemissco, Bj6_netemiss, Bj6_enduseemiss))
waste_emiss_US <- aggregate(.~Waste_type, waste_emiss_US, sum)
waste_emiss_US_1 <- gather(waste_emiss_US, category, emiss_kg, E1_collectionemiss:Bj6_enduseemiss)
# Strip every "emiss" from the names so a single "_" remains:
# "E1_collectionemiss" -> "E1_collection", "E1_dispemissmain" -> "E1_dispmain",
# letting separate() split cleanly into Tech and Stage.
waste_emiss_US_1$category <- gsub("emiss", "", waste_emiss_US_1$category)
waste_emiss_US_1 <- separate(data = waste_emiss_US_1, col = category, into = c("Tech", "Stage"), sep = "\\_")
# kg CO2e -> metric tons per unit wet-weight production.
waste_emiss_US_1$emiss_MT_per <- waste_emiss_US_1$emiss_kg / waste_emiss_US_1$Prod_ww / 1000
# "Current" level reserved for the baseline rows appended from `current`.
waste_emiss_US_1$Tech <- factor(waste_emiss_US_1$Tech,
                                levels = c("E1", "E2","E3","E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                           "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6", "Current"))
waste_emiss_US_1$Stage <- factor(waste_emiss_US_1$Stage,
                                 levels = c("collection", "transport1","process","transport2", "dispmain", "dispco", "net", "enduse"))
# Stacked-stage table (net excluded) with the baseline `current` appended
# (presumably defined upstream with matching columns — TODO confirm), and a
# separate net-only table.
waste_emiss_US_2 <- subset(waste_emiss_US_1, waste_emiss_US_1$Stage != "net")
waste_emiss_US_3 <- rbind(waste_emiss_US_2, current)
waste_emiss_US_4 <- subset(waste_emiss_US_1, waste_emiss_US_1$Stage == "net")
#US results by feedstock
#energy main & co-products
# Same main/co-product energy aggregation as the waste-type tables, but
# grouped by individual Feedstock instead.
waste_energy_fs <- subset(waste_2, select = c(Feedstock, Prod_ww,
                                              E1_energymain, E1_energyco,
                                              E2_energymain, E2_energyco,
                                              E3_energymain,
                                              E4_energymain, E4_energyco,
                                              M1_energymain,
                                              M2_energymain,
                                              Eth1_energymain, Eth1_energyco,
                                              Rd1_energymain, Rd1_energyco,
                                              Rd2_energymain, Rd2_energyco,
                                              Bj1_energymain, Bj1_energyco,
                                              Bj2_energymain,
                                              Bj3_energymain, Bj3_energyco,
                                              Bj4_energymain, Bj4_energyco,
                                              Bj5_energymain, Bj5_energyco,
                                              Bj6_energymain, Bj6_energyco))
waste_energy_fs <- aggregate(.~Feedstock, waste_energy_fs, sum)
waste_energy_fs_1 <- gather(waste_energy_fs, category, Energy_GJ, E1_energymain:Bj6_energyco)
waste_energy_fs_1$Energy_GJ_per <- waste_energy_fs_1$Energy_GJ / waste_energy_fs_1$Prod_ww
waste_energy_fs_1 <- separate(data = waste_energy_fs_1, col = category, into = c("Tech", "Category"), sep = "\\_")
waste_energy_fs_1$Tech <- factor(waste_energy_fs_1$Tech,
                                 levels = c("E1", "E2", "E3", "E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                            "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6"))
# Re-attach the parent waste type via the feedstock->waste_type lookup in
# `tech`, then fix feedstock display order for plotting.
waste_energy_fs_1$Waste_type <- tech$Waste_type[match(waste_energy_fs_1$Feedstock, tech$Feedstock)]
waste_energy_fs_1$Feedstock <- factor(waste_energy_fs_1$Feedstock,
                                      levels = c("Barley straw", "Citrus residues", "Corn stover", "Cotton gin trash", "Cotton residue", "Noncitrus residues", "Oats straw", "Rice hulls",
                                                 "Rice straw", "Sorghum stubble", "Sugarcane bagasse", "Sugarcane trash", "Tree nut residues", "Wheat straw", "Hogs, 1000+ head",
                                                 "Milk cows, 500+ head", "Food waste", "Primary mill residue", "Secondary mill residue", "Other forest residue", "Other forest thinnings",
                                                 "CD waste", "MSW wood", "Other MSW", "Paper and paperboard", "Plastics", "Rubber and leather", "Textiles", "Yard trimmings"))
#netenergy
# Net energy (output minus all pathway energy inputs, computed upstream) by
# feedstock; same aggregate/gather/factor pattern as the gross-energy table.
waste_netenergy_fs <- subset(waste_2, select = c(Feedstock, Prod_ww,
                                            E1_netenergy,
                                            E2_netenergy,
                                            E3_netenergy,
                                            E4_netenergy,
                                            M1_netenergy,
                                            M2_netenergy,
                                            Eth1_netenergy,
                                            Rd1_netenergy,
                                            Rd2_netenergy,
                                            Bj1_netenergy,
                                            Bj2_netenergy,
                                            Bj3_netenergy,
                                            Bj4_netenergy,
                                            Bj5_netenergy,
                                            Bj6_netenergy))
waste_netenergy_fs <- aggregate(.~Feedstock, waste_netenergy_fs, sum)
waste_netenergy_fs_1 <- gather(waste_netenergy_fs, category, Netenergy_GJ, E1_netenergy:Bj6_netenergy)
# Production-weighted net-energy intensity (GJ per wet Mg).
waste_netenergy_fs_1$Netenergy_GJ_per <- waste_netenergy_fs_1$Netenergy_GJ / waste_netenergy_fs_1$Prod_ww
waste_netenergy_fs_1 <- separate(data = waste_netenergy_fs_1, col = category, into = c("Tech", "Category"), sep = "\\_")
waste_netenergy_fs_1$Tech <- factor(waste_netenergy_fs_1$Tech,
                                 levels = c("E1", "E2", "E3", "E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                            "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6"))
waste_netenergy_fs_1$Waste_type <- tech$Waste_type[match(waste_netenergy_fs_1$Feedstock, tech$Feedstock)]
# Same fixed feedstock ordering used by all by-feedstock tables/plots.
waste_netenergy_fs_1$Feedstock <- factor(waste_netenergy_fs_1$Feedstock,
                                      levels = c("Barley straw", "Citrus residues", "Corn stover", "Cotton gin trash", "Cotton residue", "Noncitrus residues", "Oats straw", "Rice hulls",
                                                 "Rice straw", "Sorghum stubble", "Sugarcane bagasse", "Sugarcane trash", "Tree nut residues", "Wheat straw", "Hogs, 1000+ head",
                                                 "Milk cows, 500+ head", "Food waste", "Primary mill residue", "Secondary mill residue", "Other forest residue", "Other forest thinnings",
                                                 "CD waste", "MSW wood", "Other MSW", "Paper and paperboard", "Plastics", "Rubber and leather", "Textiles", "Yard trimmings"))
#EROI
# Energy return on investment by feedstock: gross output over invested
# energy, where invested = gross - net (both computed upstream).
waste_energy_tot_fs <- subset(waste_2, select = c(Feedstock, Prod_ww,
                                                 E1_energy,
                                                 E2_energy,
                                                 E3_energy,
                                                 E4_energy,
                                                 M1_energy,
                                                 M2_energy,
                                                 Eth1_energy,
                                                 Rd1_energy,
                                                 Rd2_energy,
                                                 Bj1_energy,
                                                 Bj2_energy,
                                                 Bj3_energy,
                                                 Bj4_energy,
                                                 Bj5_energy,
                                                 Bj6_energy))
waste_energy_tot_fs <- aggregate(.~Feedstock, waste_energy_tot_fs, sum)
waste_energy_tot_fs_1 <- gather(waste_energy_tot_fs, category, Energy_GJ, E1_energy:Bj6_energy)
waste_energy_tot_fs_1$Energy_GJ_per <- waste_energy_tot_fs_1$Energy_GJ / waste_energy_tot_fs_1$Prod_ww
waste_energy_tot_fs_1 <- separate(data = waste_energy_tot_fs_1, col = category, into = c("Tech", "Category"), sep = "\\_")
waste_energy_tot_fs_1$Tech <- factor(waste_energy_tot_fs_1$Tech,
                                   levels = c("E1", "E2", "E3", "E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                              "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6"))
# Join gross and net per-unit values; merging on Prod_ww is safe here because
# both tables were aggregated to one row per Feedstock with identical Prod_ww.
waste_eroi_fs <- merge(subset(waste_energy_tot_fs_1, select = -c(Category)), subset(waste_netenergy_fs_1, select = -c(Category)), by=c("Feedstock", "Tech", "Prod_ww"))
# Drop feedstock/tech combinations with no energy output (EROI undefined).
waste_eroi_fs_1 <- waste_eroi_fs[which(waste_eroi_fs$Energy_GJ_per>0),]
# EROI = gross output / (gross output - net energy) = output / energy invested.
waste_eroi_fs_1$EROI <- waste_eroi_fs_1$Energy_GJ_per / (waste_eroi_fs_1$Energy_GJ_per - waste_eroi_fs_1$Netenergy_GJ_per)
waste_eroi_fs_1$Feedstock <- factor(waste_eroi_fs_1$Feedstock,
                                    levels = c("Barley straw", "Citrus residues", "Corn stover", "Cotton gin trash", "Cotton residue", "Noncitrus residues", "Oats straw", "Rice hulls",
                                               "Rice straw", "Sorghum stubble", "Sugarcane bagasse", "Sugarcane trash", "Tree nut residues", "Wheat straw", "Hogs, 1000+ head",
                                               "Milk cows, 500+ head", "Food waste", "Primary mill residue", "Secondary mill residue", "Other forest residue", "Other forest thinnings",
                                               "CD waste", "MSW wood", "Other MSW", "Paper and paperboard", "Plastics", "Rubber and leather", "Textiles", "Yard trimmings"))
#emissions by stage
# GHG emissions (kg CO2e) by life-cycle stage and feedstock. Column names
# follow "<Tech>_<stage>emiss"; not every pathway has every stage (e.g. E3,
# M1, M2, Bj2 have no dispemissco; E1-E4 have no transport2emiss).
waste_emiss_fs <- subset(waste_2, select = c(Feedstock, Prod_ww,
                                          E1_collectionemiss, E1_transport1emiss, E1_processemiss, E1_dispemissmain, E1_dispemissco, E1_netemiss, E1_enduseemiss,
                                          E2_collectionemiss, E2_transport1emiss, E2_processemiss, E2_dispemissmain, E2_dispemissco, E2_netemiss, E2_enduseemiss,
                                          E3_collectionemiss, E3_transport1emiss, E3_processemiss, E3_dispemissmain, E3_netemiss, E3_enduseemiss,
                                          E4_collectionemiss, E4_transport1emiss, E4_processemiss, E4_dispemissmain, E4_dispemissco, E4_netemiss, E4_enduseemiss,
                                          M1_collectionemiss, M1_transport1emiss, M1_processemiss, M1_dispemissmain, M1_netemiss, M1_enduseemiss,
                                          M2_collectionemiss, M2_transport1emiss, M2_processemiss, M2_dispemissmain, M2_netemiss, M2_enduseemiss,
                                          Eth1_collectionemiss, Eth1_transport1emiss, Eth1_processemiss, Eth1_transport2emiss, Eth1_dispemissmain, Eth1_dispemissco, Eth1_netemiss, Eth1_enduseemiss,
                                          Rd1_collectionemiss, Rd1_transport1emiss, Rd1_processemiss, Rd1_transport2emiss, Rd1_dispemissmain, Rd1_dispemissco, Rd1_netemiss, Rd1_enduseemiss,
                                          Rd2_collectionemiss, Rd2_transport1emiss, Rd2_processemiss, Rd2_transport2emiss, Rd2_dispemissmain, Rd2_dispemissco, Rd2_netemiss, Rd2_enduseemiss,
                                          Bj1_collectionemiss, Bj1_transport1emiss, Bj1_processemiss, Bj1_transport2emiss, Bj1_dispemissmain, Bj1_dispemissco, Bj1_netemiss, Bj1_enduseemiss,
                                          Bj2_collectionemiss, Bj2_transport1emiss, Bj2_processemiss, Bj2_transport2emiss, Bj2_dispemissmain, Bj2_netemiss, Bj2_enduseemiss,
                                          Bj3_collectionemiss, Bj3_transport1emiss, Bj3_processemiss, Bj3_transport2emiss, Bj3_dispemissmain, Bj3_dispemissco, Bj3_netemiss, Bj3_enduseemiss,
                                          Bj4_collectionemiss, Bj4_transport1emiss, Bj4_processemiss, Bj4_transport2emiss, Bj4_dispemissmain, Bj4_dispemissco, Bj4_netemiss, Bj4_enduseemiss,
                                          Bj5_collectionemiss, Bj5_transport1emiss, Bj5_processemiss, Bj5_transport2emiss, Bj5_dispemissmain, Bj5_dispemissco, Bj5_netemiss, Bj5_enduseemiss,
                                          Bj6_collectionemiss, Bj6_transport1emiss, Bj6_processemiss, Bj6_transport2emiss, Bj6_dispemissmain, Bj6_dispemissco, Bj6_netemiss, Bj6_enduseemiss))
waste_emiss_fs <- aggregate(.~Feedstock, waste_emiss_fs, sum)
waste_emiss_fs_1 <- gather(waste_emiss_fs, category, emiss_kg, E1_collectionemiss:Bj6_enduseemiss)
# Strip the "emiss" suffix so the split below yields clean stage names
# ("E1_collectionemiss" -> Tech "E1", Stage "collection").
waste_emiss_fs_1$category <- gsub("emiss", "", waste_emiss_fs_1$category)
waste_emiss_fs_1 <- separate(data = waste_emiss_fs_1, col = category, into = c("Tech", "Stage"), sep = "\\_")
# kg -> metric ton CO2e per wet Mg of feedstock.
waste_emiss_fs_1$emiss_MT_per <- waste_emiss_fs_1$emiss_kg / waste_emiss_fs_1$Prod_ww / 1000
waste_emiss_fs_1$Tech <- factor(waste_emiss_fs_1$Tech,
                               levels = c("E1", "E2","E3","E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                          "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6"))
waste_emiss_fs_1$Stage <- factor(waste_emiss_fs_1$Stage,
                                levels = c("collection", "transport1","process","transport2", "dispmain", "dispco", "net", "enduse"))
waste_emiss_fs_1$Feedstock <- factor(waste_emiss_fs_1$Feedstock,
                                    levels = c("Barley straw", "Citrus residues", "Corn stover", "Cotton gin trash", "Cotton residue", "Noncitrus residues", "Oats straw", "Rice hulls",
                                               "Rice straw", "Sorghum stubble", "Sugarcane bagasse", "Sugarcane trash", "Tree nut residues", "Wheat straw", "Hogs, 1000+ head",
                                               "Milk cows, 500+ head", "Food waste", "Primary mill residue", "Secondary mill residue", "Other forest residue", "Other forest thinnings",
                                               "CD waste", "MSW wood", "Other MSW", "Paper and paperboard", "Plastics", "Rubber and leather", "Textiles", "Yard trimmings"))
# Split stacked per-stage rows from the "net" rows plotted as overlay bars.
waste_emiss_fs_2 <- subset(waste_emiss_fs_1, waste_emiss_fs_1$Stage != "net")
waste_emiss_fs_3 <- subset(waste_emiss_fs_1, waste_emiss_fs_1$Stage == "net")
##Charts
#By waste type
#figure 2a - energy
# Stacked bars: gross energy by product category, with a narrower overlay
# bar for net energy; faceted by waste type, one bar per technology.
colors_energy <- c("energymain" = "#2171B5",
                   "energyco" = "#4292C6",
                   "netenergy" = "#08306B")
p <- ggplot()+
  geom_bar(data = waste_energy_US_1[which(waste_energy_US_1$Energy_GJ_per>0),],
           aes(x=Tech, y=Energy_GJ_per, fill=Category), stat="identity", position = "stack") +
  # Net energy drawn as a half-width bar over the stacked gross-energy bars.
  geom_bar(data = waste_netenergy_US_1[which(waste_netenergy_US_1$Netenergy_GJ_per != 0),],
           aes(x=Tech, y=Netenergy_GJ_per, fill=Category), stat="identity", width=0.5)+
  geom_hline(yintercept=0, size=0.05)+
  theme_bw() +
  theme(text = element_text(size=20)) +
  scale_fill_manual(values=colors_energy, breaks=c("energymain", "energyco","netenergy"),
                    labels = c("Energy production/main product", "Energy production/co-product(s)", "Net energy")) +
  guides(fill = guide_legend(title = "", label.theme = element_text(size = 20, angle = 0))) +
  scale_y_continuous(name="GJ/Mg ww", limits = c(-5,15)) +
  labs(x = '') +
  theme(legend.position="top",
        axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.3),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
  facet_grid(~Waste_type, scales = "free_x", space = "free_x")+
  theme(strip.background =element_rect(fill="white"), strip.text = element_text(size = 20, face = "bold")) +
  ggtitle("(a) Energy") +
  theme(plot.title = element_text(face = "bold", size = 24, hjust = 0))
ggsave(paste(County_FOLDER, "/energy_nobioCO2.png", sep=""), plot=p, width=18.5,height=4.5,units="in",dpi=300)
#figure 2b - EROI
# EROI bars by technology, faceted by waste type. The horizontal line at
# y = 1 marks the break-even point (output equals energy invested).
p <- ggplot()+
  geom_bar(data = waste_eroi_1, aes(x=Tech, y=EROI), stat="identity", fill="#F16913") +
  geom_hline(yintercept=0, size=0.05)+
  geom_hline(yintercept=1, size=0.2)+
  theme_bw() +
  theme(text = element_text(size=20)) +
  guides(fill = FALSE) +
  scale_y_continuous(name="", limits = c(0,20)) +
  labs(x = '') +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.3),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
  facet_grid(~Waste_type, scales = "free_x", space = "free_x")+
  theme(strip.background =element_rect(fill="white"), strip.text = element_text(size = 20, face = "bold")) +
  ggtitle("(b) Energy return on investment") +
  theme(plot.title = element_text(face = "bold", size = 24, hjust = 0))
ggsave(paste(County_FOLDER, "/eroi_nobioCO2.png", sep=""), plot=p, width=18.5,height=4.5,units="in",dpi=300)
#figure 2c - emiss by stage (3 column legend)
# GWP by life-cycle stage (stacked) with net GWP as a narrower overlay bar;
# includes the "all" (current-management baseline) series from `current`.
colors_stage <- c("dispmain" = "#33A02C",
                  "dispco" = "#66C2A5",
                  "collection" = "#FC8D62",
                  "transport1" = "#A6CEE3",
                  "process" = "#1F78B4",
                  "transport2" = "#FDBF6F",
                  "enduse" = "#FB9A99",
                  "net" = "#762A83",
                  "all" = "#B3B3B3")
p <- ggplot()+
  geom_bar(data = waste_emiss_US_3[which(waste_emiss_US_3$emiss_MT_per != 0),],
           aes(x=Tech, y=emiss_MT_per, fill=Stage), stat="identity", position = "stack") +
  # Net GWP drawn half-width on top of the stacked stage contributions.
  geom_bar(data = waste_emiss_US_4[which(waste_emiss_US_4$emiss_MT_per != 0),],
           aes(x=Tech, y=emiss_MT_per, fill=Stage), stat="identity", width=0.5)+
  geom_hline(yintercept=0, size=0.05)+
  theme_bw() +
  theme(text = element_text(size=20)) +
  scale_fill_manual(values=colors_stage, breaks=c("collection", "transport1","process","transport2", "enduse", "dispmain", "dispco", "net", "all"),
                    labels = c("Collection", "Transport to facility", "Processing",
                               "Transmission & Distribution", "Enduse", "Displacement/main product", "Displacement/co-product(s)","Net GWP", "Current management practices")) +
  guides(fill = guide_legend(title = "", label.theme = element_text(size = 20, angle = 0), ncol = 5)) +
  scale_y_continuous(name="Metric ton CO2e/Mg ww", limits = c(-1.5,2)) +
  labs(x = '') +
  theme(legend.position="top",
        axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.3),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
  facet_grid(~Waste_type, scales = "free_x", space = "free_x")+
  theme(strip.background =element_rect(fill="white"), strip.text = element_text(size = 20, face = "bold"))+
  ggtitle("(c) GWP - excluding biogenic CO2") +
  theme(plot.title = element_text(face = "bold", size = 24, hjust = 0))
ggsave(paste(County_FOLDER, "/emiss_by_stage_nobioCO2.png", sep=""), plot=p, width=20.5,height=6,units="in",dpi=300)
#ggsave(paste(County_FOLDER, "/emiss_by_stage_nobioCO2.pdf", sep=""), plot=p, width=20.5,height=6,units="in")
# Identical chart to figure 2c, re-saved with panel label "(d)" and a
# different filename (presumably for an alternative figure assembly).
p <- ggplot()+
  geom_bar(data = waste_emiss_US_3[which(waste_emiss_US_3$emiss_MT_per != 0),],
           aes(x=Tech, y=emiss_MT_per, fill=Stage), stat="identity", position = "stack") +
  geom_bar(data = waste_emiss_US_4[which(waste_emiss_US_4$emiss_MT_per != 0),],
           aes(x=Tech, y=emiss_MT_per, fill=Stage), stat="identity", width=0.5)+
  geom_hline(yintercept=0, size=0.05)+
  theme_bw() +
  theme(text = element_text(size=20)) +
  scale_fill_manual(values=colors_stage, breaks=c("collection", "transport1","process","transport2", "enduse", "dispmain", "dispco", "net", "all"),
                    labels = c("Collection", "Transport to facility", "Processing",
                               "Transmission & Distribution", "Enduse", "Displacement/main product", "Displacement/co-product(s)","Net GWP", "Current management practices")) +
  guides(fill = guide_legend(title = "", label.theme = element_text(size = 20, angle = 0), ncol = 5)) +
  scale_y_continuous(name="Metric ton CO2e/Mg ww", limits = c(-1.5,2)) +
  labs(x = '') +
  theme(legend.position="top",
        axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.3),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
  facet_grid(~Waste_type, scales = "free_x", space = "free_x")+
  theme(strip.background =element_rect(fill="white"), strip.text = element_text(size = 20, face = "bold"))+
  ggtitle("(d) GWP - excluding biogenic CO2") +
  theme(plot.title = element_text(face = "bold", size = 24, hjust = 0))
ggsave(paste(County_FOLDER, "/emiss_by_stage_nobioCO2_1.png", sep=""), plot=p, width=20.5,height=6,units="in",dpi=300)
#ggsave(paste(County_FOLDER, "/emiss_by_stage_nobioCO2_1.pdf", sep=""), plot=p, width=20.5,height=6,units="in")
#by feedstock
##energy
# Per-feedstock version of the energy chart: 8-column facet wrap over all 29
# feedstocks, saved as one large poster-size panel figure.
p <- ggplot()+
  geom_bar(data = waste_energy_fs_1[which(waste_energy_fs_1$Energy_GJ_per>0),],
           aes(x=Tech, y=Energy_GJ_per, fill=Category), stat="identity", position = "stack") +
  geom_bar(data = waste_netenergy_fs_1[which(waste_netenergy_fs_1$Netenergy_GJ_per != 0),],
           aes(x=Tech, y=Netenergy_GJ_per, fill=Category), stat="identity", width=0.5)+
  geom_hline(yintercept=0, size=0.05)+
  theme_bw() +
  theme(text = element_text(size=32)) +
  scale_fill_manual(values=colors_energy, breaks=c("energymain", "energyco","netenergy"),
                    labels = c("Energy production/main product", "Energy production/co-product(s)", "Net energy")) +
  guides(fill = guide_legend(title = "", label.theme = element_text(size = 44, angle = 0))) +
  scale_y_continuous(name="GJ/Mg ww", limits = c(-5,30)) +
  labs(x = '') +
  # Legend placed inside the plotting area (bottom-right empty facet slot).
  theme(legend.position=c(0.8,0.1),
        legend.key = element_rect(size = 32),
        legend.key.height = unit(1, "in"),
        legend.key.width = unit(1, "in"),
        axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.3),
        axis.title.y = element_text(size=40, face = "bold"),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
  facet_wrap(~Feedstock, scales = "free", ncol = 8)+
  theme(strip.background =element_rect(fill="white"), strip.text = element_text(size = 32, face = "bold"))
#ggtitle("(a) Energy") +
#theme(plot.title = element_text(face = "bold", size = 24, hjust = 0))
ggsave(paste(County_FOLDER, "/energy_fs_nobioCO2.png", sep=""), plot=p, width=60,height=40,units="in",dpi=300, limitsize = FALSE)
##eroi
# Per-feedstock EROI chart; y = 1 line marks energy break-even.
p <- ggplot()+
  geom_bar(data = waste_eroi_fs_1, aes(x=Tech, y=EROI), stat="identity", fill="#F16913") +
  geom_hline(yintercept=0, size=0.05)+
  geom_hline(yintercept=1, size=0.2)+
  theme_bw() +
  theme(text = element_text(size=32)) +
  guides(fill = FALSE) +
  scale_y_continuous(name="", limits = c(0,30)) +
  labs(x = '') +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.3),
        axis.title.y = element_text(size=40, face = "bold"),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
  facet_wrap(~Feedstock, scales = "free", ncol = 8)+
  theme(strip.background =element_rect(fill="white"), strip.text = element_text(size = 32, face = "bold"))
#ggtitle("(b) Energy return on investment") +
#theme(plot.title = element_text(face = "bold", size = 24, hjust = 0))
ggsave(paste(County_FOLDER, "/eroi_fs_nobioCO2.png", sep=""), plot=p, width=60,height=40,units="in",dpi=300, limitsize = FALSE)
##emissions
# Per-feedstock GWP-by-stage chart: stacked stage contributions with net GWP
# as a half-width overlay bar, facet-wrapped over all feedstocks.
p <- ggplot()+
  geom_bar(data = waste_emiss_fs_2[which(waste_emiss_fs_2$emiss_MT_per != 0),],
           aes(x=Tech, y=emiss_MT_per, fill=Stage), stat="identity", position = "stack") +
  geom_bar(data = waste_emiss_fs_3[which(waste_emiss_fs_3$emiss_MT_per != 0),],
           aes(x=Tech, y=emiss_MT_per, fill=Stage), stat="identity", width=0.5)+
  geom_hline(yintercept=0, size=0.05)+
  theme_bw() +
  theme(text = element_text(size=32)) +
  scale_fill_manual(values=colors_stage, breaks=c("collection", "transport1","process","transport2", "enduse", "dispmain", "dispco", "net"),
                    labels = c("Collection", "Transport to facility", "Processing",
                               "Transmission & Distribution", "Enduse", "Displacement/main product", "Displacement/co-product(s)","Net GWP")) +
  guides(fill = guide_legend(title = "", label.theme = element_text(size = 40, angle = 0))) +
  scale_y_continuous(name="Metric ton CO2e/Mg ww", limits = c(-4,4)) +
  labs(x = '') +
  theme(legend.position=c(0.8,0.1),
        legend.key = element_rect(size = 32),
        legend.key.height = unit(1, "in"),
        legend.key.width = unit(1, "in"),
        axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.3),
        axis.title.y = element_text(size=40, face = "bold"),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
  facet_wrap(~Feedstock, scales = "free", ncol = 8)+
  theme(strip.background =element_rect(fill="white"), strip.text = element_text(size = 32, face = "bold"))
ggsave(paste(County_FOLDER, "/emiss_by_stage_fs_nobioCO2.png", sep=""), plot=p, width=60,height=40,units="in",dpi=300, limitsize = FALSE)
###sensitivity analysis on power generation
#clean power (-50%)
# Sensitivity scenario: rebuild the analysis table (waste_2_1) using the
# alternative grid emission factors in state_GHG_ef1 (cleaner power mix),
# then recompute every pathway's energy and emission columns below.
waste_2_1 <- merge(waste_1, state_GHG_ef1)
#Collection & Transportation_1 (energy - GJ, emiss - kg CO2e)
# Wet-weight production from dry mass and moisture content — assumes Prod is
# dry Mg and MC is percent moisture; TODO confirm against upstream inputs.
waste_2_1$Prod_ww <- waste_2_1$Prod/(1-waste_2_1$MC/100)
waste_2_1$collection_en <- waste_2_1$Prod_ww * waste_2_1$Collection_Diesel
waste_2_1$collection_emiss <- waste_2_1$collection_en * waste_2_1$Diesel_GHG
# Transport energy scales with haul distance (Transport_km_1) to the facility.
waste_2_1$transport1_en <- waste_2_1$Prod_ww * waste_2_1$Transport_km_1 * waste_2_1$Transport_Diesel
waste_2_1$transport1_emiss <- waste_2_1$transport1_en * waste_2_1$Diesel_GHG
#E1
# E1 pathway (electricity main product, heat co-product), sensitivity grid.
# Outputs are delivered energy net of losses; displacement credits (negative
# terms) are avoided grid electricity/heat generation.
#electricity T&D loss - 6.5%, heat loss - 20%
waste_2_1$E1_elec <- waste_2_1$Prod_ww * waste_2_1$E1.e.out * (1-0.065)
waste_2_1$E1_heat <- waste_2_1$Prod_ww * waste_2_1$E1.h.out * (1-0.2)
waste_2_1$E1_energy <- waste_2_1$E1_elec + waste_2_1$E1_heat
waste_2_1$E1_energymain <- waste_2_1$E1_elec
waste_2_1$E1_energyco <- waste_2_1$E1_heat
# Net energy = output - process inputs (.e/.h/.d.in) - collection/transport,
# weighted by this pathway's adoption share (E1_tech).
waste_2_1$E1_netenergy <- (waste_2_1$E1_energy - waste_2_1$Prod_ww * (waste_2_1$E1.e.in + waste_2_1$E1.h.in + waste_2_1$E1.d.in) - waste_2_1$collection_en - waste_2_1$transport1_en) * waste_2_1$E1_tech
waste_2_1$E1_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$E1_tech
waste_2_1$E1_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$E1_tech
waste_2_1$E1_processemiss <- waste_2_1$Prod_ww * (waste_2_1$E1.e.in * waste_2_1$Powergen_GHG
                                              + waste_2_1$E1.h.in * waste_2_1$Heatgen_GHG
                                              + waste_2_1$E1.d.in * waste_2_1$Diesel_GHG
                                              + waste_2_1$Nonbio_emiss1) * waste_2_1$E1_tech
waste_2_1$E1_enduseemiss <- 0
# Displacement: avoided grid electricity and heat (negative = credit).
waste_2_1$E1_dispemiss <- 0 - waste_2_1$E1_elec* waste_2_1$Powergen_GHG - waste_2_1$E1_heat* waste_2_1$Heatgen_GHG
waste_2_1$E1_dispemissmain <- 0 - waste_2_1$E1_elec* waste_2_1$Powergen_GHG
waste_2_1$E1_dispemissco <- 0 - waste_2_1$E1_heat* waste_2_1$Heatgen_GHG
waste_2_1$E1_netemiss <- waste_2_1$E1_collectionemiss + waste_2_1$E1_transport1emiss + waste_2_1$E1_processemiss + waste_2_1$E1_enduseemiss + waste_2_1$E1_dispemiss
#E2
# E2 pathway — same structure as E1 (electricity main, heat co-product)
# but with the E2.* coefficient set.
#electricity T&D loss - 6.5%, heat loss - 20%
waste_2_1$E2_elec <- waste_2_1$Prod_ww * waste_2_1$E2.e.out * (1-0.065)
waste_2_1$E2_heat <- waste_2_1$Prod_ww * waste_2_1$E2.h.out * (1-0.2)
waste_2_1$E2_energy <- waste_2_1$E2_elec + waste_2_1$E2_heat
waste_2_1$E2_energymain <- waste_2_1$E2_elec
waste_2_1$E2_energyco <- waste_2_1$E2_heat
waste_2_1$E2_netenergy <- (waste_2_1$E2_energy - waste_2_1$Prod_ww * (waste_2_1$E2.e.in + waste_2_1$E2.h.in + waste_2_1$E2.d.in) - waste_2_1$collection_en - waste_2_1$transport1_en) * waste_2_1$E2_tech
waste_2_1$E2_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$E2_tech
waste_2_1$E2_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$E2_tech
waste_2_1$E2_processemiss <- waste_2_1$Prod_ww * (waste_2_1$E2.e.in * waste_2_1$Powergen_GHG
                                              + waste_2_1$E2.h.in * waste_2_1$Heatgen_GHG
                                              + waste_2_1$E2.d.in * waste_2_1$Diesel_GHG
                                              + waste_2_1$Nonbio_emiss1) * waste_2_1$E2_tech
waste_2_1$E2_enduseemiss <- 0
waste_2_1$E2_dispemiss <- 0 - waste_2_1$E2_elec* waste_2_1$Powergen_GHG - waste_2_1$E2_heat* waste_2_1$Heatgen_GHG
waste_2_1$E2_dispemissmain <- 0 - waste_2_1$E2_elec* waste_2_1$Powergen_GHG
waste_2_1$E2_dispemissco <- 0 - waste_2_1$E2_heat* waste_2_1$Heatgen_GHG
waste_2_1$E2_netemiss <- waste_2_1$E2_collectionemiss + waste_2_1$E2_transport1emiss + waste_2_1$E2_processemiss + waste_2_1$E2_enduseemiss + waste_2_1$E2_dispemiss
#E3
# E3 pathway — electricity-only output (no heat co-product, so no *_energyco
# or *_dispemissco columns are created for E3).
#electricity T&D loss - 6.5%
waste_2_1$E3_energy <- waste_2_1$Prod_ww * waste_2_1$E3.e.out * (1-0.065)
waste_2_1$E3_energymain <- waste_2_1$E3_energy
waste_2_1$E3_netenergy <- (waste_2_1$E3_energy - waste_2_1$Prod_ww * (waste_2_1$E3.e.in + waste_2_1$E3.h.in + waste_2_1$E3.d.in) - waste_2_1$collection_en - waste_2_1$transport1_en) * waste_2_1$E3_tech
waste_2_1$E3_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$E3_tech
waste_2_1$E3_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$E3_tech
waste_2_1$E3_processemiss <- waste_2_1$Prod_ww * (waste_2_1$E3.e.in * waste_2_1$Powergen_GHG
                                              + waste_2_1$E3.h.in * waste_2_1$Heatgen_GHG
                                              + waste_2_1$E3.d.in * waste_2_1$Diesel_GHG
                                              + waste_2_1$Nonbio_emiss1) * waste_2_1$E3_tech
waste_2_1$E3_enduseemiss <- 0
# Displacement credit: avoided grid electricity only.
waste_2_1$E3_dispemiss <- 0 - waste_2_1$E3_energy* waste_2_1$Powergen_GHG
waste_2_1$E3_dispemissmain <- 0 - waste_2_1$E3_energy* waste_2_1$Powergen_GHG
waste_2_1$E3_netemiss <- waste_2_1$E3_collectionemiss + waste_2_1$E3_transport1emiss + waste_2_1$E3_processemiss + waste_2_1$E3_enduseemiss + waste_2_1$E3_dispemiss
#E4
# E4 pathway — electricity + heat outputs like E1/E2, but process inputs are
# natural gas (.ng.in) and diesel (.d.in) rather than grid power/heat.
#electricity T&D loss - 6.5%, heat loss - 20%
waste_2_1$E4_elec <- waste_2_1$Prod_ww * waste_2_1$E4.e.out * (1-0.065)
waste_2_1$E4_heat <- waste_2_1$Prod_ww * waste_2_1$E4.h.out * (1-0.2)
waste_2_1$E4_energy <- waste_2_1$E4_elec + waste_2_1$E4_heat
waste_2_1$E4_energymain <- waste_2_1$E4_elec
waste_2_1$E4_energyco <- waste_2_1$E4_heat
waste_2_1$E4_netenergy <- (waste_2_1$E4_energy - waste_2_1$Prod_ww * (waste_2_1$E4.ng.in + waste_2_1$E4.d.in) - waste_2_1$collection_en - waste_2_1$transport1_en) * waste_2_1$E4_tech
waste_2_1$E4_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$E4_tech
waste_2_1$E4_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$E4_tech
waste_2_1$E4_processemiss <- waste_2_1$Prod_ww * (waste_2_1$E4.ng.in * waste_2_1$NG_GHG
                                              + waste_2_1$E4.d.in * waste_2_1$Diesel_GHG
                                              + waste_2_1$Nonbio_emiss1) * waste_2_1$E4_tech
waste_2_1$E4_enduseemiss <- 0
waste_2_1$E4_dispemiss <- 0 - waste_2_1$E4_elec* waste_2_1$Powergen_GHG - waste_2_1$E4_heat* waste_2_1$Heatgen_GHG
waste_2_1$E4_dispemissmain <- 0 - waste_2_1$E4_elec* waste_2_1$Powergen_GHG
waste_2_1$E4_dispemissco <- 0 - waste_2_1$E4_heat* waste_2_1$Heatgen_GHG
waste_2_1$E4_netemiss <- waste_2_1$E4_collectionemiss + waste_2_1$E4_transport1emiss + waste_2_1$E4_processemiss + waste_2_1$E4_enduseemiss + waste_2_1$E4_dispemiss
#M1
# M1 pathway — methane (pipeline gas) main product; output net of 2% leakage,
# displacement credit against natural gas (NG_GHG).
#methane leakage - 2%
waste_2_1$M1_energy <- waste_2_1$Prod_ww * waste_2_1$M1.m.out * (1-0.02)
waste_2_1$M1_energymain <- waste_2_1$M1_energy
waste_2_1$M1_netenergy <- (waste_2_1$M1_energy - waste_2_1$Prod_ww * (waste_2_1$M1.e.in + waste_2_1$M1.h.in) - waste_2_1$collection_en - waste_2_1$transport1_en) * waste_2_1$M1_tech
waste_2_1$M1_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$M1_tech
waste_2_1$M1_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$M1_tech
waste_2_1$M1_processemiss <- waste_2_1$Prod_ww * (waste_2_1$M1.e.in * waste_2_1$Powergen_GHG + waste_2_1$M1.h.in * waste_2_1$Heatgen_GHG)* waste_2_1$M1_tech
# Leaked CH4 (2% of gross output) converted to CO2e with GWP100 = 28; the
# /50 term is presumably CH4 LHV (~50 MJ/kg) — NOTE(review): confirm the
# GJ-to-kg unit conversion factor here.
waste_2_1$M1_transport2emiss <- waste_2_1$Prod_ww * waste_2_1$M1.m.out *0.02 /50 *28 * waste_2_1$M1_tech
waste_2_1$M1_enduseemiss <- waste_2_1$Prod_ww * waste_2_1$Nonbio_emiss2 * waste_2_1$M1_tech
waste_2_1$M1_dispemiss <- 0 - waste_2_1$M1_energy* waste_2_1$NG_GHG
waste_2_1$M1_dispemissmain <- waste_2_1$M1_dispemiss
waste_2_1$M1_netemiss <- waste_2_1$M1_collectionemiss + waste_2_1$M1_transport1emiss + waste_2_1$M1_processemiss + waste_2_1$M1_transport2emiss + waste_2_1$M1_enduseemiss + waste_2_1$M1_dispemiss
#M2
# M2 pathway — methane main product, same structure as M1 but with the M2.*
# coefficient set (includes a diesel process input).
#methane leakage - 2%
waste_2_1$M2_energy <- waste_2_1$Prod_ww * waste_2_1$M2.m.out * (1-0.02)
waste_2_1$M2_energymain <- waste_2_1$M2_energy
waste_2_1$M2_netenergy <- (waste_2_1$M2_energy - waste_2_1$Prod_ww * (waste_2_1$M2.e.in + waste_2_1$M2.h.in + waste_2_1$M2.d.in) - waste_2_1$collection_en - waste_2_1$transport1_en) * waste_2_1$M2_tech
waste_2_1$M2_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$M2_tech
waste_2_1$M2_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$M2_tech
waste_2_1$M2_processemiss <- waste_2_1$Prod_ww * (waste_2_1$M2.e.in * waste_2_1$Powergen_GHG +
                                                waste_2_1$M2.h.in * waste_2_1$Heatgen_GHG +
                                                waste_2_1$M2.d.in * waste_2_1$Diesel_GHG) * waste_2_1$M2_tech
# Fix: the leaked-methane term was weighted by M1_tech (copy-paste from the
# M1 block); use M2_tech so this pathway carries its own adoption weight.
waste_2_1$M2_transport2emiss <- waste_2_1$Prod_ww * waste_2_1$M2.m.out *0.02 /50 *28 * waste_2_1$M2_tech
waste_2_1$M2_enduseemiss <- waste_2_1$Prod_ww * waste_2_1$Nonbio_emiss2 * waste_2_1$M2_tech
waste_2_1$M2_dispemiss <- 0 - waste_2_1$M2_energy* waste_2_1$NG_GHG
waste_2_1$M2_dispemissmain <- waste_2_1$M2_dispemiss
waste_2_1$M2_netemiss <- waste_2_1$M2_collectionemiss + waste_2_1$M2_transport1emiss + waste_2_1$M2_processemiss + waste_2_1$M2_transport2emiss + waste_2_1$M2_enduseemiss + waste_2_1$M2_dispemiss
#Eth1
# Eth1 pathway — ethanol main product (displaces gasoline), electricity
# co-product; ethanol distribution (transport2) energy scaled by its mass
# via the 26.95 MJ/kg energy intensity.
#energy intensity of ethanol - 26.95 MJ/kg
waste_2_1$Eth1_elec <- waste_2_1$Prod_ww * waste_2_1$Eth1.e.out * (1-0.065)
waste_2_1$Eth1_eth <- waste_2_1$Prod_ww * waste_2_1$Eth1.eth.out
waste_2_1$Eth1_energy <- waste_2_1$Eth1_elec + waste_2_1$Eth1_eth
waste_2_1$Eth1_energymain <- waste_2_1$Eth1_eth
waste_2_1$Eth1_energyco <- waste_2_1$Eth1_elec
# Fix: the collection/transport term was multiplied by M1_tech (copy-paste
# from the M1 block) even though the whole expression is already scaled by
# Eth1_tech; drop the inner factor to match the Rd1/Rd2/Bj1 pattern.
waste_2_1$Eth1_netenergy <- (waste_2_1$Eth1_energy - waste_2_1$Prod_ww * (waste_2_1$Eth1.ng.in + waste_2_1$Eth1.d.in) -
                             (waste_2_1$collection_en + waste_2_1$transport1_en) -
                             waste_2_1$Eth1_eth / 26.95 * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel) * waste_2_1$Eth1_tech
waste_2_1$Eth1_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$Eth1_tech
waste_2_1$Eth1_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$Eth1_tech
waste_2_1$Eth1_processemiss <- waste_2_1$Prod_ww * (waste_2_1$Eth1.ng.in * waste_2_1$Heatgen_GHG + waste_2_1$Eth1.d.in * waste_2_1$Diesel_GHG) * waste_2_1$Eth1_tech
waste_2_1$Eth1_transport2emiss <- waste_2_1$Eth1_eth / 26.95 * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel * waste_2_1$Diesel_GHG
waste_2_1$Eth1_enduseemiss <- waste_2_1$Prod_ww * waste_2_1$Nonbio_emiss2 * waste_2_1$Eth1_tech
# Displacement credits: gasoline (main) and grid electricity (co-product).
waste_2_1$Eth1_dispemiss <- 0 - waste_2_1$Eth1_eth* waste_2_1$Gasoline_GHG - waste_2_1$Eth1_elec * waste_2_1$Powergen_GHG
waste_2_1$Eth1_dispemissmain <- 0 - waste_2_1$Eth1_eth* waste_2_1$Gasoline_GHG
waste_2_1$Eth1_dispemissco <- 0 - waste_2_1$Eth1_elec * waste_2_1$Powergen_GHG
waste_2_1$Eth1_netemiss <- waste_2_1$Eth1_collectionemiss + waste_2_1$Eth1_transport1emiss + waste_2_1$Eth1_processemiss + waste_2_1$Eth1_transport2emiss + waste_2_1$Eth1_enduseemiss + waste_2_1$Eth1_dispemiss
#Rd1
# Rd1 pathway — renewable diesel main product; gasoline, jet fuel, methane
# and electricity co-products. Liquid-fuel distribution energy uses each
# fuel's mass (energy / MJ-per-kg intensity) times the transport2 distance.
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
#electricity T&D loss - 6.5%, methane leakage - 2%
waste_2_1$Rd1_d <- waste_2_1$Prod_ww * waste_2_1$Rd1.d.out
waste_2_1$Rd1_g <- waste_2_1$Prod_ww * waste_2_1$Rd1.g.out
waste_2_1$Rd1_j <- waste_2_1$Prod_ww * waste_2_1$Rd1.j.out
waste_2_1$Rd1_m <- waste_2_1$Prod_ww * waste_2_1$Rd1.m.out * (1-0.02)
waste_2_1$Rd1_elec <- waste_2_1$Prod_ww * waste_2_1$Rd1.e.out * (1-0.065)
waste_2_1$Rd1_energy <- waste_2_1$Rd1_d + waste_2_1$Rd1_g + waste_2_1$Rd1_j + waste_2_1$Rd1_m + waste_2_1$Rd1_elec
waste_2_1$Rd1_energymain <- waste_2_1$Rd1_d
waste_2_1$Rd1_energyco <- waste_2_1$Rd1_g + waste_2_1$Rd1_j + waste_2_1$Rd1_m + waste_2_1$Rd1_elec
waste_2_1$Rd1_netenergy <-(waste_2_1$Rd1_energy - waste_2_1$Prod_ww * waste_2_1$Rd1.e.in -
                            (waste_2_1$collection_en + waste_2_1$transport1_en) -
                            waste_2_1$Rd1_d / 42.79 * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel -
                            waste_2_1$Rd1_g / 41.74 * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel -
                            waste_2_1$Rd1_j / 43.10 * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel ) * waste_2_1$Rd1_tech
waste_2_1$Rd1_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$Rd1_tech
waste_2_1$Rd1_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$Rd1_tech
waste_2_1$Rd1_processemiss <- waste_2_1$Prod_ww * waste_2_1$Rd1.e.in * waste_2_1$Powergen_GHG * waste_2_1$Rd1_tech
waste_2_1$Rd1_transport2emiss <- (waste_2_1$Rd1_d / 42.79 + waste_2_1$Rd1_g / 41.74 + waste_2_1$Rd1_j / 43.10) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel * waste_2_1$Diesel_GHG
waste_2_1$Rd1_enduseemiss <- waste_2_1$Prod_ww * waste_2_1$Nonbio_emiss2 * waste_2_1$Rd1_tech
# Displacement credits for all five product streams.
waste_2_1$Rd1_dispemiss <- 0 - waste_2_1$Rd1_d *waste_2_1$Diesel_GHG - waste_2_1$Rd1_g * waste_2_1$Gasoline_GHG - waste_2_1$Rd1_j * waste_2_1$Jet_GHG -
  waste_2_1$Rd1_m * waste_2_1$NG_GHG - waste_2_1$Rd1_elec * waste_2_1$Powergen_GHG
waste_2_1$Rd1_dispemissmain <- 0 - waste_2_1$Rd1_d *waste_2_1$Diesel_GHG
waste_2_1$Rd1_dispemissco <- 0 - waste_2_1$Rd1_g * waste_2_1$Gasoline_GHG - waste_2_1$Rd1_j * waste_2_1$Jet_GHG -
  waste_2_1$Rd1_m * waste_2_1$NG_GHG - waste_2_1$Rd1_elec * waste_2_1$Powergen_GHG
waste_2_1$Rd1_netemiss <- waste_2_1$Rd1_collectionemiss + waste_2_1$Rd1_transport1emiss + waste_2_1$Rd1_processemiss + waste_2_1$Rd1_enduseemiss + waste_2_1$Rd1_transport2emiss + waste_2_1$Rd1_dispemiss
#Rd2
# Rd2 pathway — renewable diesel main product, gasoline co-product; process
# inputs are electricity and hydrogen (the .ng.in coefficient is priced with
# H2_GHG below).
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74
waste_2_1$Rd2_d <- waste_2_1$Prod_ww * waste_2_1$Rd2.d.out
waste_2_1$Rd2_g <- waste_2_1$Prod_ww * waste_2_1$Rd2.g.out
waste_2_1$Rd2_energy <- waste_2_1$Rd2_d + waste_2_1$Rd2_g
waste_2_1$Rd2_energymain <- waste_2_1$Rd2_d
waste_2_1$Rd2_energyco <- waste_2_1$Rd2_g
waste_2_1$Rd2_netenergy <-(waste_2_1$Rd2_energy - waste_2_1$Prod_ww * (waste_2_1$Rd2.e.in + waste_2_1$Rd2.ng.in) -
                            (waste_2_1$collection_en + waste_2_1$transport1_en) -
                            waste_2_1$Rd2_d / 42.79 * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel -
                            waste_2_1$Rd2_g / 41.74 * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel ) * waste_2_1$Rd2_tech
waste_2_1$Rd2_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$Rd2_tech
waste_2_1$Rd2_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$Rd2_tech
waste_2_1$Rd2_processemiss <- waste_2_1$Prod_ww * (waste_2_1$Rd2.e.in * waste_2_1$Powergen_GHG + waste_2_1$Rd2.ng.in * waste_2_1$H2_GHG) * waste_2_1$Rd2_tech
waste_2_1$Rd2_transport2emiss <- (waste_2_1$Rd2_d / 42.79 + waste_2_1$Rd2_g / 41.74) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel * waste_2_1$Diesel_GHG
waste_2_1$Rd2_enduseemiss <- waste_2_1$Prod_ww * waste_2_1$Nonbio_emiss2 * waste_2_1$Rd2_tech
waste_2_1$Rd2_dispemiss <- 0 - waste_2_1$Rd2_d *waste_2_1$Diesel_GHG - waste_2_1$Rd2_g* waste_2_1$Gasoline_GHG
waste_2_1$Rd2_dispemissmain <- 0 - waste_2_1$Rd2_d *waste_2_1$Diesel_GHG
waste_2_1$Rd2_dispemissco <- 0 - waste_2_1$Rd2_g* waste_2_1$Gasoline_GHG
waste_2_1$Rd2_netemiss <- waste_2_1$Rd2_collectionemiss + waste_2_1$Rd2_transport1emiss + waste_2_1$Rd2_processemiss + waste_2_1$Rd2_transport2emiss + waste_2_1$Rd2_enduseemiss + waste_2_1$Rd2_dispemiss
#Bj1
# Bj1 pathway — diesel main product with gasoline and jet-fuel co-products;
# hydrogen is the only process input (priced with H2_GHG).
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
waste_2_1$Bj1_d <- waste_2_1$Prod_ww * waste_2_1$Bj1.d.out
waste_2_1$Bj1_g <- waste_2_1$Prod_ww * waste_2_1$Bj1.g.out
waste_2_1$Bj1_j <- waste_2_1$Prod_ww * waste_2_1$Bj1.j.out
waste_2_1$Bj1_energy <- waste_2_1$Bj1_d + waste_2_1$Bj1_g + waste_2_1$Bj1_j
waste_2_1$Bj1_energymain <- waste_2_1$Bj1_d
waste_2_1$Bj1_energyco <- waste_2_1$Bj1_g + waste_2_1$Bj1_j
waste_2_1$Bj1_netenergy <-(waste_2_1$Bj1_energy - waste_2_1$Prod_ww * waste_2_1$Bj1.h2.in -
                            (waste_2_1$collection_en + waste_2_1$transport1_en) -
                            (waste_2_1$Bj1_d / 42.79 + waste_2_1$Bj1_g / 41.74 +waste_2_1$Bj1_j / 43.10) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel) * waste_2_1$Bj1_tech
waste_2_1$Bj1_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$Bj1_tech
waste_2_1$Bj1_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$Bj1_tech
waste_2_1$Bj1_processemiss <- waste_2_1$Prod_ww * waste_2_1$Bj1.h2.in * waste_2_1$H2_GHG * waste_2_1$Bj1_tech
waste_2_1$Bj1_transport2emiss <- (waste_2_1$Bj1_d / 42.79 + waste_2_1$Bj1_g / 41.74 + waste_2_1$Bj1_j / 43.10) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel * waste_2_1$Diesel_GHG
waste_2_1$Bj1_enduseemiss <- waste_2_1$Prod_ww * waste_2_1$Nonbio_emiss2 * waste_2_1$Bj1_tech
waste_2_1$Bj1_dispemiss <- 0 - waste_2_1$Bj1_d *waste_2_1$Diesel_GHG - waste_2_1$Bj1_g* waste_2_1$Gasoline_GHG - waste_2_1$Bj1_j * waste_2_1$Jet_GHG
waste_2_1$Bj1_dispemissmain <- 0 - waste_2_1$Bj1_d *waste_2_1$Diesel_GHG
waste_2_1$Bj1_dispemissco <- 0 - waste_2_1$Bj1_g* waste_2_1$Gasoline_GHG - waste_2_1$Bj1_j * waste_2_1$Jet_GHG
waste_2_1$Bj1_netemiss <- waste_2_1$Bj1_collectionemiss + waste_2_1$Bj1_transport1emiss + waste_2_1$Bj1_processemiss + waste_2_1$Bj1_transport2emiss + waste_2_1$Bj1_enduseemiss + waste_2_1$Bj1_dispemiss
#Bj2
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bio-jet pathway 2 (scenario 1): jet fuel is the only product; consumes
# hydrogen (Bj2.h2.in).
waste_2_1$Bj2_energy <- waste_2_1$Prod_ww * waste_2_1$Bj2.j.out
# BUG FIX: main-product energy previously copied Bj1_energy; Bj2's only
# product is its own jet output.
waste_2_1$Bj2_energymain <- waste_2_1$Bj2_energy
waste_2_1$Bj2_netenergy <-(waste_2_1$Bj2_energy - waste_2_1$Prod_ww * waste_2_1$Bj2.h2.in -
                             (waste_2_1$collection_en + waste_2_1$transport1_en) -
                             waste_2_1$Bj2_energy / 43.10 * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel ) * waste_2_1$Bj2_tech
waste_2_1$Bj2_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$Bj2_tech
waste_2_1$Bj2_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$Bj2_tech
# BUG FIX: scale hydrogen-production emissions by the Bj2 technology share,
# consistent with every other pathway (Bj1, Bj3-Bj6).
waste_2_1$Bj2_processemiss <- waste_2_1$Prod_ww * waste_2_1$Bj2.h2.in * waste_2_1$H2_GHG * waste_2_1$Bj2_tech
waste_2_1$Bj2_transport2emiss <- waste_2_1$Bj2_energy / 43.10 * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel * waste_2_1$Diesel_GHG
waste_2_1$Bj2_enduseemiss <- waste_2_1$Prod_ww * waste_2_1$Nonbio_emiss2 * waste_2_1$Bj2_tech
# Displacement credit: jet output offsets fossil jet fuel.
waste_2_1$Bj2_dispemiss <- 0 - waste_2_1$Bj2_energy * waste_2_1$Jet_GHG
waste_2_1$Bj2_dispemissmain <- waste_2_1$Bj2_dispemiss
waste_2_1$Bj2_netemiss <- waste_2_1$Bj2_collectionemiss + waste_2_1$Bj2_transport1emiss + waste_2_1$Bj2_processemiss + waste_2_1$Bj2_transport2emiss + waste_2_1$Bj2_enduseemiss + waste_2_1$Bj2_dispemiss
#Bj3
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bio-jet pathway 3 (scenario 1): jet fuel is the main product; diesel and
# gasoline are co-products; consumes electricity (Bj3.e.in).
waste_2_1$Bj3_d <- waste_2_1$Prod_ww * waste_2_1$Bj3.d.out
waste_2_1$Bj3_g <- waste_2_1$Prod_ww * waste_2_1$Bj3.g.out
waste_2_1$Bj3_j <- waste_2_1$Prod_ww * waste_2_1$Bj3.j.out
waste_2_1$Bj3_energy <- waste_2_1$Bj3_d + waste_2_1$Bj3_g + waste_2_1$Bj3_j
waste_2_1$Bj3_energymain <- waste_2_1$Bj3_j
waste_2_1$Bj3_energyco <- waste_2_1$Bj3_d + waste_2_1$Bj3_g
# Net energy = fuel outputs - electricity input - collection/first-leg
# transport - fuel-distribution diesel, scaled by the tech adoption share.
waste_2_1$Bj3_netenergy <-(waste_2_1$Bj3_energy - waste_2_1$Prod_ww * waste_2_1$Bj3.e.in -
                             (waste_2_1$collection_en + waste_2_1$transport1_en) -
                             (waste_2_1$Bj3_d / 42.79 + waste_2_1$Bj3_g / 41.74 +waste_2_1$Bj3_j / 43.10) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel) * waste_2_1$Bj3_tech
waste_2_1$Bj3_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$Bj3_tech
waste_2_1$Bj3_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$Bj3_tech
waste_2_1$Bj3_processemiss <- waste_2_1$Prod_ww * waste_2_1$Bj3.e.in * waste_2_1$Powergen_GHG * waste_2_1$Bj3_tech
waste_2_1$Bj3_transport2emiss <- (waste_2_1$Bj3_d / 42.79 + waste_2_1$Bj3_g / 41.74 + waste_2_1$Bj3_j / 43.10) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel * waste_2_1$Diesel_GHG
waste_2_1$Bj3_enduseemiss <- waste_2_1$Prod_ww * waste_2_1$Nonbio_emiss2 * waste_2_1$Bj3_tech
# Displacement credits against fossil diesel/gasoline/jet.
waste_2_1$Bj3_dispemiss <- 0 - waste_2_1$Bj3_d *waste_2_1$Diesel_GHG - waste_2_1$Bj3_g* waste_2_1$Gasoline_GHG - waste_2_1$Bj3_j * waste_2_1$Jet_GHG
waste_2_1$Bj3_dispemissmain <- 0 - waste_2_1$Bj3_j * waste_2_1$Jet_GHG
waste_2_1$Bj3_dispemissco <- 0 - waste_2_1$Bj3_d * waste_2_1$Diesel_GHG - waste_2_1$Bj3_g * waste_2_1$Gasoline_GHG
waste_2_1$Bj3_netemiss <- waste_2_1$Bj3_collectionemiss + waste_2_1$Bj3_transport1emiss + waste_2_1$Bj3_processemiss + waste_2_1$Bj3_transport2emiss + waste_2_1$Bj3_enduseemiss + waste_2_1$Bj3_dispemiss
#Bj4
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bio-jet pathway 4 (scenario 1): jet fuel is the main product; consumes
# hydrogen (Bj4.h2.in). Structure parallels Bj1/Bj3.
waste_2_1$Bj4_d <- waste_2_1$Prod_ww * waste_2_1$Bj4.d.out
waste_2_1$Bj4_g <- waste_2_1$Prod_ww * waste_2_1$Bj4.g.out
waste_2_1$Bj4_j <- waste_2_1$Prod_ww * waste_2_1$Bj4.j.out
waste_2_1$Bj4_energy <- waste_2_1$Bj4_d + waste_2_1$Bj4_g + waste_2_1$Bj4_j
waste_2_1$Bj4_energymain <- waste_2_1$Bj4_j
waste_2_1$Bj4_energyco <- waste_2_1$Bj4_d + waste_2_1$Bj4_g
# Net energy = fuel outputs - H2 input - collection/first-leg transport -
# fuel-distribution diesel, scaled by the tech adoption share.
waste_2_1$Bj4_netenergy <-(waste_2_1$Bj4_energy - waste_2_1$Prod_ww * waste_2_1$Bj4.h2.in -
                             (waste_2_1$collection_en + waste_2_1$transport1_en) -
                             (waste_2_1$Bj4_d / 42.79 + waste_2_1$Bj4_g / 41.74 +waste_2_1$Bj4_j / 43.10) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel) * waste_2_1$Bj4_tech
waste_2_1$Bj4_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$Bj4_tech
waste_2_1$Bj4_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$Bj4_tech
waste_2_1$Bj4_processemiss <- waste_2_1$Prod_ww * waste_2_1$Bj4.h2.in * waste_2_1$H2_GHG * waste_2_1$Bj4_tech
waste_2_1$Bj4_transport2emiss <- (waste_2_1$Bj4_d / 42.79 + waste_2_1$Bj4_g / 41.74 + waste_2_1$Bj4_j / 43.10) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel * waste_2_1$Diesel_GHG
waste_2_1$Bj4_enduseemiss <- waste_2_1$Prod_ww * waste_2_1$Nonbio_emiss2 * waste_2_1$Bj4_tech
# Displacement credits against fossil diesel/gasoline/jet.
waste_2_1$Bj4_dispemiss <- 0 - waste_2_1$Bj4_d *waste_2_1$Diesel_GHG - waste_2_1$Bj4_g* waste_2_1$Gasoline_GHG - waste_2_1$Bj4_j * waste_2_1$Jet_GHG
waste_2_1$Bj4_dispemissmain <- 0 - waste_2_1$Bj4_j * waste_2_1$Jet_GHG
waste_2_1$Bj4_dispemissco <- 0 - waste_2_1$Bj4_d * waste_2_1$Diesel_GHG - waste_2_1$Bj4_g * waste_2_1$Gasoline_GHG
waste_2_1$Bj4_netemiss <- waste_2_1$Bj4_collectionemiss + waste_2_1$Bj4_transport1emiss + waste_2_1$Bj4_processemiss + waste_2_1$Bj4_transport2emiss+ waste_2_1$Bj4_enduseemiss + waste_2_1$Bj4_dispemiss
#Bj5
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bio-jet pathway 5 (scenario 1): jet fuel is the main product; consumes
# electricity (Bj5.e.in). Structure parallels Bj3.
waste_2_1$Bj5_d <- waste_2_1$Prod_ww * waste_2_1$Bj5.d.out
waste_2_1$Bj5_g <- waste_2_1$Prod_ww * waste_2_1$Bj5.g.out
waste_2_1$Bj5_j <- waste_2_1$Prod_ww * waste_2_1$Bj5.j.out
waste_2_1$Bj5_energy <- waste_2_1$Bj5_d + waste_2_1$Bj5_g + waste_2_1$Bj5_j
waste_2_1$Bj5_energymain <- waste_2_1$Bj5_j
waste_2_1$Bj5_energyco <- waste_2_1$Bj5_d + waste_2_1$Bj5_g
# Net energy = fuel outputs - electricity input - collection/first-leg
# transport - fuel-distribution diesel, scaled by the tech adoption share.
waste_2_1$Bj5_netenergy <-(waste_2_1$Bj5_energy - waste_2_1$Prod_ww * waste_2_1$Bj5.e.in -
                             (waste_2_1$collection_en + waste_2_1$transport1_en) -
                             (waste_2_1$Bj5_d / 42.79 + waste_2_1$Bj5_g / 41.74 +waste_2_1$Bj5_j / 43.10) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel) * waste_2_1$Bj5_tech
waste_2_1$Bj5_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$Bj5_tech
waste_2_1$Bj5_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$Bj5_tech
waste_2_1$Bj5_processemiss <- waste_2_1$Prod_ww * waste_2_1$Bj5.e.in * waste_2_1$Powergen_GHG * waste_2_1$Bj5_tech
waste_2_1$Bj5_transport2emiss <- (waste_2_1$Bj5_d / 42.79 + waste_2_1$Bj5_g / 41.74 + waste_2_1$Bj5_j / 43.10) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel * waste_2_1$Diesel_GHG
waste_2_1$Bj5_enduseemiss <- waste_2_1$Prod_ww * waste_2_1$Nonbio_emiss2 * waste_2_1$Bj5_tech
# Displacement credits against fossil diesel/gasoline/jet.
waste_2_1$Bj5_dispemiss <- 0 - waste_2_1$Bj5_d *waste_2_1$Diesel_GHG - waste_2_1$Bj5_g* waste_2_1$Gasoline_GHG - waste_2_1$Bj5_j * waste_2_1$Jet_GHG
waste_2_1$Bj5_dispemissmain <- 0 - waste_2_1$Bj5_j * waste_2_1$Jet_GHG
waste_2_1$Bj5_dispemissco <- 0 - waste_2_1$Bj5_d * waste_2_1$Diesel_GHG - waste_2_1$Bj5_g * waste_2_1$Gasoline_GHG
waste_2_1$Bj5_netemiss <- waste_2_1$Bj5_collectionemiss + waste_2_1$Bj5_transport1emiss + waste_2_1$Bj5_processemiss + waste_2_1$Bj5_transport2emiss + waste_2_1$Bj5_enduseemiss + waste_2_1$Bj5_dispemiss
#Bj6
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bio-jet pathway 6 (scenario 1): jet fuel is the main product; consumes
# both hydrogen (Bj6.h2.in) and electricity (Bj6.e.in).
waste_2_1$Bj6_d <- waste_2_1$Prod_ww * waste_2_1$Bj6.d.out
waste_2_1$Bj6_g <- waste_2_1$Prod_ww * waste_2_1$Bj6.g.out
waste_2_1$Bj6_j <- waste_2_1$Prod_ww * waste_2_1$Bj6.j.out
waste_2_1$Bj6_energy <- waste_2_1$Bj6_d + waste_2_1$Bj6_g + waste_2_1$Bj6_j
waste_2_1$Bj6_energymain <- waste_2_1$Bj6_j
waste_2_1$Bj6_energyco <- waste_2_1$Bj6_d + waste_2_1$Bj6_g
# Net energy = fuel outputs - H2 and electricity inputs - collection/first-leg
# transport - fuel-distribution diesel, scaled by the tech adoption share.
waste_2_1$Bj6_netenergy <-(waste_2_1$Bj6_energy - waste_2_1$Prod_ww * waste_2_1$Bj6.h2.in - waste_2_1$Prod_ww * waste_2_1$Bj6.e.in -
                             (waste_2_1$collection_en + waste_2_1$transport1_en) -
                             (waste_2_1$Bj6_d / 42.79 + waste_2_1$Bj6_g / 41.74 +waste_2_1$Bj6_j / 43.10) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel) * waste_2_1$Bj6_tech
waste_2_1$Bj6_collectionemiss <- waste_2_1$collection_emiss * waste_2_1$Bj6_tech
waste_2_1$Bj6_transport1emiss <- waste_2_1$transport1_emiss * waste_2_1$Bj6_tech
waste_2_1$Bj6_processemiss <- waste_2_1$Prod_ww * (waste_2_1$Bj6.h2.in * waste_2_1$H2_GHG + waste_2_1$Bj6.e.in * waste_2_1$Powergen_GHG) * waste_2_1$Bj6_tech
waste_2_1$Bj6_transport2emiss <- (waste_2_1$Bj6_d / 42.79 + waste_2_1$Bj6_g / 41.74 + waste_2_1$Bj6_j / 43.10) * waste_2_1$Transport_km_2 * waste_2_1$Transport_Diesel * waste_2_1$Diesel_GHG
waste_2_1$Bj6_enduseemiss <- waste_2_1$Prod_ww * waste_2_1$Nonbio_emiss2 * waste_2_1$Bj6_tech
# Displacement credits against fossil diesel/gasoline/jet.
waste_2_1$Bj6_dispemiss <- 0 - waste_2_1$Bj6_d *waste_2_1$Diesel_GHG - waste_2_1$Bj6_g* waste_2_1$Gasoline_GHG - waste_2_1$Bj6_j * waste_2_1$Jet_GHG
waste_2_1$Bj6_dispemissmain <- 0 - waste_2_1$Bj6_j * waste_2_1$Jet_GHG
waste_2_1$Bj6_dispemissco <- 0 - waste_2_1$Bj6_d * waste_2_1$Diesel_GHG - waste_2_1$Bj6_g * waste_2_1$Gasoline_GHG
waste_2_1$Bj6_netemiss <- waste_2_1$Bj6_collectionemiss + waste_2_1$Bj6_transport1emiss + waste_2_1$Bj6_processemiss + waste_2_1$Bj6_transport2emiss + waste_2_1$Bj6_enduseemiss + waste_2_1$Bj6_dispemiss
# Scenario 2: fossil roll back (+50%). Join waste data with the scenario-2
# state GHG emission factors, then compute collection and first-leg transport
# energy (GJ) and emissions (kg CO2e) per county / waste type.
waste_2_2 <- merge(waste_1, state_GHG_ef2)
# Wet-weight production from dry production and moisture content (MC, %).
waste_2_2$Prod_ww <- with(waste_2_2, Prod / (1 - MC / 100))
# Diesel burned collecting the waste, and its emissions.
waste_2_2$collection_en <- with(waste_2_2, Prod_ww * Collection_Diesel)
waste_2_2$collection_emiss <- with(waste_2_2, collection_en * Diesel_GHG)
# Diesel burned hauling the waste to the conversion facility, and emissions.
waste_2_2$transport1_en <- with(waste_2_2, Prod_ww * Transport_km_1 * Transport_Diesel)
waste_2_2$transport1_emiss <- with(waste_2_2, transport1_en * Diesel_GHG)
#E1
#electricity T&D loss - 6.5%, heat loss - 20%
# Pathway E1 (scenario 2): combined electricity + heat; electricity is the
# main product, heat the co-product.
waste_2_2$E1_elec <- waste_2_2$Prod_ww * waste_2_2$E1.e.out * (1-0.065)
waste_2_2$E1_heat <- waste_2_2$Prod_ww * waste_2_2$E1.h.out * (1-0.2)
waste_2_2$E1_energy <- waste_2_2$E1_elec + waste_2_2$E1_heat
waste_2_2$E1_energymain <- waste_2_2$E1_elec
waste_2_2$E1_energyco <- waste_2_2$E1_heat
# Net energy after subtracting process inputs (elec/heat/diesel) and
# collection + first-leg transport, scaled by the tech adoption share.
waste_2_2$E1_netenergy <- (waste_2_2$E1_energy - waste_2_2$Prod_ww * (waste_2_2$E1.e.in + waste_2_2$E1.h.in + waste_2_2$E1.d.in) - waste_2_2$collection_en - waste_2_2$transport1_en) * waste_2_2$E1_tech
waste_2_2$E1_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$E1_tech
waste_2_2$E1_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$E1_tech
# Process emissions: upstream GHG of purchased inputs plus non-biogenic
# conversion emissions (Nonbio_emiss1).
waste_2_2$E1_processemiss <- waste_2_2$Prod_ww * (waste_2_2$E1.e.in * waste_2_2$Powergen_GHG
                                                  + waste_2_2$E1.h.in * waste_2_2$Heatgen_GHG
                                                  + waste_2_2$E1.d.in * waste_2_2$Diesel_GHG
                                                  + waste_2_2$Nonbio_emiss1) * waste_2_2$E1_tech
waste_2_2$E1_enduseemiss <- 0
# Displacement credit: exported electricity/heat offsets grid power and heat.
waste_2_2$E1_dispemiss <- 0 - waste_2_2$E1_elec* waste_2_2$Powergen_GHG - waste_2_2$E1_heat* waste_2_2$Heatgen_GHG
waste_2_2$E1_dispemissmain <- 0 - waste_2_2$E1_elec* waste_2_2$Powergen_GHG
waste_2_2$E1_dispemissco <- 0 - waste_2_2$E1_heat* waste_2_2$Heatgen_GHG
waste_2_2$E1_netemiss <- waste_2_2$E1_collectionemiss + waste_2_2$E1_transport1emiss + waste_2_2$E1_processemiss + waste_2_2$E1_enduseemiss + waste_2_2$E1_dispemiss
#E2
#electricity T&D loss - 6.5%, heat loss - 20%
# Pathway E2 (scenario 2): combined electricity + heat; same structure as E1
# with E2-specific coefficients.
waste_2_2$E2_elec <- waste_2_2$Prod_ww * waste_2_2$E2.e.out * (1-0.065)
waste_2_2$E2_heat <- waste_2_2$Prod_ww * waste_2_2$E2.h.out * (1-0.2)
waste_2_2$E2_energy <- waste_2_2$E2_elec + waste_2_2$E2_heat
waste_2_2$E2_energymain <- waste_2_2$E2_elec
waste_2_2$E2_energyco <- waste_2_2$E2_heat
waste_2_2$E2_netenergy <- (waste_2_2$E2_energy - waste_2_2$Prod_ww * (waste_2_2$E2.e.in + waste_2_2$E2.h.in + waste_2_2$E2.d.in) - waste_2_2$collection_en - waste_2_2$transport1_en) * waste_2_2$E2_tech
waste_2_2$E2_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$E2_tech
waste_2_2$E2_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$E2_tech
# Process emissions: upstream GHG of purchased inputs plus non-biogenic
# conversion emissions (Nonbio_emiss1).
waste_2_2$E2_processemiss <- waste_2_2$Prod_ww * (waste_2_2$E2.e.in * waste_2_2$Powergen_GHG
                                                  + waste_2_2$E2.h.in * waste_2_2$Heatgen_GHG
                                                  + waste_2_2$E2.d.in * waste_2_2$Diesel_GHG
                                                  + waste_2_2$Nonbio_emiss1) * waste_2_2$E2_tech
waste_2_2$E2_enduseemiss <- 0
# Displacement credit: exported electricity/heat offsets grid power and heat.
waste_2_2$E2_dispemiss <- 0 - waste_2_2$E2_elec* waste_2_2$Powergen_GHG - waste_2_2$E2_heat* waste_2_2$Heatgen_GHG
waste_2_2$E2_dispemissmain <- 0 - waste_2_2$E2_elec* waste_2_2$Powergen_GHG
waste_2_2$E2_dispemissco <- 0 - waste_2_2$E2_heat* waste_2_2$Heatgen_GHG
waste_2_2$E2_netemiss <- waste_2_2$E2_collectionemiss + waste_2_2$E2_transport1emiss + waste_2_2$E2_processemiss + waste_2_2$E2_enduseemiss + waste_2_2$E2_dispemiss
# Pathway E3 (scenario 2): electricity-only conversion.
# Grid transmission & distribution loss: 6.5%.
waste_2_2$E3_energy <- with(waste_2_2, Prod_ww * E3.e.out * (1 - 0.065))
# Electricity is the sole product, so it is also the "main" energy output.
waste_2_2$E3_energymain <- waste_2_2$E3_energy
# Net energy after subtracting process inputs (elec/heat/diesel) and
# collection + first-leg transport, scaled by the tech adoption share.
waste_2_2$E3_netenergy <- with(
  waste_2_2,
  (E3_energy - Prod_ww * (E3.e.in + E3.h.in + E3.d.in) - collection_en - transport1_en) * E3_tech
)
waste_2_2$E3_collectionemiss <- with(waste_2_2, collection_emiss * E3_tech)
waste_2_2$E3_transport1emiss <- with(waste_2_2, transport1_emiss * E3_tech)
# Process emissions: upstream GHG of purchased inputs plus non-biogenic
# conversion emissions (Nonbio_emiss1).
waste_2_2$E3_processemiss <- with(
  waste_2_2,
  Prod_ww * (E3.e.in * Powergen_GHG + E3.h.in * Heatgen_GHG + E3.d.in * Diesel_GHG + Nonbio_emiss1) * E3_tech
)
waste_2_2$E3_enduseemiss <- 0
# Displacement credit: exported electricity offsets grid generation.
waste_2_2$E3_dispemiss <- with(waste_2_2, 0 - E3_energy * Powergen_GHG)
waste_2_2$E3_dispemissmain <- waste_2_2$E3_dispemiss
waste_2_2$E3_netemiss <- with(
  waste_2_2,
  E3_collectionemiss + E3_transport1emiss + E3_processemiss + E3_enduseemiss + E3_dispemiss
)
#E4
#electricity T&D loss - 6.5%, heat loss - 20%
# Pathway E4 (scenario 2): electricity + heat; unlike E1-E3, inputs are
# natural gas (E4.ng.in) and diesel (E4.d.in).
waste_2_2$E4_elec <- waste_2_2$Prod_ww * waste_2_2$E4.e.out * (1-0.065)
waste_2_2$E4_heat <- waste_2_2$Prod_ww * waste_2_2$E4.h.out * (1-0.2)
waste_2_2$E4_energy <- waste_2_2$E4_elec + waste_2_2$E4_heat
waste_2_2$E4_energymain <- waste_2_2$E4_elec
waste_2_2$E4_energyco <- waste_2_2$E4_heat
waste_2_2$E4_netenergy <- (waste_2_2$E4_energy - waste_2_2$Prod_ww * (waste_2_2$E4.ng.in + waste_2_2$E4.d.in) - waste_2_2$collection_en - waste_2_2$transport1_en) * waste_2_2$E4_tech
waste_2_2$E4_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$E4_tech
waste_2_2$E4_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$E4_tech
# Process emissions from NG and diesel inputs plus non-biogenic conversion
# emissions (Nonbio_emiss1).
waste_2_2$E4_processemiss <- waste_2_2$Prod_ww * (waste_2_2$E4.ng.in * waste_2_2$NG_GHG
                                                  + waste_2_2$E4.d.in * waste_2_2$Diesel_GHG
                                                  + waste_2_2$Nonbio_emiss1) * waste_2_2$E4_tech
waste_2_2$E4_enduseemiss <- 0
# Displacement credit: exported electricity/heat offsets grid power and heat.
waste_2_2$E4_dispemiss <- 0 - waste_2_2$E4_elec* waste_2_2$Powergen_GHG - waste_2_2$E4_heat* waste_2_2$Heatgen_GHG
waste_2_2$E4_dispemissmain <- 0 - waste_2_2$E4_elec* waste_2_2$Powergen_GHG
waste_2_2$E4_dispemissco <- 0 - waste_2_2$E4_heat* waste_2_2$Heatgen_GHG
waste_2_2$E4_netemiss <- waste_2_2$E4_collectionemiss + waste_2_2$E4_transport1emiss + waste_2_2$E4_processemiss + waste_2_2$E4_enduseemiss + waste_2_2$E4_dispemiss
#M1
#methane leakage - 2%
# Pathway M1 (scenario 2): biomethane production; 2% of the methane leaks
# before delivery.
waste_2_2$M1_energy <- waste_2_2$Prod_ww * waste_2_2$M1.m.out * (1-0.02)
waste_2_2$M1_energymain <- waste_2_2$M1_energy
waste_2_2$M1_netenergy <- (waste_2_2$M1_energy - waste_2_2$Prod_ww * (waste_2_2$M1.e.in + waste_2_2$M1.h.in) - waste_2_2$collection_en - waste_2_2$transport1_en) * waste_2_2$M1_tech
waste_2_2$M1_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$M1_tech
waste_2_2$M1_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$M1_tech
waste_2_2$M1_processemiss <- waste_2_2$Prod_ww * (waste_2_2$M1.e.in * waste_2_2$Powergen_GHG + waste_2_2$M1.h.in * waste_2_2$Heatgen_GHG)* waste_2_2$M1_tech
# Leaked CH4 converted from energy to mass (/50, presumably ~50 MJ/kg LHV --
# TODO confirm units) and weighted by GWP100 of 28.
waste_2_2$M1_transport2emiss <- waste_2_2$Prod_ww * waste_2_2$M1.m.out *0.02 /50 *28 * waste_2_2$M1_tech
waste_2_2$M1_enduseemiss <- waste_2_2$Prod_ww * waste_2_2$Nonbio_emiss2 * waste_2_2$M1_tech
# Displacement credit: biomethane offsets fossil natural gas.
waste_2_2$M1_dispemiss <- 0 - waste_2_2$M1_energy* waste_2_2$NG_GHG
waste_2_2$M1_dispemissmain <- waste_2_2$M1_dispemiss
waste_2_2$M1_netemiss <- waste_2_2$M1_collectionemiss + waste_2_2$M1_transport1emiss + waste_2_2$M1_processemiss + waste_2_2$M1_transport2emiss + waste_2_2$M1_enduseemiss + waste_2_2$M1_dispemiss
#M2
#methane leakage - 2%
# Pathway M2 (scenario 2): biomethane production; 2% of the methane leaks
# before delivery. Same structure as M1 with a diesel input (M2.d.in).
waste_2_2$M2_energy <- waste_2_2$Prod_ww * waste_2_2$M2.m.out * (1-0.02)
waste_2_2$M2_energymain <- waste_2_2$M2_energy
waste_2_2$M2_netenergy <- (waste_2_2$M2_energy - waste_2_2$Prod_ww * (waste_2_2$M2.e.in + waste_2_2$M2.h.in + waste_2_2$M2.d.in) - waste_2_2$collection_en - waste_2_2$transport1_en) * waste_2_2$M2_tech
waste_2_2$M2_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$M2_tech
waste_2_2$M2_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$M2_tech
waste_2_2$M2_processemiss <- waste_2_2$Prod_ww * (waste_2_2$M2.e.in * waste_2_2$Powergen_GHG +
                                                    waste_2_2$M2.h.in * waste_2_2$Heatgen_GHG +
                                                    waste_2_2$M2.d.in * waste_2_2$Diesel_GHG) * waste_2_2$M2_tech
# Leaked CH4 converted from energy to mass (/50, presumably ~50 MJ/kg LHV --
# TODO confirm units) and weighted by GWP100 of 28.
# BUG FIX: leakage emissions were scaled by M1_tech (copy-paste from the M1
# block); M2's leakage must use the M2 technology share.
waste_2_2$M2_transport2emiss <- waste_2_2$Prod_ww * waste_2_2$M2.m.out *0.02 /50 *28 * waste_2_2$M2_tech
waste_2_2$M2_enduseemiss <- waste_2_2$Prod_ww * waste_2_2$Nonbio_emiss2 * waste_2_2$M2_tech
# Displacement credit: biomethane offsets fossil natural gas.
waste_2_2$M2_dispemiss <- 0 - waste_2_2$M2_energy* waste_2_2$NG_GHG
waste_2_2$M2_dispemissmain <- waste_2_2$M2_dispemiss
waste_2_2$M2_netemiss <- waste_2_2$M2_collectionemiss + waste_2_2$M2_transport1emiss + waste_2_2$M2_processemiss + waste_2_2$M2_transport2emiss + waste_2_2$M2_enduseemiss + waste_2_2$M2_dispemiss
#Eth1
#energy intensity of ethanol - 26.95 MJ/kg
# Pathway Eth1 (scenario 2): ethanol is the main product, exported
# electricity the co-product; inputs are natural gas and diesel.
waste_2_2$Eth1_elec <- waste_2_2$Prod_ww * waste_2_2$Eth1.e.out * (1-0.065)
waste_2_2$Eth1_eth <- waste_2_2$Prod_ww * waste_2_2$Eth1.eth.out
waste_2_2$Eth1_energy <- waste_2_2$Eth1_elec + waste_2_2$Eth1_eth
waste_2_2$Eth1_energymain <- waste_2_2$Eth1_eth
waste_2_2$Eth1_energyco <- waste_2_2$Eth1_elec
# Net energy = outputs - NG/diesel inputs - collection/first-leg transport -
# ethanol-distribution diesel, scaled by the Eth1 tech adoption share.
# BUG FIX: the collection + transport term was multiplied by M1_tech
# (copy-paste from the M1 block); the whole expression is already scaled by
# Eth1_tech, matching every other pathway's net-energy formula.
waste_2_2$Eth1_netenergy <- (waste_2_2$Eth1_energy - waste_2_2$Prod_ww * (waste_2_2$Eth1.ng.in + waste_2_2$Eth1.d.in) -
                               (waste_2_2$collection_en + waste_2_2$transport1_en) -
                               waste_2_2$Eth1_eth / 26.95 * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel) * waste_2_2$Eth1_tech
waste_2_2$Eth1_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$Eth1_tech
waste_2_2$Eth1_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$Eth1_tech
waste_2_2$Eth1_processemiss <- waste_2_2$Prod_ww * (waste_2_2$Eth1.ng.in * waste_2_2$Heatgen_GHG + waste_2_2$Eth1.d.in * waste_2_2$Diesel_GHG) * waste_2_2$Eth1_tech
waste_2_2$Eth1_transport2emiss <- waste_2_2$Eth1_eth / 26.95 * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel * waste_2_2$Diesel_GHG
waste_2_2$Eth1_enduseemiss <- waste_2_2$Prod_ww * waste_2_2$Nonbio_emiss2 * waste_2_2$Eth1_tech
# Displacement credits: ethanol offsets gasoline; electricity offsets grid.
waste_2_2$Eth1_dispemiss <- 0 - waste_2_2$Eth1_eth* waste_2_2$Gasoline_GHG - waste_2_2$Eth1_elec * waste_2_2$Powergen_GHG
waste_2_2$Eth1_dispemissmain <- 0 - waste_2_2$Eth1_eth* waste_2_2$Gasoline_GHG
waste_2_2$Eth1_dispemissco <- 0 - waste_2_2$Eth1_elec * waste_2_2$Powergen_GHG
waste_2_2$Eth1_netemiss <- waste_2_2$Eth1_collectionemiss + waste_2_2$Eth1_transport1emiss + waste_2_2$Eth1_processemiss + waste_2_2$Eth1_transport2emiss + waste_2_2$Eth1_enduseemiss + waste_2_2$Eth1_dispemiss
#Rd1
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
#electricity T&D loss - 6.5%, methane leakage - 2%
# Pathway Rd1 (scenario 2): renewable diesel is the main product; gasoline,
# jet, methane and electricity are co-products; consumes electricity.
waste_2_2$Rd1_d <- waste_2_2$Prod_ww * waste_2_2$Rd1.d.out
waste_2_2$Rd1_g <- waste_2_2$Prod_ww * waste_2_2$Rd1.g.out
waste_2_2$Rd1_j <- waste_2_2$Prod_ww * waste_2_2$Rd1.j.out
waste_2_2$Rd1_m <- waste_2_2$Prod_ww * waste_2_2$Rd1.m.out * (1-0.02)
waste_2_2$Rd1_elec <- waste_2_2$Prod_ww * waste_2_2$Rd1.e.out * (1-0.065)
waste_2_2$Rd1_energy <- waste_2_2$Rd1_d + waste_2_2$Rd1_g + waste_2_2$Rd1_j + waste_2_2$Rd1_m + waste_2_2$Rd1_elec
waste_2_2$Rd1_energymain <- waste_2_2$Rd1_d
waste_2_2$Rd1_energyco <- waste_2_2$Rd1_g + waste_2_2$Rd1_j + waste_2_2$Rd1_m + waste_2_2$Rd1_elec
# Net energy = outputs - electricity input - collection/first-leg transport -
# distribution diesel for the three liquid fuels (mass from LHV above).
waste_2_2$Rd1_netenergy <-(waste_2_2$Rd1_energy - waste_2_2$Prod_ww * waste_2_2$Rd1.e.in -
                             (waste_2_2$collection_en + waste_2_2$transport1_en) -
                             waste_2_2$Rd1_d / 42.79 * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel -
                             waste_2_2$Rd1_g / 41.74 * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel -
                             waste_2_2$Rd1_j / 43.10 * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel ) * waste_2_2$Rd1_tech
waste_2_2$Rd1_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$Rd1_tech
waste_2_2$Rd1_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$Rd1_tech
waste_2_2$Rd1_processemiss <- waste_2_2$Prod_ww * waste_2_2$Rd1.e.in * waste_2_2$Powergen_GHG * waste_2_2$Rd1_tech
waste_2_2$Rd1_transport2emiss <- (waste_2_2$Rd1_d / 42.79 + waste_2_2$Rd1_g / 41.74 + waste_2_2$Rd1_j / 43.10) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel * waste_2_2$Diesel_GHG
waste_2_2$Rd1_enduseemiss <- waste_2_2$Prod_ww * waste_2_2$Nonbio_emiss2 * waste_2_2$Rd1_tech
# Displacement credits against fossil diesel/gasoline/jet/NG/grid power.
waste_2_2$Rd1_dispemiss <- 0 - waste_2_2$Rd1_d *waste_2_2$Diesel_GHG - waste_2_2$Rd1_g * waste_2_2$Gasoline_GHG - waste_2_2$Rd1_j * waste_2_2$Jet_GHG -
  waste_2_2$Rd1_m * waste_2_2$NG_GHG - waste_2_2$Rd1_elec * waste_2_2$Powergen_GHG
waste_2_2$Rd1_dispemissmain <- 0 - waste_2_2$Rd1_d *waste_2_2$Diesel_GHG
waste_2_2$Rd1_dispemissco <- 0 - waste_2_2$Rd1_g * waste_2_2$Gasoline_GHG - waste_2_2$Rd1_j * waste_2_2$Jet_GHG -
  waste_2_2$Rd1_m * waste_2_2$NG_GHG - waste_2_2$Rd1_elec * waste_2_2$Powergen_GHG
waste_2_2$Rd1_netemiss <- waste_2_2$Rd1_collectionemiss + waste_2_2$Rd1_transport1emiss + waste_2_2$Rd1_processemiss + waste_2_2$Rd1_enduseemiss + waste_2_2$Rd1_transport2emiss + waste_2_2$Rd1_dispemiss
#Rd2
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74
# Pathway Rd2 (scenario 2): renewable diesel (main) + gasoline (co-product);
# consumes electricity and natural gas.
waste_2_2$Rd2_d <- waste_2_2$Prod_ww * waste_2_2$Rd2.d.out
waste_2_2$Rd2_g <- waste_2_2$Prod_ww * waste_2_2$Rd2.g.out
waste_2_2$Rd2_energy <- waste_2_2$Rd2_d + waste_2_2$Rd2_g
waste_2_2$Rd2_energymain <- waste_2_2$Rd2_d
waste_2_2$Rd2_energyco <- waste_2_2$Rd2_g
waste_2_2$Rd2_netenergy <-(waste_2_2$Rd2_energy - waste_2_2$Prod_ww * (waste_2_2$Rd2.e.in + waste_2_2$Rd2.ng.in) -
                             (waste_2_2$collection_en + waste_2_2$transport1_en) -
                             waste_2_2$Rd2_d / 42.79 * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel -
                             waste_2_2$Rd2_g / 41.74 * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel ) * waste_2_2$Rd2_tech
waste_2_2$Rd2_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$Rd2_tech
waste_2_2$Rd2_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$Rd2_tech
# NOTE(review): the NG input is priced with the H2 emission factor
# (Rd2.ng.in * H2_GHG) -- looks like NG is reformed to H2 on site, but
# confirm against the scenario-1 Rd2 block / coefficient definitions.
waste_2_2$Rd2_processemiss <- waste_2_2$Prod_ww * (waste_2_2$Rd2.e.in * waste_2_2$Powergen_GHG + waste_2_2$Rd2.ng.in * waste_2_2$H2_GHG) * waste_2_2$Rd2_tech
waste_2_2$Rd2_transport2emiss <- (waste_2_2$Rd2_d / 42.79 + waste_2_2$Rd2_g / 41.74) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel * waste_2_2$Diesel_GHG
waste_2_2$Rd2_enduseemiss <- waste_2_2$Prod_ww * waste_2_2$Nonbio_emiss2 * waste_2_2$Rd2_tech
# Displacement credits against fossil diesel and gasoline.
waste_2_2$Rd2_dispemiss <- 0 - waste_2_2$Rd2_d *waste_2_2$Diesel_GHG - waste_2_2$Rd2_g* waste_2_2$Gasoline_GHG
waste_2_2$Rd2_dispemissmain <- 0 - waste_2_2$Rd2_d *waste_2_2$Diesel_GHG
waste_2_2$Rd2_dispemissco <- 0 - waste_2_2$Rd2_g* waste_2_2$Gasoline_GHG
waste_2_2$Rd2_netemiss <- waste_2_2$Rd2_collectionemiss + waste_2_2$Rd2_transport1emiss + waste_2_2$Rd2_processemiss + waste_2_2$Rd2_transport2emiss + waste_2_2$Rd2_enduseemiss + waste_2_2$Rd2_dispemiss
#Bj1
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bio-jet pathway 1 (scenario 2): diesel is the main product; gasoline and
# jet are co-products; consumes hydrogen (Bj1.h2.in).
waste_2_2$Bj1_d <- waste_2_2$Prod_ww * waste_2_2$Bj1.d.out
waste_2_2$Bj1_g <- waste_2_2$Prod_ww * waste_2_2$Bj1.g.out
waste_2_2$Bj1_j <- waste_2_2$Prod_ww * waste_2_2$Bj1.j.out
waste_2_2$Bj1_energy <- waste_2_2$Bj1_d + waste_2_2$Bj1_g + waste_2_2$Bj1_j
waste_2_2$Bj1_energymain <- waste_2_2$Bj1_d
waste_2_2$Bj1_energyco <- waste_2_2$Bj1_g + waste_2_2$Bj1_j
# Net energy = fuel outputs - H2 input - collection/first-leg transport -
# fuel-distribution diesel, scaled by the tech adoption share.
waste_2_2$Bj1_netenergy <-(waste_2_2$Bj1_energy - waste_2_2$Prod_ww * waste_2_2$Bj1.h2.in -
                             (waste_2_2$collection_en + waste_2_2$transport1_en) -
                             (waste_2_2$Bj1_d / 42.79 + waste_2_2$Bj1_g / 41.74 +waste_2_2$Bj1_j / 43.10) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel) * waste_2_2$Bj1_tech
waste_2_2$Bj1_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$Bj1_tech
waste_2_2$Bj1_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$Bj1_tech
waste_2_2$Bj1_processemiss <- waste_2_2$Prod_ww * waste_2_2$Bj1.h2.in * waste_2_2$H2_GHG * waste_2_2$Bj1_tech
waste_2_2$Bj1_transport2emiss <- (waste_2_2$Bj1_d / 42.79 + waste_2_2$Bj1_g / 41.74 + waste_2_2$Bj1_j / 43.10) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel * waste_2_2$Diesel_GHG
waste_2_2$Bj1_enduseemiss <- waste_2_2$Prod_ww * waste_2_2$Nonbio_emiss2 * waste_2_2$Bj1_tech
# Displacement credits against fossil diesel/gasoline/jet.
waste_2_2$Bj1_dispemiss <- 0 - waste_2_2$Bj1_d *waste_2_2$Diesel_GHG - waste_2_2$Bj1_g* waste_2_2$Gasoline_GHG - waste_2_2$Bj1_j * waste_2_2$Jet_GHG
waste_2_2$Bj1_dispemissmain <- 0 - waste_2_2$Bj1_d *waste_2_2$Diesel_GHG
waste_2_2$Bj1_dispemissco <- 0 - waste_2_2$Bj1_g* waste_2_2$Gasoline_GHG - waste_2_2$Bj1_j * waste_2_2$Jet_GHG
waste_2_2$Bj1_netemiss <- waste_2_2$Bj1_collectionemiss + waste_2_2$Bj1_transport1emiss + waste_2_2$Bj1_processemiss + waste_2_2$Bj1_transport2emiss + waste_2_2$Bj1_enduseemiss + waste_2_2$Bj1_dispemiss
#Bj2
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bio-jet pathway 2 (scenario 2): jet fuel is the only product; consumes
# hydrogen (Bj2.h2.in).
waste_2_2$Bj2_energy <- waste_2_2$Prod_ww * waste_2_2$Bj2.j.out
# BUG FIX: main-product energy previously copied Bj1_energy; Bj2's only
# product is its own jet output.
waste_2_2$Bj2_energymain <- waste_2_2$Bj2_energy
waste_2_2$Bj2_netenergy <-(waste_2_2$Bj2_energy - waste_2_2$Prod_ww * waste_2_2$Bj2.h2.in -
                             (waste_2_2$collection_en + waste_2_2$transport1_en) -
                             waste_2_2$Bj2_energy / 43.10 * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel ) * waste_2_2$Bj2_tech
waste_2_2$Bj2_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$Bj2_tech
waste_2_2$Bj2_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$Bj2_tech
# BUG FIX: scale hydrogen-production emissions by the Bj2 technology share,
# consistent with every other pathway (Bj1, Bj3-Bj6).
waste_2_2$Bj2_processemiss <- waste_2_2$Prod_ww * waste_2_2$Bj2.h2.in * waste_2_2$H2_GHG * waste_2_2$Bj2_tech
waste_2_2$Bj2_transport2emiss <- waste_2_2$Bj2_energy / 43.10 * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel * waste_2_2$Diesel_GHG
waste_2_2$Bj2_enduseemiss <- waste_2_2$Prod_ww * waste_2_2$Nonbio_emiss2 * waste_2_2$Bj2_tech
# Displacement credit: jet output offsets fossil jet fuel.
waste_2_2$Bj2_dispemiss <- 0 - waste_2_2$Bj2_energy * waste_2_2$Jet_GHG
waste_2_2$Bj2_dispemissmain <- waste_2_2$Bj2_dispemiss
waste_2_2$Bj2_netemiss <- waste_2_2$Bj2_collectionemiss + waste_2_2$Bj2_transport1emiss + waste_2_2$Bj2_processemiss + waste_2_2$Bj2_transport2emiss + waste_2_2$Bj2_enduseemiss + waste_2_2$Bj2_dispemiss
#Bj3
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bio-jet pathway 3 (scenario 2): jet fuel is the main product; diesel and
# gasoline are co-products; consumes electricity (Bj3.e.in).
waste_2_2$Bj3_d <- waste_2_2$Prod_ww * waste_2_2$Bj3.d.out
waste_2_2$Bj3_g <- waste_2_2$Prod_ww * waste_2_2$Bj3.g.out
waste_2_2$Bj3_j <- waste_2_2$Prod_ww * waste_2_2$Bj3.j.out
waste_2_2$Bj3_energy <- waste_2_2$Bj3_d + waste_2_2$Bj3_g + waste_2_2$Bj3_j
waste_2_2$Bj3_energymain <- waste_2_2$Bj3_j
waste_2_2$Bj3_energyco <- waste_2_2$Bj3_d + waste_2_2$Bj3_g
# Net energy = fuel outputs - electricity input - collection/first-leg
# transport - fuel-distribution diesel, scaled by the tech adoption share.
waste_2_2$Bj3_netenergy <-(waste_2_2$Bj3_energy - waste_2_2$Prod_ww * waste_2_2$Bj3.e.in -
                             (waste_2_2$collection_en + waste_2_2$transport1_en) -
                             (waste_2_2$Bj3_d / 42.79 + waste_2_2$Bj3_g / 41.74 +waste_2_2$Bj3_j / 43.10) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel) * waste_2_2$Bj3_tech
waste_2_2$Bj3_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$Bj3_tech
waste_2_2$Bj3_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$Bj3_tech
waste_2_2$Bj3_processemiss <- waste_2_2$Prod_ww * waste_2_2$Bj3.e.in * waste_2_2$Powergen_GHG * waste_2_2$Bj3_tech
waste_2_2$Bj3_transport2emiss <- (waste_2_2$Bj3_d / 42.79 + waste_2_2$Bj3_g / 41.74 + waste_2_2$Bj3_j / 43.10) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel * waste_2_2$Diesel_GHG
waste_2_2$Bj3_enduseemiss <- waste_2_2$Prod_ww * waste_2_2$Nonbio_emiss2 * waste_2_2$Bj3_tech
# Displacement credits against fossil diesel/gasoline/jet.
waste_2_2$Bj3_dispemiss <- 0 - waste_2_2$Bj3_d *waste_2_2$Diesel_GHG - waste_2_2$Bj3_g* waste_2_2$Gasoline_GHG - waste_2_2$Bj3_j * waste_2_2$Jet_GHG
waste_2_2$Bj3_dispemissmain <- 0 - waste_2_2$Bj3_j * waste_2_2$Jet_GHG
waste_2_2$Bj3_dispemissco <- 0 - waste_2_2$Bj3_d * waste_2_2$Diesel_GHG - waste_2_2$Bj3_g * waste_2_2$Gasoline_GHG
waste_2_2$Bj3_netemiss <- waste_2_2$Bj3_collectionemiss + waste_2_2$Bj3_transport1emiss + waste_2_2$Bj3_processemiss + waste_2_2$Bj3_transport2emiss + waste_2_2$Bj3_enduseemiss + waste_2_2$Bj3_dispemiss
#Bj4
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bio-jet pathway 4 (scenario 2): jet fuel is the main product; consumes
# hydrogen (Bj4.h2.in). Structure parallels Bj1/Bj3.
waste_2_2$Bj4_d <- waste_2_2$Prod_ww * waste_2_2$Bj4.d.out
waste_2_2$Bj4_g <- waste_2_2$Prod_ww * waste_2_2$Bj4.g.out
waste_2_2$Bj4_j <- waste_2_2$Prod_ww * waste_2_2$Bj4.j.out
waste_2_2$Bj4_energy <- waste_2_2$Bj4_d + waste_2_2$Bj4_g + waste_2_2$Bj4_j
waste_2_2$Bj4_energymain <- waste_2_2$Bj4_j
waste_2_2$Bj4_energyco <- waste_2_2$Bj4_d + waste_2_2$Bj4_g
# Net energy = fuel outputs - H2 input - collection/first-leg transport -
# fuel-distribution diesel, scaled by the tech adoption share.
waste_2_2$Bj4_netenergy <-(waste_2_2$Bj4_energy - waste_2_2$Prod_ww * waste_2_2$Bj4.h2.in -
                             (waste_2_2$collection_en + waste_2_2$transport1_en) -
                             (waste_2_2$Bj4_d / 42.79 + waste_2_2$Bj4_g / 41.74 +waste_2_2$Bj4_j / 43.10) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel) * waste_2_2$Bj4_tech
waste_2_2$Bj4_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$Bj4_tech
waste_2_2$Bj4_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$Bj4_tech
waste_2_2$Bj4_processemiss <- waste_2_2$Prod_ww * waste_2_2$Bj4.h2.in * waste_2_2$H2_GHG * waste_2_2$Bj4_tech
waste_2_2$Bj4_transport2emiss <- (waste_2_2$Bj4_d / 42.79 + waste_2_2$Bj4_g / 41.74 + waste_2_2$Bj4_j / 43.10) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel * waste_2_2$Diesel_GHG
waste_2_2$Bj4_enduseemiss <- waste_2_2$Prod_ww * waste_2_2$Nonbio_emiss2 * waste_2_2$Bj4_tech
# Displacement credits against fossil diesel/gasoline/jet.
waste_2_2$Bj4_dispemiss <- 0 - waste_2_2$Bj4_d *waste_2_2$Diesel_GHG - waste_2_2$Bj4_g* waste_2_2$Gasoline_GHG - waste_2_2$Bj4_j * waste_2_2$Jet_GHG
waste_2_2$Bj4_dispemissmain <- 0 - waste_2_2$Bj4_j * waste_2_2$Jet_GHG
waste_2_2$Bj4_dispemissco <- 0 - waste_2_2$Bj4_d * waste_2_2$Diesel_GHG - waste_2_2$Bj4_g * waste_2_2$Gasoline_GHG
waste_2_2$Bj4_netemiss <- waste_2_2$Bj4_collectionemiss + waste_2_2$Bj4_transport1emiss + waste_2_2$Bj4_processemiss + waste_2_2$Bj4_transport2emiss+ waste_2_2$Bj4_enduseemiss + waste_2_2$Bj4_dispemiss
#Bj5
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bio-jet pathway 5 (scenario 2): jet fuel is the main product; consumes
# electricity (Bj5.e.in). Structure parallels Bj3.
waste_2_2$Bj5_d <- waste_2_2$Prod_ww * waste_2_2$Bj5.d.out
waste_2_2$Bj5_g <- waste_2_2$Prod_ww * waste_2_2$Bj5.g.out
waste_2_2$Bj5_j <- waste_2_2$Prod_ww * waste_2_2$Bj5.j.out
waste_2_2$Bj5_energy <- waste_2_2$Bj5_d + waste_2_2$Bj5_g + waste_2_2$Bj5_j
waste_2_2$Bj5_energymain <- waste_2_2$Bj5_j
waste_2_2$Bj5_energyco <- waste_2_2$Bj5_d + waste_2_2$Bj5_g
# Net energy = fuel outputs - electricity input - collection/first-leg
# transport - fuel-distribution diesel, scaled by the tech adoption share.
waste_2_2$Bj5_netenergy <-(waste_2_2$Bj5_energy - waste_2_2$Prod_ww * waste_2_2$Bj5.e.in -
                             (waste_2_2$collection_en + waste_2_2$transport1_en) -
                             (waste_2_2$Bj5_d / 42.79 + waste_2_2$Bj5_g / 41.74 +waste_2_2$Bj5_j / 43.10) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel) * waste_2_2$Bj5_tech
waste_2_2$Bj5_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$Bj5_tech
waste_2_2$Bj5_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$Bj5_tech
waste_2_2$Bj5_processemiss <- waste_2_2$Prod_ww * waste_2_2$Bj5.e.in * waste_2_2$Powergen_GHG * waste_2_2$Bj5_tech
waste_2_2$Bj5_transport2emiss <- (waste_2_2$Bj5_d / 42.79 + waste_2_2$Bj5_g / 41.74 + waste_2_2$Bj5_j / 43.10) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel * waste_2_2$Diesel_GHG
waste_2_2$Bj5_enduseemiss <- waste_2_2$Prod_ww * waste_2_2$Nonbio_emiss2 * waste_2_2$Bj5_tech
# Displacement credits against fossil diesel/gasoline/jet.
waste_2_2$Bj5_dispemiss <- 0 - waste_2_2$Bj5_d *waste_2_2$Diesel_GHG - waste_2_2$Bj5_g* waste_2_2$Gasoline_GHG - waste_2_2$Bj5_j * waste_2_2$Jet_GHG
waste_2_2$Bj5_dispemissmain <- 0 - waste_2_2$Bj5_j * waste_2_2$Jet_GHG
waste_2_2$Bj5_dispemissco <- 0 - waste_2_2$Bj5_d * waste_2_2$Diesel_GHG - waste_2_2$Bj5_g * waste_2_2$Gasoline_GHG
waste_2_2$Bj5_netemiss <- waste_2_2$Bj5_collectionemiss + waste_2_2$Bj5_transport1emiss + waste_2_2$Bj5_processemiss + waste_2_2$Bj5_transport2emiss + waste_2_2$Bj5_enduseemiss + waste_2_2$Bj5_dispemiss
#Bj6
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bio-jet pathway 6 (scenario 2): jet fuel is the main product; consumes
# both hydrogen (Bj6.h2.in) and electricity (Bj6.e.in).
waste_2_2$Bj6_d <- waste_2_2$Prod_ww * waste_2_2$Bj6.d.out
waste_2_2$Bj6_g <- waste_2_2$Prod_ww * waste_2_2$Bj6.g.out
waste_2_2$Bj6_j <- waste_2_2$Prod_ww * waste_2_2$Bj6.j.out
waste_2_2$Bj6_energy <- waste_2_2$Bj6_d + waste_2_2$Bj6_g + waste_2_2$Bj6_j
waste_2_2$Bj6_energymain <- waste_2_2$Bj6_j
waste_2_2$Bj6_energyco <- waste_2_2$Bj6_d + waste_2_2$Bj6_g
# Net energy = fuel outputs - H2 and electricity inputs - collection/first-leg
# transport - fuel-distribution diesel, scaled by the tech adoption share.
waste_2_2$Bj6_netenergy <-(waste_2_2$Bj6_energy - waste_2_2$Prod_ww * waste_2_2$Bj6.h2.in - waste_2_2$Prod_ww * waste_2_2$Bj6.e.in -
                             (waste_2_2$collection_en + waste_2_2$transport1_en) -
                             (waste_2_2$Bj6_d / 42.79 + waste_2_2$Bj6_g / 41.74 +waste_2_2$Bj6_j / 43.10) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel) * waste_2_2$Bj6_tech
waste_2_2$Bj6_collectionemiss <- waste_2_2$collection_emiss * waste_2_2$Bj6_tech
waste_2_2$Bj6_transport1emiss <- waste_2_2$transport1_emiss * waste_2_2$Bj6_tech
waste_2_2$Bj6_processemiss <- waste_2_2$Prod_ww * (waste_2_2$Bj6.h2.in * waste_2_2$H2_GHG + waste_2_2$Bj6.e.in * waste_2_2$Powergen_GHG) * waste_2_2$Bj6_tech
waste_2_2$Bj6_transport2emiss <- (waste_2_2$Bj6_d / 42.79 + waste_2_2$Bj6_g / 41.74 + waste_2_2$Bj6_j / 43.10) * waste_2_2$Transport_km_2 * waste_2_2$Transport_Diesel * waste_2_2$Diesel_GHG
waste_2_2$Bj6_enduseemiss <- waste_2_2$Prod_ww * waste_2_2$Nonbio_emiss2 * waste_2_2$Bj6_tech
# Displacement credits against fossil diesel/gasoline/jet.
waste_2_2$Bj6_dispemiss <- 0 - waste_2_2$Bj6_d *waste_2_2$Diesel_GHG - waste_2_2$Bj6_g* waste_2_2$Gasoline_GHG - waste_2_2$Bj6_j * waste_2_2$Jet_GHG
waste_2_2$Bj6_dispemissmain <- 0 - waste_2_2$Bj6_j * waste_2_2$Jet_GHG
waste_2_2$Bj6_dispemissco <- 0 - waste_2_2$Bj6_d * waste_2_2$Diesel_GHG - waste_2_2$Bj6_g * waste_2_2$Gasoline_GHG
waste_2_2$Bj6_netemiss <- waste_2_2$Bj6_collectionemiss + waste_2_2$Bj6_transport1emiss + waste_2_2$Bj6_processemiss + waste_2_2$Bj6_transport2emiss + waste_2_2$Bj6_enduseemiss + waste_2_2$Bj6_dispemiss
##extracting net emissions
# The three power-grid scenarios were summarized with three near-identical
# copies of the same pipeline; factor the shared logic into one helper so a
# change (e.g. a new technology) is made in one place. The same variables
# (waste_sen1, waste_sen1_1, ..., waste_sen3_1) are still produced for the
# downstream rbind/plot code.
# Technology display order used for the plot x-axis.
sen_tech_levels <- c("E1", "E2","E3","E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                     "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6")
# Columns carried into each scenario summary: id columns plus one
# *_netemiss column per technology (order matters for gather()).
sen_netemiss_cols <- c("Waste_type", "Prod_ww",
                       "E1_netemiss", "E2_netemiss", "E3_netemiss", "E4_netemiss",
                       "M1_netemiss", "M2_netemiss", "Eth1_netemiss",
                       "Rd1_netemiss", "Rd2_netemiss",
                       "Bj1_netemiss", "Bj2_netemiss", "Bj3_netemiss",
                       "Bj4_netemiss", "Bj5_netemiss", "Bj6_netemiss")
# Summarize one scenario: sum to the waste-type level, reshape to long
# format (one row per waste type x technology), split "E1_net" into
# Tech/Stage, and convert kg CO2e to metric tons CO2e per Mg wet waste.
summarize_sen_netemiss <- function(sen_df, power_label) {
  out <- aggregate(.~Waste_type, sen_df, sum)
  out <- gather(out, category, emiss_kg, E1_netemiss:Bj6_netemiss)
  out$category <- gsub("emiss", "", out$category)
  out <- separate(data = out, col = category, into = c("Tech", "Stage"), sep = "\\_")
  out$emiss_MT_per <- out$emiss_kg / out$Prod_ww / 1000  # kg -> Mg CO2e per Mg ww
  out$Tech <- factor(out$Tech, levels = sen_tech_levels)
  out$Power_gen <- power_label
  out
}
waste_sen1 <- waste_2_1[, sen_netemiss_cols]
waste_sen1_1 <- summarize_sen_netemiss(waste_sen1, "Cleaner power (-50% carbon intensity)")
waste_sen2 <- waste_2_2[, sen_netemiss_cols]
waste_sen2_1 <- summarize_sen_netemiss(waste_sen2, "Fossil rollback (+50% carbon intensity)")
waste_sen3 <- waste_2[, sen_netemiss_cols]
waste_sen3_1 <- summarize_sen_netemiss(waste_sen3, "Current state power grids")
# Stack the three power-grid scenarios into one long table for plotting.
# The Stage column (always "net" here) is dropped from each before binding,
# and Power_gen is fixed as an ordered factor so the legend/bars sort
# cleaner -> current -> fossil rollback.
drop_stage_col <- function(sen_df) sen_df[, names(sen_df) != "Stage"]
waste_sen <- rbind(drop_stage_col(waste_sen1_1),
                   drop_stage_col(waste_sen2_1),
                   drop_stage_col(waste_sen3_1))
waste_sen$Power_gen <- factor(waste_sen$Power_gen,
                              levels = c("Cleaner power (-50% carbon intensity)", "Current state power grids","Fossil rollback (+50% carbon intensity)"))
#sensitivity chart: grouped bars of net emissions per Mg wet waste, one bar
#per power-grid scenario, faceted by waste type. Rows with exactly zero
#emissions (technology not applicable to that waste type) are dropped so
#empty bars are not drawn.
colors_power <- c("Cleaner power (-50% carbon intensity)" = "#7FC97F",
                  "Current state power grids" = "#386CB0",
                  "Fossil rollback (+50% carbon intensity)" = "grey50")
p <- ggplot()+
  geom_bar(data = waste_sen[which(waste_sen$emiss_MT_per != 0),],
           aes(x=Tech, y=emiss_MT_per, fill=Power_gen), stat="identity", position = "dodge") +
  geom_hline(yintercept=0, size=0.05)+
  theme_bw() +
  theme(text = element_text(size=20)) +
  scale_fill_manual(values=colors_power) +
  guides(fill = guide_legend(title = "", label.theme = element_text(size = 20, angle = 0))) +
  scale_y_continuous(name="Metric ton CO2e/Mg ww", limits = c(-1.5, 1)) +
  labs(x = '') +
  theme(legend.position="top",
        axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.3),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
  facet_grid(~Waste_type, scales = "free_x", space = "free_x")+
  theme(strip.background =element_rect(fill="white"), strip.text = element_text(size = 20, face = "bold"))
#ggtitle("Net GHG emissions from waste biomass utilization - sensitivity analysis") +
#theme(plot.title = element_text(face = "bold", size = 14, hjust = 0))
# Write the figure alongside the other county-level outputs.
ggsave(paste(County_FOLDER, "/US_netemiss_SA.png", sep=""), plot=p, width=20,height=6,units="in",dpi=300)
##sensitivity analysis on transport distance
# TD = 25km: rebuild the inventory table using the 25-km transport-distance
# coefficient set (coefficients1) joined to the tech flags, waste inventory,
# and state emission factors. All pathway columns below are recomputed on
# this new frame, waste_2_3.
coefficients1 <- merge(coefficients1, tech)
waste_1_1 <- merge(waste, coefficients1)
waste_2_3 <- merge(waste_1_1, state_GHG_ef)
#Collection & Transportation_1 (energy - GJ, emiss - kg CO2e)
# Convert dry-basis production to wet mass using moisture content MC (%).
waste_2_3$Prod_ww <- waste_2_3$Prod/(1-waste_2_3$MC/100)
waste_2_3$collection_en <- waste_2_3$Prod_ww * waste_2_3$Collection_Diesel
waste_2_3$collection_emiss <- waste_2_3$collection_en * waste_2_3$Diesel_GHG
waste_2_3$transport1_en <- waste_2_3$Prod_ww * waste_2_3$Transport_km_1 * waste_2_3$Transport_Diesel
waste_2_3$transport1_emiss <- waste_2_3$transport1_en * waste_2_3$Diesel_GHG
#E1 (waste_2_3): pathway producing electricity (main product) and heat
#(co-product), with electricity/heat/diesel process inputs.
#electricity T&D loss - 6.5%, heat loss - 20%
waste_2_3$E1_elec <- waste_2_3$Prod_ww * waste_2_3$E1.e.out * (1-0.065)
waste_2_3$E1_heat <- waste_2_3$Prod_ww * waste_2_3$E1.h.out * (1-0.2)
waste_2_3$E1_energy <- waste_2_3$E1_elec + waste_2_3$E1_heat
waste_2_3$E1_energymain <- waste_2_3$E1_elec
waste_2_3$E1_energyco <- waste_2_3$E1_heat
waste_2_3$E1_netenergy <- (waste_2_3$E1_energy - waste_2_3$Prod_ww * (waste_2_3$E1.e.in + waste_2_3$E1.h.in + waste_2_3$E1.d.in) - waste_2_3$collection_en - waste_2_3$transport1_en) * waste_2_3$E1_tech
waste_2_3$E1_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$E1_tech
waste_2_3$E1_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$E1_tech
# Process emissions: electricity/heat/diesel inputs at their emission factors
# plus Nonbio_emiss1 (presumably the non-biogenic direct-emission term per Mg
# ww -- confirm against the coefficient documentation).
waste_2_3$E1_processemiss <- waste_2_3$Prod_ww * (waste_2_3$E1.e.in * waste_2_3$Powergen_GHG
                                                  + waste_2_3$E1.h.in * waste_2_3$Heatgen_GHG
                                                  + waste_2_3$E1.d.in * waste_2_3$Diesel_GHG
                                                  + waste_2_3$Nonbio_emiss1) * waste_2_3$E1_tech
waste_2_3$E1_enduseemiss <- 0
# Displacement credits for grid electricity and conventional heat replaced.
waste_2_3$E1_dispemiss <- 0 - waste_2_3$E1_elec* waste_2_3$Powergen_GHG - waste_2_3$E1_heat* waste_2_3$Heatgen_GHG
waste_2_3$E1_dispemissmain <- 0 - waste_2_3$E1_elec* waste_2_3$Powergen_GHG
waste_2_3$E1_dispemissco <- 0 - waste_2_3$E1_heat* waste_2_3$Heatgen_GHG
waste_2_3$E1_netemiss <- waste_2_3$E1_collectionemiss + waste_2_3$E1_transport1emiss + waste_2_3$E1_processemiss + waste_2_3$E1_enduseemiss + waste_2_3$E1_dispemiss
#E2 (waste_2_3): identical structure to E1 with the E2 coefficient set.
#electricity T&D loss - 6.5%, heat loss - 20%
waste_2_3$E2_elec <- waste_2_3$Prod_ww * waste_2_3$E2.e.out * (1-0.065)
waste_2_3$E2_heat <- waste_2_3$Prod_ww * waste_2_3$E2.h.out * (1-0.2)
waste_2_3$E2_energy <- waste_2_3$E2_elec + waste_2_3$E2_heat
waste_2_3$E2_energymain <- waste_2_3$E2_elec
waste_2_3$E2_energyco <- waste_2_3$E2_heat
waste_2_3$E2_netenergy <- (waste_2_3$E2_energy - waste_2_3$Prod_ww * (waste_2_3$E2.e.in + waste_2_3$E2.h.in + waste_2_3$E2.d.in) - waste_2_3$collection_en - waste_2_3$transport1_en) * waste_2_3$E2_tech
waste_2_3$E2_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$E2_tech
waste_2_3$E2_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$E2_tech
waste_2_3$E2_processemiss <- waste_2_3$Prod_ww * (waste_2_3$E2.e.in * waste_2_3$Powergen_GHG
                                                  + waste_2_3$E2.h.in * waste_2_3$Heatgen_GHG
                                                  + waste_2_3$E2.d.in * waste_2_3$Diesel_GHG
                                                  + waste_2_3$Nonbio_emiss1) * waste_2_3$E2_tech
waste_2_3$E2_enduseemiss <- 0
waste_2_3$E2_dispemiss <- 0 - waste_2_3$E2_elec* waste_2_3$Powergen_GHG - waste_2_3$E2_heat* waste_2_3$Heatgen_GHG
waste_2_3$E2_dispemissmain <- 0 - waste_2_3$E2_elec* waste_2_3$Powergen_GHG
waste_2_3$E2_dispemissco <- 0 - waste_2_3$E2_heat* waste_2_3$Heatgen_GHG
waste_2_3$E2_netemiss <- waste_2_3$E2_collectionemiss + waste_2_3$E2_transport1emiss + waste_2_3$E2_processemiss + waste_2_3$E2_enduseemiss + waste_2_3$E2_dispemiss
#E3 (waste_2_3): electricity-only pathway (no heat co-product).
#electricity T&D loss - 6.5%
waste_2_3$E3_energy <- waste_2_3$Prod_ww * waste_2_3$E3.e.out * (1-0.065)
waste_2_3$E3_energymain <- waste_2_3$E3_energy
waste_2_3$E3_netenergy <- (waste_2_3$E3_energy - waste_2_3$Prod_ww * (waste_2_3$E3.e.in + waste_2_3$E3.h.in + waste_2_3$E3.d.in) - waste_2_3$collection_en - waste_2_3$transport1_en) * waste_2_3$E3_tech
waste_2_3$E3_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$E3_tech
waste_2_3$E3_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$E3_tech
waste_2_3$E3_processemiss <- waste_2_3$Prod_ww * (waste_2_3$E3.e.in * waste_2_3$Powergen_GHG
                                                  + waste_2_3$E3.h.in * waste_2_3$Heatgen_GHG
                                                  + waste_2_3$E3.d.in * waste_2_3$Diesel_GHG
                                                  + waste_2_3$Nonbio_emiss1) * waste_2_3$E3_tech
waste_2_3$E3_enduseemiss <- 0
# Only the electricity displacement credit applies for E3.
waste_2_3$E3_dispemiss <- 0 - waste_2_3$E3_energy* waste_2_3$Powergen_GHG
waste_2_3$E3_dispemissmain <- 0 - waste_2_3$E3_energy* waste_2_3$Powergen_GHG
waste_2_3$E3_netemiss <- waste_2_3$E3_collectionemiss + waste_2_3$E3_transport1emiss + waste_2_3$E3_processemiss + waste_2_3$E3_enduseemiss + waste_2_3$E3_dispemiss
#E4 (waste_2_3): electricity + heat outputs; process inputs are natural gas
#and diesel (no purchased electricity/heat).
#electricity T&D loss - 6.5%, heat loss - 20%
waste_2_3$E4_elec <- waste_2_3$Prod_ww * waste_2_3$E4.e.out * (1-0.065)
waste_2_3$E4_heat <- waste_2_3$Prod_ww * waste_2_3$E4.h.out * (1-0.2)
waste_2_3$E4_energy <- waste_2_3$E4_elec + waste_2_3$E4_heat
waste_2_3$E4_energymain <- waste_2_3$E4_elec
waste_2_3$E4_energyco <- waste_2_3$E4_heat
waste_2_3$E4_netenergy <- (waste_2_3$E4_energy - waste_2_3$Prod_ww * (waste_2_3$E4.ng.in + waste_2_3$E4.d.in) - waste_2_3$collection_en - waste_2_3$transport1_en) * waste_2_3$E4_tech
waste_2_3$E4_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$E4_tech
waste_2_3$E4_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$E4_tech
waste_2_3$E4_processemiss <- waste_2_3$Prod_ww * (waste_2_3$E4.ng.in * waste_2_3$NG_GHG
                                                  + waste_2_3$E4.d.in * waste_2_3$Diesel_GHG
                                                  + waste_2_3$Nonbio_emiss1) * waste_2_3$E4_tech
waste_2_3$E4_enduseemiss <- 0
waste_2_3$E4_dispemiss <- 0 - waste_2_3$E4_elec* waste_2_3$Powergen_GHG - waste_2_3$E4_heat* waste_2_3$Heatgen_GHG
waste_2_3$E4_dispemissmain <- 0 - waste_2_3$E4_elec* waste_2_3$Powergen_GHG
waste_2_3$E4_dispemissco <- 0 - waste_2_3$E4_heat* waste_2_3$Heatgen_GHG
waste_2_3$E4_netemiss <- waste_2_3$E4_collectionemiss + waste_2_3$E4_transport1emiss + waste_2_3$E4_processemiss + waste_2_3$E4_enduseemiss + waste_2_3$E4_dispemiss
#M1 (waste_2_3): methane-producing pathway; output is discounted by a 2%
#leakage fraction, which is charged as CO2e in the transport-2 term below.
#methane leakage - 2%
waste_2_3$M1_energy <- waste_2_3$Prod_ww * waste_2_3$M1.m.out * (1-0.02)
waste_2_3$M1_energymain <- waste_2_3$M1_energy
waste_2_3$M1_netenergy <- (waste_2_3$M1_energy - waste_2_3$Prod_ww * (waste_2_3$M1.e.in + waste_2_3$M1.h.in) - waste_2_3$collection_en - waste_2_3$transport1_en) * waste_2_3$M1_tech
waste_2_3$M1_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$M1_tech
waste_2_3$M1_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$M1_tech
waste_2_3$M1_processemiss <- waste_2_3$Prod_ww * (waste_2_3$M1.e.in * waste_2_3$Powergen_GHG + waste_2_3$M1.h.in * waste_2_3$Heatgen_GHG)* waste_2_3$M1_tech
# Leaked methane: 2% of gross output; the /50 presumably converts MJ to kg
# CH4 (~50 MJ/kg) and the *28 is presumably the CH4 GWP100 factor --
# confirm against the inventory assumptions.
waste_2_3$M1_transport2emiss <- waste_2_3$Prod_ww * waste_2_3$M1.m.out *0.02 /50 *28 * waste_2_3$M1_tech
waste_2_3$M1_enduseemiss <- waste_2_3$Prod_ww * waste_2_3$Nonbio_emiss2 * waste_2_3$M1_tech
# Displacement credit for fossil natural gas replaced.
waste_2_3$M1_dispemiss <- 0 - waste_2_3$M1_energy* waste_2_3$NG_GHG
waste_2_3$M1_dispemissmain <- waste_2_3$M1_dispemiss
waste_2_3$M1_netemiss <- waste_2_3$M1_collectionemiss + waste_2_3$M1_transport1emiss + waste_2_3$M1_processemiss + waste_2_3$M1_transport2emiss + waste_2_3$M1_enduseemiss + waste_2_3$M1_dispemiss
#M2 (waste_2_3): methane-producing pathway with electricity, heat and diesel
#process inputs; 2% of the methane output is assumed lost as leakage.
#methane leakage - 2%
waste_2_3$M2_energy <- waste_2_3$Prod_ww * waste_2_3$M2.m.out * (1-0.02)
waste_2_3$M2_energymain <- waste_2_3$M2_energy
waste_2_3$M2_netenergy <- (waste_2_3$M2_energy - waste_2_3$Prod_ww * (waste_2_3$M2.e.in + waste_2_3$M2.h.in + waste_2_3$M2.d.in) - waste_2_3$collection_en - waste_2_3$transport1_en) * waste_2_3$M2_tech
waste_2_3$M2_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$M2_tech
waste_2_3$M2_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$M2_tech
waste_2_3$M2_processemiss <- waste_2_3$Prod_ww * (waste_2_3$M2.e.in * waste_2_3$Powergen_GHG +
                                                    waste_2_3$M2.h.in * waste_2_3$Heatgen_GHG +
                                                    waste_2_3$M2.d.in * waste_2_3$Diesel_GHG) * waste_2_3$M2_tech
# FIX: the leakage-emission term was gated by M1_tech (copy-paste remnant
# from the M1 section); it must use this pathway's own flag, M2_tech.
# /50 presumably MJ -> kg CH4 and *28 the CH4 GWP100 -- confirm.
waste_2_3$M2_transport2emiss <- waste_2_3$Prod_ww * waste_2_3$M2.m.out *0.02 /50 *28 * waste_2_3$M2_tech
waste_2_3$M2_enduseemiss <- waste_2_3$Prod_ww * waste_2_3$Nonbio_emiss2 * waste_2_3$M2_tech
# Displacement credit for fossil natural gas replaced.
waste_2_3$M2_dispemiss <- 0 - waste_2_3$M2_energy* waste_2_3$NG_GHG
waste_2_3$M2_dispemissmain <- waste_2_3$M2_dispemiss
waste_2_3$M2_netemiss <- waste_2_3$M2_collectionemiss + waste_2_3$M2_transport1emiss + waste_2_3$M2_processemiss + waste_2_3$M2_transport2emiss + waste_2_3$M2_enduseemiss + waste_2_3$M2_dispemiss
#Eth1 (waste_2_3): ethanol pathway with co-product electricity; natural gas
#and diesel process inputs.
#energy intensity of ethanol - 26.95 MJ/kg
waste_2_3$Eth1_elec <- waste_2_3$Prod_ww * waste_2_3$Eth1.e.out * (1-0.065)
waste_2_3$Eth1_eth <- waste_2_3$Prod_ww * waste_2_3$Eth1.eth.out
waste_2_3$Eth1_energy <- waste_2_3$Eth1_elec + waste_2_3$Eth1_eth
waste_2_3$Eth1_energymain <- waste_2_3$Eth1_eth
waste_2_3$Eth1_energyco <- waste_2_3$Eth1_elec
# FIX: the collection+transport term carried a stray "* M1_tech" multiplier
# (copy-paste remnant from the M1 section) even though the whole expression
# is already gated by Eth1_tech at the end; the stray multiplier is removed
# so this term matches every other pathway (e.g. Rd1/Rd2 below).
waste_2_3$Eth1_netenergy <- (waste_2_3$Eth1_energy - waste_2_3$Prod_ww * (waste_2_3$Eth1.ng.in + waste_2_3$Eth1.d.in) -
                               (waste_2_3$collection_en + waste_2_3$transport1_en) -
                               waste_2_3$Eth1_eth / 26.95 * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel) * waste_2_3$Eth1_tech
waste_2_3$Eth1_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$Eth1_tech
waste_2_3$Eth1_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$Eth1_tech
waste_2_3$Eth1_processemiss <- waste_2_3$Prod_ww * (waste_2_3$Eth1.ng.in * waste_2_3$Heatgen_GHG + waste_2_3$Eth1.d.in * waste_2_3$Diesel_GHG) * waste_2_3$Eth1_tech
# Fuel burned trucking the ethanol (mass = energy / 26.95 MJ/kg) to market.
waste_2_3$Eth1_transport2emiss <- waste_2_3$Eth1_eth / 26.95 * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel * waste_2_3$Diesel_GHG
waste_2_3$Eth1_enduseemiss <- waste_2_3$Prod_ww * waste_2_3$Nonbio_emiss2 * waste_2_3$Eth1_tech
# Displacement credits: ethanol replaces gasoline; electricity replaces grid power.
waste_2_3$Eth1_dispemiss <- 0 - waste_2_3$Eth1_eth* waste_2_3$Gasoline_GHG - waste_2_3$Eth1_elec * waste_2_3$Powergen_GHG
waste_2_3$Eth1_dispemissmain <- 0 - waste_2_3$Eth1_eth* waste_2_3$Gasoline_GHG
waste_2_3$Eth1_dispemissco <- 0 - waste_2_3$Eth1_elec * waste_2_3$Powergen_GHG
waste_2_3$Eth1_netemiss <- waste_2_3$Eth1_collectionemiss + waste_2_3$Eth1_transport1emiss + waste_2_3$Eth1_processemiss + waste_2_3$Eth1_transport2emiss + waste_2_3$Eth1_enduseemiss + waste_2_3$Eth1_dispemiss
#Rd1 (waste_2_3): renewable diesel (main product) with gasoline, jet,
#methane and electricity co-products; electricity is the process input.
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
#electricity T&D loss - 6.5%, methane leakage - 2%
waste_2_3$Rd1_d <- waste_2_3$Prod_ww * waste_2_3$Rd1.d.out
waste_2_3$Rd1_g <- waste_2_3$Prod_ww * waste_2_3$Rd1.g.out
waste_2_3$Rd1_j <- waste_2_3$Prod_ww * waste_2_3$Rd1.j.out
waste_2_3$Rd1_m <- waste_2_3$Prod_ww * waste_2_3$Rd1.m.out * (1-0.02)
waste_2_3$Rd1_elec <- waste_2_3$Prod_ww * waste_2_3$Rd1.e.out * (1-0.065)
waste_2_3$Rd1_energy <- waste_2_3$Rd1_d + waste_2_3$Rd1_g + waste_2_3$Rd1_j + waste_2_3$Rd1_m + waste_2_3$Rd1_elec
waste_2_3$Rd1_energymain <- waste_2_3$Rd1_d
waste_2_3$Rd1_energyco <- waste_2_3$Rd1_g + waste_2_3$Rd1_j + waste_2_3$Rd1_m + waste_2_3$Rd1_elec
# Net energy: outputs minus electricity input, collection/first-haul fuel,
# and diesel burned trucking each liquid product to market.
waste_2_3$Rd1_netenergy <-(waste_2_3$Rd1_energy - waste_2_3$Prod_ww * waste_2_3$Rd1.e.in -
                             (waste_2_3$collection_en + waste_2_3$transport1_en) -
                             waste_2_3$Rd1_d / 42.79 * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel -
                             waste_2_3$Rd1_g / 41.74 * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel -
                             waste_2_3$Rd1_j / 43.10 * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel ) * waste_2_3$Rd1_tech
waste_2_3$Rd1_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$Rd1_tech
waste_2_3$Rd1_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$Rd1_tech
waste_2_3$Rd1_processemiss <- waste_2_3$Prod_ww * waste_2_3$Rd1.e.in * waste_2_3$Powergen_GHG * waste_2_3$Rd1_tech
waste_2_3$Rd1_transport2emiss <- (waste_2_3$Rd1_d / 42.79 + waste_2_3$Rd1_g / 41.74 + waste_2_3$Rd1_j / 43.10) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel * waste_2_3$Diesel_GHG
waste_2_3$Rd1_enduseemiss <- waste_2_3$Prod_ww * waste_2_3$Nonbio_emiss2 * waste_2_3$Rd1_tech
# Displacement credits for each fossil product replaced.
waste_2_3$Rd1_dispemiss <- 0 - waste_2_3$Rd1_d *waste_2_3$Diesel_GHG - waste_2_3$Rd1_g * waste_2_3$Gasoline_GHG - waste_2_3$Rd1_j * waste_2_3$Jet_GHG -
  waste_2_3$Rd1_m * waste_2_3$NG_GHG - waste_2_3$Rd1_elec * waste_2_3$Powergen_GHG
waste_2_3$Rd1_dispemissmain <- 0 - waste_2_3$Rd1_d *waste_2_3$Diesel_GHG
waste_2_3$Rd1_dispemissco <- 0 - waste_2_3$Rd1_g * waste_2_3$Gasoline_GHG - waste_2_3$Rd1_j * waste_2_3$Jet_GHG -
  waste_2_3$Rd1_m * waste_2_3$NG_GHG - waste_2_3$Rd1_elec * waste_2_3$Powergen_GHG
waste_2_3$Rd1_netemiss <- waste_2_3$Rd1_collectionemiss + waste_2_3$Rd1_transport1emiss + waste_2_3$Rd1_processemiss + waste_2_3$Rd1_enduseemiss + waste_2_3$Rd1_transport2emiss + waste_2_3$Rd1_dispemiss
#Rd2 (waste_2_3): renewable diesel (main) with gasoline co-product;
#electricity and natural-gas process inputs.
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74
waste_2_3$Rd2_d <- waste_2_3$Prod_ww * waste_2_3$Rd2.d.out
waste_2_3$Rd2_g <- waste_2_3$Prod_ww * waste_2_3$Rd2.g.out
waste_2_3$Rd2_energy <- waste_2_3$Rd2_d + waste_2_3$Rd2_g
waste_2_3$Rd2_energymain <- waste_2_3$Rd2_d
waste_2_3$Rd2_energyco <- waste_2_3$Rd2_g
waste_2_3$Rd2_netenergy <-(waste_2_3$Rd2_energy - waste_2_3$Prod_ww * (waste_2_3$Rd2.e.in + waste_2_3$Rd2.ng.in) -
                             (waste_2_3$collection_en + waste_2_3$transport1_en) -
                             waste_2_3$Rd2_d / 42.79 * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel -
                             waste_2_3$Rd2_g / 41.74 * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel ) * waste_2_3$Rd2_tech
waste_2_3$Rd2_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$Rd2_tech
waste_2_3$Rd2_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$Rd2_tech
# NOTE(review): the Rd2.ng.in input is priced with H2_GHG rather than NG_GHG
# -- possibly NG used to make on-site hydrogen; confirm which factor is intended.
waste_2_3$Rd2_processemiss <- waste_2_3$Prod_ww * (waste_2_3$Rd2.e.in * waste_2_3$Powergen_GHG + waste_2_3$Rd2.ng.in * waste_2_3$H2_GHG) * waste_2_3$Rd2_tech
waste_2_3$Rd2_transport2emiss <- (waste_2_3$Rd2_d / 42.79 + waste_2_3$Rd2_g / 41.74) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel * waste_2_3$Diesel_GHG
waste_2_3$Rd2_enduseemiss <- waste_2_3$Prod_ww * waste_2_3$Nonbio_emiss2 * waste_2_3$Rd2_tech
waste_2_3$Rd2_dispemiss <- 0 - waste_2_3$Rd2_d *waste_2_3$Diesel_GHG - waste_2_3$Rd2_g* waste_2_3$Gasoline_GHG
waste_2_3$Rd2_dispemissmain <- 0 - waste_2_3$Rd2_d *waste_2_3$Diesel_GHG
waste_2_3$Rd2_dispemissco <- 0 - waste_2_3$Rd2_g* waste_2_3$Gasoline_GHG
waste_2_3$Rd2_netemiss <- waste_2_3$Rd2_collectionemiss + waste_2_3$Rd2_transport1emiss + waste_2_3$Rd2_processemiss + waste_2_3$Rd2_transport2emiss + waste_2_3$Rd2_enduseemiss + waste_2_3$Rd2_dispemiss
#Bj1 (waste_2_3): renewable diesel (main; energymain = Bj1_d) with gasoline
#and jet co-products; hydrogen is the process input.
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
waste_2_3$Bj1_d <- waste_2_3$Prod_ww * waste_2_3$Bj1.d.out
waste_2_3$Bj1_g <- waste_2_3$Prod_ww * waste_2_3$Bj1.g.out
waste_2_3$Bj1_j <- waste_2_3$Prod_ww * waste_2_3$Bj1.j.out
waste_2_3$Bj1_energy <- waste_2_3$Bj1_d + waste_2_3$Bj1_g + waste_2_3$Bj1_j
waste_2_3$Bj1_energymain <- waste_2_3$Bj1_d
waste_2_3$Bj1_energyco <- waste_2_3$Bj1_g + waste_2_3$Bj1_j
waste_2_3$Bj1_netenergy <-(waste_2_3$Bj1_energy - waste_2_3$Prod_ww * waste_2_3$Bj1.h2.in -
                             (waste_2_3$collection_en + waste_2_3$transport1_en) -
                             (waste_2_3$Bj1_d / 42.79 + waste_2_3$Bj1_g / 41.74 +waste_2_3$Bj1_j / 43.10) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel) * waste_2_3$Bj1_tech
waste_2_3$Bj1_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$Bj1_tech
waste_2_3$Bj1_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$Bj1_tech
waste_2_3$Bj1_processemiss <- waste_2_3$Prod_ww * waste_2_3$Bj1.h2.in * waste_2_3$H2_GHG * waste_2_3$Bj1_tech
waste_2_3$Bj1_transport2emiss <- (waste_2_3$Bj1_d / 42.79 + waste_2_3$Bj1_g / 41.74 + waste_2_3$Bj1_j / 43.10) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel * waste_2_3$Diesel_GHG
waste_2_3$Bj1_enduseemiss <- waste_2_3$Prod_ww * waste_2_3$Nonbio_emiss2 * waste_2_3$Bj1_tech
# Displacement credits for fossil diesel/gasoline/jet replaced.
waste_2_3$Bj1_dispemiss <- 0 - waste_2_3$Bj1_d *waste_2_3$Diesel_GHG - waste_2_3$Bj1_g* waste_2_3$Gasoline_GHG - waste_2_3$Bj1_j * waste_2_3$Jet_GHG
waste_2_3$Bj1_dispemissmain <- 0 - waste_2_3$Bj1_d *waste_2_3$Diesel_GHG
waste_2_3$Bj1_dispemissco <- 0 - waste_2_3$Bj1_g* waste_2_3$Gasoline_GHG - waste_2_3$Bj1_j * waste_2_3$Jet_GHG
waste_2_3$Bj1_netemiss <- waste_2_3$Bj1_collectionemiss + waste_2_3$Bj1_transport1emiss + waste_2_3$Bj1_processemiss + waste_2_3$Bj1_transport2emiss + waste_2_3$Bj1_enduseemiss + waste_2_3$Bj1_dispemiss
#Bj2 (waste_2_3): jet-fuel-only pathway with a hydrogen process input.
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
waste_2_3$Bj2_energy <- waste_2_3$Prod_ww * waste_2_3$Bj2.j.out
# FIX: energymain was assigned waste_2_3$Bj1_energy (copy-paste remnant from
# the Bj1 section); the main product of Bj2 is its own jet-fuel output.
waste_2_3$Bj2_energymain <- waste_2_3$Bj2_energy
# Net energy: jet output minus H2 input, collection/first-haul fuel, and
# diesel burned trucking the jet fuel; gated by the Bj2 applicability flag.
waste_2_3$Bj2_netenergy <-(waste_2_3$Bj2_energy - waste_2_3$Prod_ww * waste_2_3$Bj2.h2.in -
                             (waste_2_3$collection_en + waste_2_3$transport1_en) -
                             waste_2_3$Bj2_energy / 43.10 * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel ) * waste_2_3$Bj2_tech
waste_2_3$Bj2_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$Bj2_tech
waste_2_3$Bj2_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$Bj2_tech
# FIX: process emissions were missing the "* Bj2_tech" applicability gate
# that every other pathway applies to this term; added for consistency.
waste_2_3$Bj2_processemiss <- waste_2_3$Prod_ww * waste_2_3$Bj2.h2.in * waste_2_3$H2_GHG * waste_2_3$Bj2_tech
waste_2_3$Bj2_transport2emiss <- waste_2_3$Bj2_energy / 43.10 * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel * waste_2_3$Diesel_GHG
waste_2_3$Bj2_enduseemiss <- waste_2_3$Prod_ww * waste_2_3$Nonbio_emiss2 * waste_2_3$Bj2_tech
# Displacement credit for fossil jet fuel replaced.
waste_2_3$Bj2_dispemiss <- 0 - waste_2_3$Bj2_energy * waste_2_3$Jet_GHG
waste_2_3$Bj2_dispemissmain <- waste_2_3$Bj2_dispemiss
waste_2_3$Bj2_netemiss <- waste_2_3$Bj2_collectionemiss + waste_2_3$Bj2_transport1emiss + waste_2_3$Bj2_processemiss + waste_2_3$Bj2_transport2emiss + waste_2_3$Bj2_enduseemiss + waste_2_3$Bj2_dispemiss
#Bj3 (waste_2_3): jet fuel as the main product (energymain = Bj3_j) with
#diesel/gasoline co-products; electricity is the process input.
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
waste_2_3$Bj3_d <- waste_2_3$Prod_ww * waste_2_3$Bj3.d.out
waste_2_3$Bj3_g <- waste_2_3$Prod_ww * waste_2_3$Bj3.g.out
waste_2_3$Bj3_j <- waste_2_3$Prod_ww * waste_2_3$Bj3.j.out
waste_2_3$Bj3_energy <- waste_2_3$Bj3_d + waste_2_3$Bj3_g + waste_2_3$Bj3_j
waste_2_3$Bj3_energymain <- waste_2_3$Bj3_j
waste_2_3$Bj3_energyco <- waste_2_3$Bj3_d + waste_2_3$Bj3_g
waste_2_3$Bj3_netenergy <-(waste_2_3$Bj3_energy - waste_2_3$Prod_ww * waste_2_3$Bj3.e.in -
                             (waste_2_3$collection_en + waste_2_3$transport1_en) -
                             (waste_2_3$Bj3_d / 42.79 + waste_2_3$Bj3_g / 41.74 +waste_2_3$Bj3_j / 43.10) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel) * waste_2_3$Bj3_tech
waste_2_3$Bj3_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$Bj3_tech
waste_2_3$Bj3_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$Bj3_tech
waste_2_3$Bj3_processemiss <- waste_2_3$Prod_ww * waste_2_3$Bj3.e.in * waste_2_3$Powergen_GHG * waste_2_3$Bj3_tech
waste_2_3$Bj3_transport2emiss <- (waste_2_3$Bj3_d / 42.79 + waste_2_3$Bj3_g / 41.74 + waste_2_3$Bj3_j / 43.10) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel * waste_2_3$Diesel_GHG
waste_2_3$Bj3_enduseemiss <- waste_2_3$Prod_ww * waste_2_3$Nonbio_emiss2 * waste_2_3$Bj3_tech
waste_2_3$Bj3_dispemiss <- 0 - waste_2_3$Bj3_d *waste_2_3$Diesel_GHG - waste_2_3$Bj3_g* waste_2_3$Gasoline_GHG - waste_2_3$Bj3_j * waste_2_3$Jet_GHG
waste_2_3$Bj3_dispemissmain <- 0 - waste_2_3$Bj3_j * waste_2_3$Jet_GHG
waste_2_3$Bj3_dispemissco <- 0 - waste_2_3$Bj3_d * waste_2_3$Diesel_GHG - waste_2_3$Bj3_g * waste_2_3$Gasoline_GHG
waste_2_3$Bj3_netemiss <- waste_2_3$Bj3_collectionemiss + waste_2_3$Bj3_transport1emiss + waste_2_3$Bj3_processemiss + waste_2_3$Bj3_transport2emiss + waste_2_3$Bj3_enduseemiss + waste_2_3$Bj3_dispemiss
#Bj4 (waste_2_3): same structure as Bj3 but with a hydrogen process input.
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
waste_2_3$Bj4_d <- waste_2_3$Prod_ww * waste_2_3$Bj4.d.out
waste_2_3$Bj4_g <- waste_2_3$Prod_ww * waste_2_3$Bj4.g.out
waste_2_3$Bj4_j <- waste_2_3$Prod_ww * waste_2_3$Bj4.j.out
waste_2_3$Bj4_energy <- waste_2_3$Bj4_d + waste_2_3$Bj4_g + waste_2_3$Bj4_j
waste_2_3$Bj4_energymain <- waste_2_3$Bj4_j
waste_2_3$Bj4_energyco <- waste_2_3$Bj4_d + waste_2_3$Bj4_g
waste_2_3$Bj4_netenergy <-(waste_2_3$Bj4_energy - waste_2_3$Prod_ww * waste_2_3$Bj4.h2.in -
                             (waste_2_3$collection_en + waste_2_3$transport1_en) -
                             (waste_2_3$Bj4_d / 42.79 + waste_2_3$Bj4_g / 41.74 +waste_2_3$Bj4_j / 43.10) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel) * waste_2_3$Bj4_tech
waste_2_3$Bj4_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$Bj4_tech
waste_2_3$Bj4_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$Bj4_tech
waste_2_3$Bj4_processemiss <- waste_2_3$Prod_ww * waste_2_3$Bj4.h2.in * waste_2_3$H2_GHG * waste_2_3$Bj4_tech
waste_2_3$Bj4_transport2emiss <- (waste_2_3$Bj4_d / 42.79 + waste_2_3$Bj4_g / 41.74 + waste_2_3$Bj4_j / 43.10) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel * waste_2_3$Diesel_GHG
waste_2_3$Bj4_enduseemiss <- waste_2_3$Prod_ww * waste_2_3$Nonbio_emiss2 * waste_2_3$Bj4_tech
waste_2_3$Bj4_dispemiss <- 0 - waste_2_3$Bj4_d *waste_2_3$Diesel_GHG - waste_2_3$Bj4_g* waste_2_3$Gasoline_GHG - waste_2_3$Bj4_j * waste_2_3$Jet_GHG
waste_2_3$Bj4_dispemissmain <- 0 - waste_2_3$Bj4_j * waste_2_3$Jet_GHG
waste_2_3$Bj4_dispemissco <- 0 - waste_2_3$Bj4_d * waste_2_3$Diesel_GHG - waste_2_3$Bj4_g * waste_2_3$Gasoline_GHG
waste_2_3$Bj4_netemiss <- waste_2_3$Bj4_collectionemiss + waste_2_3$Bj4_transport1emiss + waste_2_3$Bj4_processemiss + waste_2_3$Bj4_transport2emiss+ waste_2_3$Bj4_enduseemiss + waste_2_3$Bj4_dispemiss
#Bj5 (waste_2_3): jet fuel as the main product with diesel/gasoline
#co-products; electricity is the process input.
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
waste_2_3$Bj5_d <- waste_2_3$Prod_ww * waste_2_3$Bj5.d.out
waste_2_3$Bj5_g <- waste_2_3$Prod_ww * waste_2_3$Bj5.g.out
waste_2_3$Bj5_j <- waste_2_3$Prod_ww * waste_2_3$Bj5.j.out
waste_2_3$Bj5_energy <- waste_2_3$Bj5_d + waste_2_3$Bj5_g + waste_2_3$Bj5_j
waste_2_3$Bj5_energymain <- waste_2_3$Bj5_j
waste_2_3$Bj5_energyco <- waste_2_3$Bj5_d + waste_2_3$Bj5_g
waste_2_3$Bj5_netenergy <-(waste_2_3$Bj5_energy - waste_2_3$Prod_ww * waste_2_3$Bj5.e.in -
                             (waste_2_3$collection_en + waste_2_3$transport1_en) -
                             (waste_2_3$Bj5_d / 42.79 + waste_2_3$Bj5_g / 41.74 +waste_2_3$Bj5_j / 43.10) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel) * waste_2_3$Bj5_tech
waste_2_3$Bj5_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$Bj5_tech
waste_2_3$Bj5_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$Bj5_tech
waste_2_3$Bj5_processemiss <- waste_2_3$Prod_ww * waste_2_3$Bj5.e.in * waste_2_3$Powergen_GHG * waste_2_3$Bj5_tech
waste_2_3$Bj5_transport2emiss <- (waste_2_3$Bj5_d / 42.79 + waste_2_3$Bj5_g / 41.74 + waste_2_3$Bj5_j / 43.10) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel * waste_2_3$Diesel_GHG
waste_2_3$Bj5_enduseemiss <- waste_2_3$Prod_ww * waste_2_3$Nonbio_emiss2 * waste_2_3$Bj5_tech
waste_2_3$Bj5_dispemiss <- 0 - waste_2_3$Bj5_d *waste_2_3$Diesel_GHG - waste_2_3$Bj5_g* waste_2_3$Gasoline_GHG - waste_2_3$Bj5_j * waste_2_3$Jet_GHG
waste_2_3$Bj5_dispemissmain <- 0 - waste_2_3$Bj5_j * waste_2_3$Jet_GHG
waste_2_3$Bj5_dispemissco <- 0 - waste_2_3$Bj5_d * waste_2_3$Diesel_GHG - waste_2_3$Bj5_g * waste_2_3$Gasoline_GHG
waste_2_3$Bj5_netemiss <- waste_2_3$Bj5_collectionemiss + waste_2_3$Bj5_transport1emiss + waste_2_3$Bj5_processemiss + waste_2_3$Bj5_transport2emiss + waste_2_3$Bj5_enduseemiss + waste_2_3$Bj5_dispemiss
#Bj6 (waste_2_3): same structure as Bj5 but with both hydrogen and
#electricity process inputs.
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
waste_2_3$Bj6_d <- waste_2_3$Prod_ww * waste_2_3$Bj6.d.out
waste_2_3$Bj6_g <- waste_2_3$Prod_ww * waste_2_3$Bj6.g.out
waste_2_3$Bj6_j <- waste_2_3$Prod_ww * waste_2_3$Bj6.j.out
waste_2_3$Bj6_energy <- waste_2_3$Bj6_d + waste_2_3$Bj6_g + waste_2_3$Bj6_j
waste_2_3$Bj6_energymain <- waste_2_3$Bj6_j
waste_2_3$Bj6_energyco <- waste_2_3$Bj6_d + waste_2_3$Bj6_g
waste_2_3$Bj6_netenergy <-(waste_2_3$Bj6_energy - waste_2_3$Prod_ww * waste_2_3$Bj6.h2.in - waste_2_3$Prod_ww * waste_2_3$Bj6.e.in -
                             (waste_2_3$collection_en + waste_2_3$transport1_en) -
                             (waste_2_3$Bj6_d / 42.79 + waste_2_3$Bj6_g / 41.74 +waste_2_3$Bj6_j / 43.10) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel) * waste_2_3$Bj6_tech
waste_2_3$Bj6_collectionemiss <- waste_2_3$collection_emiss * waste_2_3$Bj6_tech
waste_2_3$Bj6_transport1emiss <- waste_2_3$transport1_emiss * waste_2_3$Bj6_tech
waste_2_3$Bj6_processemiss <- waste_2_3$Prod_ww * (waste_2_3$Bj6.h2.in * waste_2_3$H2_GHG + waste_2_3$Bj6.e.in * waste_2_3$Powergen_GHG) * waste_2_3$Bj6_tech
waste_2_3$Bj6_transport2emiss <- (waste_2_3$Bj6_d / 42.79 + waste_2_3$Bj6_g / 41.74 + waste_2_3$Bj6_j / 43.10) * waste_2_3$Transport_km_2 * waste_2_3$Transport_Diesel * waste_2_3$Diesel_GHG
waste_2_3$Bj6_enduseemiss <- waste_2_3$Prod_ww * waste_2_3$Nonbio_emiss2 * waste_2_3$Bj6_tech
waste_2_3$Bj6_dispemiss <- 0 - waste_2_3$Bj6_d *waste_2_3$Diesel_GHG - waste_2_3$Bj6_g* waste_2_3$Gasoline_GHG - waste_2_3$Bj6_j * waste_2_3$Jet_GHG
waste_2_3$Bj6_dispemissmain <- 0 - waste_2_3$Bj6_j * waste_2_3$Jet_GHG
waste_2_3$Bj6_dispemissco <- 0 - waste_2_3$Bj6_d * waste_2_3$Diesel_GHG - waste_2_3$Bj6_g * waste_2_3$Gasoline_GHG
waste_2_3$Bj6_netemiss <- waste_2_3$Bj6_collectionemiss + waste_2_3$Bj6_transport1emiss + waste_2_3$Bj6_processemiss + waste_2_3$Bj6_transport2emiss + waste_2_3$Bj6_enduseemiss + waste_2_3$Bj6_dispemiss
# TD = 50km: repeat the inventory build with the 50-km transport-distance
# coefficient set (coefficients2); results go into waste_2_4.
coefficients2 <- merge(coefficients2, tech)
waste_1_2 <- merge(waste, coefficients2)
waste_2_4 <- merge(waste_1_2, state_GHG_ef)
#Collection & Transportation_1 (energy - GJ, emiss - kg CO2e)
# Convert dry-basis production to wet mass using moisture content MC (%).
waste_2_4$Prod_ww <- waste_2_4$Prod/(1-waste_2_4$MC/100)
waste_2_4$collection_en <- waste_2_4$Prod_ww * waste_2_4$Collection_Diesel
waste_2_4$collection_emiss <- waste_2_4$collection_en * waste_2_4$Diesel_GHG
waste_2_4$transport1_en <- waste_2_4$Prod_ww * waste_2_4$Transport_km_1 * waste_2_4$Transport_Diesel
waste_2_4$transport1_emiss <- waste_2_4$transport1_en * waste_2_4$Diesel_GHG
#E1 (waste_2_4, TD=50km): electricity (main) + heat (co-product) pathway;
#same structure as the waste_2_3 version.
#electricity T&D loss - 6.5%, heat loss - 20%
waste_2_4$E1_elec <- waste_2_4$Prod_ww * waste_2_4$E1.e.out * (1-0.065)
waste_2_4$E1_heat <- waste_2_4$Prod_ww * waste_2_4$E1.h.out * (1-0.2)
waste_2_4$E1_energy <- waste_2_4$E1_elec + waste_2_4$E1_heat
waste_2_4$E1_energymain <- waste_2_4$E1_elec
waste_2_4$E1_energyco <- waste_2_4$E1_heat
waste_2_4$E1_netenergy <- (waste_2_4$E1_energy - waste_2_4$Prod_ww * (waste_2_4$E1.e.in + waste_2_4$E1.h.in + waste_2_4$E1.d.in) - waste_2_4$collection_en - waste_2_4$transport1_en) * waste_2_4$E1_tech
waste_2_4$E1_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$E1_tech
waste_2_4$E1_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$E1_tech
waste_2_4$E1_processemiss <- waste_2_4$Prod_ww * (waste_2_4$E1.e.in * waste_2_4$Powergen_GHG
                                                  + waste_2_4$E1.h.in * waste_2_4$Heatgen_GHG
                                                  + waste_2_4$E1.d.in * waste_2_4$Diesel_GHG
                                                  + waste_2_4$Nonbio_emiss1) * waste_2_4$E1_tech
waste_2_4$E1_enduseemiss <- 0
# Displacement credits for grid electricity and conventional heat replaced.
waste_2_4$E1_dispemiss <- 0 - waste_2_4$E1_elec* waste_2_4$Powergen_GHG - waste_2_4$E1_heat* waste_2_4$Heatgen_GHG
waste_2_4$E1_dispemissmain <- 0 - waste_2_4$E1_elec* waste_2_4$Powergen_GHG
waste_2_4$E1_dispemissco <- 0 - waste_2_4$E1_heat* waste_2_4$Heatgen_GHG
waste_2_4$E1_netemiss <- waste_2_4$E1_collectionemiss + waste_2_4$E1_transport1emiss + waste_2_4$E1_processemiss + waste_2_4$E1_enduseemiss + waste_2_4$E1_dispemiss
#E2 (waste_2_4): identical structure to E1 with the E2 coefficient set.
#electricity T&D loss - 6.5%, heat loss - 20%
waste_2_4$E2_elec <- waste_2_4$Prod_ww * waste_2_4$E2.e.out * (1-0.065)
waste_2_4$E2_heat <- waste_2_4$Prod_ww * waste_2_4$E2.h.out * (1-0.2)
waste_2_4$E2_energy <- waste_2_4$E2_elec + waste_2_4$E2_heat
waste_2_4$E2_energymain <- waste_2_4$E2_elec
waste_2_4$E2_energyco <- waste_2_4$E2_heat
waste_2_4$E2_netenergy <- (waste_2_4$E2_energy - waste_2_4$Prod_ww * (waste_2_4$E2.e.in + waste_2_4$E2.h.in + waste_2_4$E2.d.in) - waste_2_4$collection_en - waste_2_4$transport1_en) * waste_2_4$E2_tech
waste_2_4$E2_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$E2_tech
waste_2_4$E2_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$E2_tech
waste_2_4$E2_processemiss <- waste_2_4$Prod_ww * (waste_2_4$E2.e.in * waste_2_4$Powergen_GHG
                                                  + waste_2_4$E2.h.in * waste_2_4$Heatgen_GHG
                                                  + waste_2_4$E2.d.in * waste_2_4$Diesel_GHG
                                                  + waste_2_4$Nonbio_emiss1) * waste_2_4$E2_tech
waste_2_4$E2_enduseemiss <- 0
waste_2_4$E2_dispemiss <- 0 - waste_2_4$E2_elec* waste_2_4$Powergen_GHG - waste_2_4$E2_heat* waste_2_4$Heatgen_GHG
waste_2_4$E2_dispemissmain <- 0 - waste_2_4$E2_elec* waste_2_4$Powergen_GHG
waste_2_4$E2_dispemissco <- 0 - waste_2_4$E2_heat* waste_2_4$Heatgen_GHG
waste_2_4$E2_netemiss <- waste_2_4$E2_collectionemiss + waste_2_4$E2_transport1emiss + waste_2_4$E2_processemiss + waste_2_4$E2_enduseemiss + waste_2_4$E2_dispemiss
#E3
#electricity T&D loss - 6.5%
waste_2_4$E3_energy <- waste_2_4$Prod_ww * waste_2_4$E3.e.out * (1-0.065)
waste_2_4$E3_energymain <- waste_2_4$E3_energy
waste_2_4$E3_netenergy <- (waste_2_4$E3_energy - waste_2_4$Prod_ww * (waste_2_4$E3.e.in + waste_2_4$E3.h.in + waste_2_4$E3.d.in) - waste_2_4$collection_en - waste_2_4$transport1_en) * waste_2_4$E3_tech
waste_2_4$E3_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$E3_tech
waste_2_4$E3_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$E3_tech
waste_2_4$E3_processemiss <- waste_2_4$Prod_ww * (waste_2_4$E3.e.in * waste_2_4$Powergen_GHG
+ waste_2_4$E3.h.in * waste_2_4$Heatgen_GHG
+ waste_2_4$E3.d.in * waste_2_4$Diesel_GHG
+ waste_2_4$Nonbio_emiss1) * waste_2_4$E3_tech
waste_2_4$E3_enduseemiss <- 0
waste_2_4$E3_dispemiss <- 0 - waste_2_4$E3_energy* waste_2_4$Powergen_GHG
waste_2_4$E3_dispemissmain <- 0 - waste_2_4$E3_energy* waste_2_4$Powergen_GHG
waste_2_4$E3_netemiss <- waste_2_4$E3_collectionemiss + waste_2_4$E3_transport1emiss + waste_2_4$E3_processemiss + waste_2_4$E3_enduseemiss + waste_2_4$E3_dispemiss
#E4
#electricity T&D loss - 6.5%, heat loss - 20%
waste_2_4$E4_elec <- waste_2_4$Prod_ww * waste_2_4$E4.e.out * (1-0.065)
waste_2_4$E4_heat <- waste_2_4$Prod_ww * waste_2_4$E4.h.out * (1-0.2)
waste_2_4$E4_energy <- waste_2_4$E4_elec + waste_2_4$E4_heat
waste_2_4$E4_energymain <- waste_2_4$E4_elec
waste_2_4$E4_energyco <- waste_2_4$E4_heat
waste_2_4$E4_netenergy <- (waste_2_4$E4_energy - waste_2_4$Prod_ww * (waste_2_4$E4.ng.in + waste_2_4$E4.d.in) - waste_2_4$collection_en - waste_2_4$transport1_en) * waste_2_4$E4_tech
waste_2_4$E4_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$E4_tech
waste_2_4$E4_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$E4_tech
waste_2_4$E4_processemiss <- waste_2_4$Prod_ww * (waste_2_4$E4.ng.in * waste_2_4$NG_GHG
+ waste_2_4$E4.d.in * waste_2_4$Diesel_GHG
+ waste_2_4$Nonbio_emiss1) * waste_2_4$E4_tech
waste_2_4$E4_enduseemiss <- 0
waste_2_4$E4_dispemiss <- 0 - waste_2_4$E4_elec* waste_2_4$Powergen_GHG - waste_2_4$E4_heat* waste_2_4$Heatgen_GHG
waste_2_4$E4_dispemissmain <- 0 - waste_2_4$E4_elec* waste_2_4$Powergen_GHG
waste_2_4$E4_dispemissco <- 0 - waste_2_4$E4_heat* waste_2_4$Heatgen_GHG
waste_2_4$E4_netemiss <- waste_2_4$E4_collectionemiss + waste_2_4$E4_transport1emiss + waste_2_4$E4_processemiss + waste_2_4$E4_enduseemiss + waste_2_4$E4_dispemiss
#M1
#methane leakage - 2%
waste_2_4$M1_energy <- waste_2_4$Prod_ww * waste_2_4$M1.m.out * (1-0.02)
waste_2_4$M1_energymain <- waste_2_4$M1_energy
waste_2_4$M1_netenergy <- (waste_2_4$M1_energy - waste_2_4$Prod_ww * (waste_2_4$M1.e.in + waste_2_4$M1.h.in) - waste_2_4$collection_en - waste_2_4$transport1_en) * waste_2_4$M1_tech
waste_2_4$M1_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$M1_tech
waste_2_4$M1_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$M1_tech
waste_2_4$M1_processemiss <- waste_2_4$Prod_ww * (waste_2_4$M1.e.in * waste_2_4$Powergen_GHG + waste_2_4$M1.h.in * waste_2_4$Heatgen_GHG)* waste_2_4$M1_tech
waste_2_4$M1_transport2emiss <- waste_2_4$Prod_ww * waste_2_4$M1.m.out *0.02 /50 *28 * waste_2_4$M1_tech
waste_2_4$M1_enduseemiss <- waste_2_4$Prod_ww * waste_2_4$Nonbio_emiss2 * waste_2_4$M1_tech
waste_2_4$M1_dispemiss <- 0 - waste_2_4$M1_energy* waste_2_4$NG_GHG
waste_2_4$M1_dispemissmain <- waste_2_4$M1_dispemiss
waste_2_4$M1_netemiss <- waste_2_4$M1_collectionemiss + waste_2_4$M1_transport1emiss + waste_2_4$M1_processemiss + waste_2_4$M1_transport2emiss + waste_2_4$M1_enduseemiss + waste_2_4$M1_dispemiss
#M2
#methane leakage - 2%
# M2 pathway: pipeline methane as the sole product; 2% of gross output is
# lost as fugitive leakage before delivery. All terms are gated by M2_tech.
waste_2_4$M2_energy <- waste_2_4$Prod_ww * waste_2_4$M2.m.out * (1-0.02)
waste_2_4$M2_energymain <- waste_2_4$M2_energy
waste_2_4$M2_netenergy <- (waste_2_4$M2_energy - waste_2_4$Prod_ww * (waste_2_4$M2.e.in + waste_2_4$M2.h.in + waste_2_4$M2.d.in) - waste_2_4$collection_en - waste_2_4$transport1_en) * waste_2_4$M2_tech
waste_2_4$M2_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$M2_tech
waste_2_4$M2_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$M2_tech
waste_2_4$M2_processemiss <- waste_2_4$Prod_ww * (waste_2_4$M2.e.in * waste_2_4$Powergen_GHG +
                                                  waste_2_4$M2.h.in * waste_2_4$Heatgen_GHG +
                                                  waste_2_4$M2.d.in * waste_2_4$Diesel_GHG) * waste_2_4$M2_tech
# Fugitive CH4: 2% of output x GWP100 of 28; the /50 factor presumably
# converts energy to mass via CH4 LHV ~50 MJ/kg -- TODO confirm units.
# BUG FIX: this term was gated by M1_tech (copy-paste from the M1 section);
# it must use M2_tech like every other M2 term.
waste_2_4$M2_transport2emiss <- waste_2_4$Prod_ww * waste_2_4$M2.m.out * 0.02 / 50 * 28 * waste_2_4$M2_tech
waste_2_4$M2_enduseemiss <- waste_2_4$Prod_ww * waste_2_4$Nonbio_emiss2 * waste_2_4$M2_tech
# Displacement credit: produced methane offsets fossil natural gas.
waste_2_4$M2_dispemiss <- 0 - waste_2_4$M2_energy * waste_2_4$NG_GHG
waste_2_4$M2_dispemissmain <- waste_2_4$M2_dispemiss
waste_2_4$M2_netemiss <- waste_2_4$M2_collectionemiss + waste_2_4$M2_transport1emiss + waste_2_4$M2_processemiss + waste_2_4$M2_transport2emiss + waste_2_4$M2_enduseemiss + waste_2_4$M2_dispemiss
#Eth1
#energy intensity of ethanol - 26.95 MJ/kg
# Eth1 pathway: ethanol (main product) + electricity (co-product).
# Fuel mass for the second transport leg is derived from ethanol energy via
# the 26.95 MJ/kg energy intensity.
waste_2_4$Eth1_elec <- waste_2_4$Prod_ww * waste_2_4$Eth1.e.out * (1-0.065)
waste_2_4$Eth1_eth <- waste_2_4$Prod_ww * waste_2_4$Eth1.eth.out
waste_2_4$Eth1_energy <- waste_2_4$Eth1_elec + waste_2_4$Eth1_eth
waste_2_4$Eth1_energymain <- waste_2_4$Eth1_eth
waste_2_4$Eth1_energyco <- waste_2_4$Eth1_elec
# Net energy = outputs - process inputs - collection/leg-1 transport fuel -
# leg-2 fuel-distribution diesel, all gated by Eth1_tech.
# BUG FIX: the collection/transport term was spuriously multiplied by
# M1_tech (copy-paste from the M1 section); no other pathway applies a tech
# factor inside this subtraction (cf. Rd1/Bj1), and M1_tech is the wrong
# pathway flag in any case.
waste_2_4$Eth1_netenergy <- (waste_2_4$Eth1_energy - waste_2_4$Prod_ww * (waste_2_4$Eth1.ng.in + waste_2_4$Eth1.d.in) -
                             (waste_2_4$collection_en + waste_2_4$transport1_en) -
                             waste_2_4$Eth1_eth / 26.95 * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel) * waste_2_4$Eth1_tech
waste_2_4$Eth1_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$Eth1_tech
waste_2_4$Eth1_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$Eth1_tech
# NOTE(review): Eth1.ng.in is priced at Heatgen_GHG here while E4 prices
# ng.in at NG_GHG -- confirm which factor is intended.
waste_2_4$Eth1_processemiss <- waste_2_4$Prod_ww * (waste_2_4$Eth1.ng.in * waste_2_4$Heatgen_GHG + waste_2_4$Eth1.d.in * waste_2_4$Diesel_GHG) * waste_2_4$Eth1_tech
# NOTE(review): leg-2 transport emissions are not gated by Eth1_tech; this
# matches Rd1/Bj* but differs from the collection/process terms -- confirm.
waste_2_4$Eth1_transport2emiss <- waste_2_4$Eth1_eth / 26.95 * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel * waste_2_4$Diesel_GHG
waste_2_4$Eth1_enduseemiss <- waste_2_4$Prod_ww * waste_2_4$Nonbio_emiss2 * waste_2_4$Eth1_tech
# Displacement credit: ethanol offsets gasoline, co-product electricity
# offsets grid power.
waste_2_4$Eth1_dispemiss <- 0 - waste_2_4$Eth1_eth * waste_2_4$Gasoline_GHG - waste_2_4$Eth1_elec * waste_2_4$Powergen_GHG
waste_2_4$Eth1_dispemissmain <- 0 - waste_2_4$Eth1_eth * waste_2_4$Gasoline_GHG
waste_2_4$Eth1_dispemissco <- 0 - waste_2_4$Eth1_elec * waste_2_4$Powergen_GHG
waste_2_4$Eth1_netemiss <- waste_2_4$Eth1_collectionemiss + waste_2_4$Eth1_transport1emiss + waste_2_4$Eth1_processemiss + waste_2_4$Eth1_transport2emiss + waste_2_4$Eth1_enduseemiss + waste_2_4$Eth1_dispemiss
#Rd1
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
#electricity T&D loss - 6.5%, methane leakage - 2%
# Rd1 pathway: renewable diesel (main product) with gasoline, jet fuel,
# methane, and electricity co-products. Liquid-fuel mass for the second
# transport leg is derived from energy via the per-fuel energy intensities.
waste_2_4$Rd1_d <- waste_2_4$Prod_ww * waste_2_4$Rd1.d.out
waste_2_4$Rd1_g <- waste_2_4$Prod_ww * waste_2_4$Rd1.g.out
waste_2_4$Rd1_j <- waste_2_4$Prod_ww * waste_2_4$Rd1.j.out
waste_2_4$Rd1_m <- waste_2_4$Prod_ww * waste_2_4$Rd1.m.out * (1-0.02)
waste_2_4$Rd1_elec <- waste_2_4$Prod_ww * waste_2_4$Rd1.e.out * (1-0.065)
waste_2_4$Rd1_energy <- waste_2_4$Rd1_d + waste_2_4$Rd1_g + waste_2_4$Rd1_j + waste_2_4$Rd1_m + waste_2_4$Rd1_elec
waste_2_4$Rd1_energymain <- waste_2_4$Rd1_d
waste_2_4$Rd1_energyco <- waste_2_4$Rd1_g + waste_2_4$Rd1_j + waste_2_4$Rd1_m + waste_2_4$Rd1_elec
waste_2_4$Rd1_netenergy <-(waste_2_4$Rd1_energy - waste_2_4$Prod_ww * waste_2_4$Rd1.e.in -
                           (waste_2_4$collection_en + waste_2_4$transport1_en) -
                           waste_2_4$Rd1_d / 42.79 * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel -
                           waste_2_4$Rd1_g / 41.74 * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel -
                           waste_2_4$Rd1_j / 43.10 * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel ) * waste_2_4$Rd1_tech
waste_2_4$Rd1_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$Rd1_tech
waste_2_4$Rd1_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$Rd1_tech
waste_2_4$Rd1_processemiss <- waste_2_4$Prod_ww * waste_2_4$Rd1.e.in * waste_2_4$Powergen_GHG * waste_2_4$Rd1_tech
# NOTE(review): leg-2 transport emissions are not gated by Rd1_tech, unlike
# the collection/process terms -- confirm this is intended.
waste_2_4$Rd1_transport2emiss <- (waste_2_4$Rd1_d / 42.79 + waste_2_4$Rd1_g / 41.74 + waste_2_4$Rd1_j / 43.10) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel * waste_2_4$Diesel_GHG
waste_2_4$Rd1_enduseemiss <- waste_2_4$Prod_ww * waste_2_4$Nonbio_emiss2 * waste_2_4$Rd1_tech
# Displacement credit: each product offsets its fossil counterpart.
waste_2_4$Rd1_dispemiss <- 0 - waste_2_4$Rd1_d *waste_2_4$Diesel_GHG - waste_2_4$Rd1_g * waste_2_4$Gasoline_GHG - waste_2_4$Rd1_j * waste_2_4$Jet_GHG -
  waste_2_4$Rd1_m * waste_2_4$NG_GHG - waste_2_4$Rd1_elec * waste_2_4$Powergen_GHG
waste_2_4$Rd1_dispemissmain <- 0 - waste_2_4$Rd1_d *waste_2_4$Diesel_GHG
waste_2_4$Rd1_dispemissco <- 0 - waste_2_4$Rd1_g * waste_2_4$Gasoline_GHG - waste_2_4$Rd1_j * waste_2_4$Jet_GHG -
  waste_2_4$Rd1_m * waste_2_4$NG_GHG - waste_2_4$Rd1_elec * waste_2_4$Powergen_GHG
waste_2_4$Rd1_netemiss <- waste_2_4$Rd1_collectionemiss + waste_2_4$Rd1_transport1emiss + waste_2_4$Rd1_processemiss + waste_2_4$Rd1_enduseemiss + waste_2_4$Rd1_transport2emiss + waste_2_4$Rd1_dispemiss
#Rd2
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74
# Rd2 pathway: renewable diesel (main) + gasoline (co-product); process
# inputs are electricity and natural gas (ng.in priced at the H2 factor).
waste_2_4$Rd2_d <- waste_2_4$Prod_ww * waste_2_4$Rd2.d.out
waste_2_4$Rd2_g <- waste_2_4$Prod_ww * waste_2_4$Rd2.g.out
waste_2_4$Rd2_energy <- waste_2_4$Rd2_d + waste_2_4$Rd2_g
waste_2_4$Rd2_energymain <- waste_2_4$Rd2_d
waste_2_4$Rd2_energyco <- waste_2_4$Rd2_g
waste_2_4$Rd2_netenergy <-(waste_2_4$Rd2_energy - waste_2_4$Prod_ww * (waste_2_4$Rd2.e.in + waste_2_4$Rd2.ng.in) -
                           (waste_2_4$collection_en + waste_2_4$transport1_en) -
                           waste_2_4$Rd2_d / 42.79 * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel -
                           waste_2_4$Rd2_g / 41.74 * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel ) * waste_2_4$Rd2_tech
waste_2_4$Rd2_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$Rd2_tech
waste_2_4$Rd2_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$Rd2_tech
waste_2_4$Rd2_processemiss <- waste_2_4$Prod_ww * (waste_2_4$Rd2.e.in * waste_2_4$Powergen_GHG + waste_2_4$Rd2.ng.in * waste_2_4$H2_GHG) * waste_2_4$Rd2_tech
waste_2_4$Rd2_transport2emiss <- (waste_2_4$Rd2_d / 42.79 + waste_2_4$Rd2_g / 41.74) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel * waste_2_4$Diesel_GHG
waste_2_4$Rd2_enduseemiss <- waste_2_4$Prod_ww * waste_2_4$Nonbio_emiss2 * waste_2_4$Rd2_tech
waste_2_4$Rd2_dispemiss <- 0 - waste_2_4$Rd2_d *waste_2_4$Diesel_GHG - waste_2_4$Rd2_g* waste_2_4$Gasoline_GHG
waste_2_4$Rd2_dispemissmain <- 0 - waste_2_4$Rd2_d *waste_2_4$Diesel_GHG
waste_2_4$Rd2_dispemissco <- 0 - waste_2_4$Rd2_g* waste_2_4$Gasoline_GHG
waste_2_4$Rd2_netemiss <- waste_2_4$Rd2_collectionemiss + waste_2_4$Rd2_transport1emiss + waste_2_4$Rd2_processemiss + waste_2_4$Rd2_transport2emiss + waste_2_4$Rd2_enduseemiss + waste_2_4$Rd2_dispemiss
#Bj1
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bj1 pathway: diesel is the main product, gasoline + jet fuel are
# co-products; the only process input is hydrogen (Bj1.h2.in).
waste_2_4$Bj1_d <- waste_2_4$Prod_ww * waste_2_4$Bj1.d.out
waste_2_4$Bj1_g <- waste_2_4$Prod_ww * waste_2_4$Bj1.g.out
waste_2_4$Bj1_j <- waste_2_4$Prod_ww * waste_2_4$Bj1.j.out
waste_2_4$Bj1_energy <- waste_2_4$Bj1_d + waste_2_4$Bj1_g + waste_2_4$Bj1_j
waste_2_4$Bj1_energymain <- waste_2_4$Bj1_d
waste_2_4$Bj1_energyco <- waste_2_4$Bj1_g + waste_2_4$Bj1_j
waste_2_4$Bj1_netenergy <-(waste_2_4$Bj1_energy - waste_2_4$Prod_ww * waste_2_4$Bj1.h2.in -
                           (waste_2_4$collection_en + waste_2_4$transport1_en) -
                           (waste_2_4$Bj1_d / 42.79 + waste_2_4$Bj1_g / 41.74 +waste_2_4$Bj1_j / 43.10) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel) * waste_2_4$Bj1_tech
waste_2_4$Bj1_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$Bj1_tech
waste_2_4$Bj1_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$Bj1_tech
waste_2_4$Bj1_processemiss <- waste_2_4$Prod_ww * waste_2_4$Bj1.h2.in * waste_2_4$H2_GHG * waste_2_4$Bj1_tech
waste_2_4$Bj1_transport2emiss <- (waste_2_4$Bj1_d / 42.79 + waste_2_4$Bj1_g / 41.74 + waste_2_4$Bj1_j / 43.10) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel * waste_2_4$Diesel_GHG
waste_2_4$Bj1_enduseemiss <- waste_2_4$Prod_ww * waste_2_4$Nonbio_emiss2 * waste_2_4$Bj1_tech
waste_2_4$Bj1_dispemiss <- 0 - waste_2_4$Bj1_d *waste_2_4$Diesel_GHG - waste_2_4$Bj1_g* waste_2_4$Gasoline_GHG - waste_2_4$Bj1_j * waste_2_4$Jet_GHG
waste_2_4$Bj1_dispemissmain <- 0 - waste_2_4$Bj1_d *waste_2_4$Diesel_GHG
waste_2_4$Bj1_dispemissco <- 0 - waste_2_4$Bj1_g* waste_2_4$Gasoline_GHG - waste_2_4$Bj1_j * waste_2_4$Jet_GHG
waste_2_4$Bj1_netemiss <- waste_2_4$Bj1_collectionemiss + waste_2_4$Bj1_transport1emiss + waste_2_4$Bj1_processemiss + waste_2_4$Bj1_transport2emiss + waste_2_4$Bj1_enduseemiss + waste_2_4$Bj1_dispemiss
#Bj2
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bj2 pathway: single product -- jet fuel; the only process input is
# hydrogen (Bj2.h2.in).
waste_2_4$Bj2_energy <- waste_2_4$Prod_ww * waste_2_4$Bj2.j.out
# BUG FIX: main-product energy previously copied Bj1_energy (copy-paste
# from the Bj1 section); Bj2's main product is its own jet-fuel output.
waste_2_4$Bj2_energymain <- waste_2_4$Bj2_energy
waste_2_4$Bj2_netenergy <- (waste_2_4$Bj2_energy - waste_2_4$Prod_ww * waste_2_4$Bj2.h2.in -
                            (waste_2_4$collection_en + waste_2_4$transport1_en) -
                            waste_2_4$Bj2_energy / 43.10 * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel) * waste_2_4$Bj2_tech
waste_2_4$Bj2_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$Bj2_tech
waste_2_4$Bj2_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$Bj2_tech
# BUG FIX: process emissions were not gated by Bj2_tech, unlike every other
# pathway's process term (cf. Bj1/Bj3/Bj4).
waste_2_4$Bj2_processemiss <- waste_2_4$Prod_ww * waste_2_4$Bj2.h2.in * waste_2_4$H2_GHG * waste_2_4$Bj2_tech
waste_2_4$Bj2_transport2emiss <- waste_2_4$Bj2_energy / 43.10 * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel * waste_2_4$Diesel_GHG
waste_2_4$Bj2_enduseemiss <- waste_2_4$Prod_ww * waste_2_4$Nonbio_emiss2 * waste_2_4$Bj2_tech
# Displacement credit: produced jet fuel offsets fossil jet fuel.
waste_2_4$Bj2_dispemiss <- 0 - waste_2_4$Bj2_energy * waste_2_4$Jet_GHG
waste_2_4$Bj2_dispemissmain <- waste_2_4$Bj2_dispemiss
waste_2_4$Bj2_netemiss <- waste_2_4$Bj2_collectionemiss + waste_2_4$Bj2_transport1emiss + waste_2_4$Bj2_processemiss + waste_2_4$Bj2_transport2emiss + waste_2_4$Bj2_enduseemiss + waste_2_4$Bj2_dispemiss
#Bj3
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bj3 pathway: jet fuel is the main product, diesel + gasoline are
# co-products; the only process input is electricity (Bj3.e.in).
waste_2_4$Bj3_d <- waste_2_4$Prod_ww * waste_2_4$Bj3.d.out
waste_2_4$Bj3_g <- waste_2_4$Prod_ww * waste_2_4$Bj3.g.out
waste_2_4$Bj3_j <- waste_2_4$Prod_ww * waste_2_4$Bj3.j.out
waste_2_4$Bj3_energy <- waste_2_4$Bj3_d + waste_2_4$Bj3_g + waste_2_4$Bj3_j
waste_2_4$Bj3_energymain <- waste_2_4$Bj3_j
waste_2_4$Bj3_energyco <- waste_2_4$Bj3_d + waste_2_4$Bj3_g
waste_2_4$Bj3_netenergy <-(waste_2_4$Bj3_energy - waste_2_4$Prod_ww * waste_2_4$Bj3.e.in -
                           (waste_2_4$collection_en + waste_2_4$transport1_en) -
                           (waste_2_4$Bj3_d / 42.79 + waste_2_4$Bj3_g / 41.74 +waste_2_4$Bj3_j / 43.10) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel) * waste_2_4$Bj3_tech
waste_2_4$Bj3_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$Bj3_tech
waste_2_4$Bj3_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$Bj3_tech
waste_2_4$Bj3_processemiss <- waste_2_4$Prod_ww * waste_2_4$Bj3.e.in * waste_2_4$Powergen_GHG * waste_2_4$Bj3_tech
waste_2_4$Bj3_transport2emiss <- (waste_2_4$Bj3_d / 42.79 + waste_2_4$Bj3_g / 41.74 + waste_2_4$Bj3_j / 43.10) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel * waste_2_4$Diesel_GHG
waste_2_4$Bj3_enduseemiss <- waste_2_4$Prod_ww * waste_2_4$Nonbio_emiss2 * waste_2_4$Bj3_tech
# Displacement credit: each fuel offsets its fossil counterpart.
waste_2_4$Bj3_dispemiss <- 0 - waste_2_4$Bj3_d *waste_2_4$Diesel_GHG - waste_2_4$Bj3_g* waste_2_4$Gasoline_GHG - waste_2_4$Bj3_j * waste_2_4$Jet_GHG
waste_2_4$Bj3_dispemissmain <- 0 - waste_2_4$Bj3_j * waste_2_4$Jet_GHG
waste_2_4$Bj3_dispemissco <- 0 - waste_2_4$Bj3_d * waste_2_4$Diesel_GHG - waste_2_4$Bj3_g * waste_2_4$Gasoline_GHG
waste_2_4$Bj3_netemiss <- waste_2_4$Bj3_collectionemiss + waste_2_4$Bj3_transport1emiss + waste_2_4$Bj3_processemiss + waste_2_4$Bj3_transport2emiss + waste_2_4$Bj3_enduseemiss + waste_2_4$Bj3_dispemiss
#Bj4
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bj4 pathway: same structure as Bj3 but with hydrogen (Bj4.h2.in) as the
# process input.
waste_2_4$Bj4_d <- waste_2_4$Prod_ww * waste_2_4$Bj4.d.out
waste_2_4$Bj4_g <- waste_2_4$Prod_ww * waste_2_4$Bj4.g.out
waste_2_4$Bj4_j <- waste_2_4$Prod_ww * waste_2_4$Bj4.j.out
waste_2_4$Bj4_energy <- waste_2_4$Bj4_d + waste_2_4$Bj4_g + waste_2_4$Bj4_j
waste_2_4$Bj4_energymain <- waste_2_4$Bj4_j
waste_2_4$Bj4_energyco <- waste_2_4$Bj4_d + waste_2_4$Bj4_g
waste_2_4$Bj4_netenergy <-(waste_2_4$Bj4_energy - waste_2_4$Prod_ww * waste_2_4$Bj4.h2.in -
                           (waste_2_4$collection_en + waste_2_4$transport1_en) -
                           (waste_2_4$Bj4_d / 42.79 + waste_2_4$Bj4_g / 41.74 +waste_2_4$Bj4_j / 43.10) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel) * waste_2_4$Bj4_tech
waste_2_4$Bj4_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$Bj4_tech
waste_2_4$Bj4_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$Bj4_tech
waste_2_4$Bj4_processemiss <- waste_2_4$Prod_ww * waste_2_4$Bj4.h2.in * waste_2_4$H2_GHG * waste_2_4$Bj4_tech
waste_2_4$Bj4_transport2emiss <- (waste_2_4$Bj4_d / 42.79 + waste_2_4$Bj4_g / 41.74 + waste_2_4$Bj4_j / 43.10) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel * waste_2_4$Diesel_GHG
waste_2_4$Bj4_enduseemiss <- waste_2_4$Prod_ww * waste_2_4$Nonbio_emiss2 * waste_2_4$Bj4_tech
waste_2_4$Bj4_dispemiss <- 0 - waste_2_4$Bj4_d *waste_2_4$Diesel_GHG - waste_2_4$Bj4_g* waste_2_4$Gasoline_GHG - waste_2_4$Bj4_j * waste_2_4$Jet_GHG
waste_2_4$Bj4_dispemissmain <- 0 - waste_2_4$Bj4_j * waste_2_4$Jet_GHG
waste_2_4$Bj4_dispemissco <- 0 - waste_2_4$Bj4_d * waste_2_4$Diesel_GHG - waste_2_4$Bj4_g * waste_2_4$Gasoline_GHG
waste_2_4$Bj4_netemiss <- waste_2_4$Bj4_collectionemiss + waste_2_4$Bj4_transport1emiss + waste_2_4$Bj4_processemiss + waste_2_4$Bj4_transport2emiss+ waste_2_4$Bj4_enduseemiss + waste_2_4$Bj4_dispemiss
#Bj5
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bj5 pathway: same structure as Bj3 (electricity process input).
waste_2_4$Bj5_d <- waste_2_4$Prod_ww * waste_2_4$Bj5.d.out
waste_2_4$Bj5_g <- waste_2_4$Prod_ww * waste_2_4$Bj5.g.out
waste_2_4$Bj5_j <- waste_2_4$Prod_ww * waste_2_4$Bj5.j.out
waste_2_4$Bj5_energy <- waste_2_4$Bj5_d + waste_2_4$Bj5_g + waste_2_4$Bj5_j
waste_2_4$Bj5_energymain <- waste_2_4$Bj5_j
waste_2_4$Bj5_energyco <- waste_2_4$Bj5_d + waste_2_4$Bj5_g
waste_2_4$Bj5_netenergy <-(waste_2_4$Bj5_energy - waste_2_4$Prod_ww * waste_2_4$Bj5.e.in -
                           (waste_2_4$collection_en + waste_2_4$transport1_en) -
                           (waste_2_4$Bj5_d / 42.79 + waste_2_4$Bj5_g / 41.74 +waste_2_4$Bj5_j / 43.10) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel) * waste_2_4$Bj5_tech
waste_2_4$Bj5_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$Bj5_tech
waste_2_4$Bj5_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$Bj5_tech
waste_2_4$Bj5_processemiss <- waste_2_4$Prod_ww * waste_2_4$Bj5.e.in * waste_2_4$Powergen_GHG * waste_2_4$Bj5_tech
waste_2_4$Bj5_transport2emiss <- (waste_2_4$Bj5_d / 42.79 + waste_2_4$Bj5_g / 41.74 + waste_2_4$Bj5_j / 43.10) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel * waste_2_4$Diesel_GHG
waste_2_4$Bj5_enduseemiss <- waste_2_4$Prod_ww * waste_2_4$Nonbio_emiss2 * waste_2_4$Bj5_tech
waste_2_4$Bj5_dispemiss <- 0 - waste_2_4$Bj5_d *waste_2_4$Diesel_GHG - waste_2_4$Bj5_g* waste_2_4$Gasoline_GHG - waste_2_4$Bj5_j * waste_2_4$Jet_GHG
waste_2_4$Bj5_dispemissmain <- 0 - waste_2_4$Bj5_j * waste_2_4$Jet_GHG
waste_2_4$Bj5_dispemissco <- 0 - waste_2_4$Bj5_d * waste_2_4$Diesel_GHG - waste_2_4$Bj5_g * waste_2_4$Gasoline_GHG
waste_2_4$Bj5_netemiss <- waste_2_4$Bj5_collectionemiss + waste_2_4$Bj5_transport1emiss + waste_2_4$Bj5_processemiss + waste_2_4$Bj5_transport2emiss + waste_2_4$Bj5_enduseemiss + waste_2_4$Bj5_dispemiss
#Bj6
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bj6 pathway: jet-fuel main product with both hydrogen and electricity as
# process inputs.
waste_2_4$Bj6_d <- waste_2_4$Prod_ww * waste_2_4$Bj6.d.out
waste_2_4$Bj6_g <- waste_2_4$Prod_ww * waste_2_4$Bj6.g.out
waste_2_4$Bj6_j <- waste_2_4$Prod_ww * waste_2_4$Bj6.j.out
waste_2_4$Bj6_energy <- waste_2_4$Bj6_d + waste_2_4$Bj6_g + waste_2_4$Bj6_j
waste_2_4$Bj6_energymain <- waste_2_4$Bj6_j
waste_2_4$Bj6_energyco <- waste_2_4$Bj6_d + waste_2_4$Bj6_g
waste_2_4$Bj6_netenergy <-(waste_2_4$Bj6_energy - waste_2_4$Prod_ww * waste_2_4$Bj6.h2.in - waste_2_4$Prod_ww * waste_2_4$Bj6.e.in -
                           (waste_2_4$collection_en + waste_2_4$transport1_en) -
                           (waste_2_4$Bj6_d / 42.79 + waste_2_4$Bj6_g / 41.74 +waste_2_4$Bj6_j / 43.10) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel) * waste_2_4$Bj6_tech
waste_2_4$Bj6_collectionemiss <- waste_2_4$collection_emiss * waste_2_4$Bj6_tech
waste_2_4$Bj6_transport1emiss <- waste_2_4$transport1_emiss * waste_2_4$Bj6_tech
waste_2_4$Bj6_processemiss <- waste_2_4$Prod_ww * (waste_2_4$Bj6.h2.in * waste_2_4$H2_GHG + waste_2_4$Bj6.e.in * waste_2_4$Powergen_GHG) * waste_2_4$Bj6_tech
waste_2_4$Bj6_transport2emiss <- (waste_2_4$Bj6_d / 42.79 + waste_2_4$Bj6_g / 41.74 + waste_2_4$Bj6_j / 43.10) * waste_2_4$Transport_km_2 * waste_2_4$Transport_Diesel * waste_2_4$Diesel_GHG
waste_2_4$Bj6_enduseemiss <- waste_2_4$Prod_ww * waste_2_4$Nonbio_emiss2 * waste_2_4$Bj6_tech
waste_2_4$Bj6_dispemiss <- 0 - waste_2_4$Bj6_d *waste_2_4$Diesel_GHG - waste_2_4$Bj6_g* waste_2_4$Gasoline_GHG - waste_2_4$Bj6_j * waste_2_4$Jet_GHG
waste_2_4$Bj6_dispemissmain <- 0 - waste_2_4$Bj6_j * waste_2_4$Jet_GHG
waste_2_4$Bj6_dispemissco <- 0 - waste_2_4$Bj6_d * waste_2_4$Diesel_GHG - waste_2_4$Bj6_g * waste_2_4$Gasoline_GHG
waste_2_4$Bj6_netemiss <- waste_2_4$Bj6_collectionemiss + waste_2_4$Bj6_transport1emiss + waste_2_4$Bj6_processemiss + waste_2_4$Bj6_transport2emiss + waste_2_4$Bj6_enduseemiss + waste_2_4$Bj6_dispemiss
# TD = 100km
# Scenario: transport distance TD = 100 km. Same pipeline as the 50 km
# scenario, using the 100 km coefficient table (coefficients3) and writing
# results to waste_2_5.
coefficients3 <- merge(coefficients3, tech)
waste_1_3 <- merge(waste, coefficients3)
waste_2_5 <- merge(waste_1_3, state_GHG_ef)
#Collection & Transportation_1 (energy - GJ, emissions - kg CO2e)
# Wet-weight production from dry tonnage and moisture content MC (%).
waste_2_5$Prod_ww <- waste_2_5$Prod/(1-waste_2_5$MC/100)
waste_2_5$collection_en <- waste_2_5$Prod_ww * waste_2_5$Collection_Diesel
waste_2_5$collection_emiss <- waste_2_5$collection_en * waste_2_5$Diesel_GHG
waste_2_5$transport1_en <- waste_2_5$Prod_ww * waste_2_5$Transport_km_1 * waste_2_5$Transport_Diesel
waste_2_5$transport1_emiss <- waste_2_5$transport1_en * waste_2_5$Diesel_GHG
#E1
#electricity T&D loss - 6.5%, heat loss - 20%
# E1 pathway: electricity (main) + heat (co-product); see the 50 km section
# for the term-by-term breakdown.
waste_2_5$E1_elec <- waste_2_5$Prod_ww * waste_2_5$E1.e.out * (1-0.065)
waste_2_5$E1_heat <- waste_2_5$Prod_ww * waste_2_5$E1.h.out * (1-0.2)
waste_2_5$E1_energy <- waste_2_5$E1_elec + waste_2_5$E1_heat
waste_2_5$E1_energymain <- waste_2_5$E1_elec
waste_2_5$E1_energyco <- waste_2_5$E1_heat
waste_2_5$E1_netenergy <- (waste_2_5$E1_energy - waste_2_5$Prod_ww * (waste_2_5$E1.e.in + waste_2_5$E1.h.in + waste_2_5$E1.d.in) - waste_2_5$collection_en - waste_2_5$transport1_en) * waste_2_5$E1_tech
waste_2_5$E1_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$E1_tech
waste_2_5$E1_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$E1_tech
waste_2_5$E1_processemiss <- waste_2_5$Prod_ww * (waste_2_5$E1.e.in * waste_2_5$Powergen_GHG
                                                  + waste_2_5$E1.h.in * waste_2_5$Heatgen_GHG
                                                  + waste_2_5$E1.d.in * waste_2_5$Diesel_GHG
                                                  + waste_2_5$Nonbio_emiss1) * waste_2_5$E1_tech
waste_2_5$E1_enduseemiss <- 0
waste_2_5$E1_dispemiss <- 0 - waste_2_5$E1_elec* waste_2_5$Powergen_GHG - waste_2_5$E1_heat* waste_2_5$Heatgen_GHG
waste_2_5$E1_dispemissmain <- 0 - waste_2_5$E1_elec* waste_2_5$Powergen_GHG
waste_2_5$E1_dispemissco <- 0 - waste_2_5$E1_heat* waste_2_5$Heatgen_GHG
waste_2_5$E1_netemiss <- waste_2_5$E1_collectionemiss + waste_2_5$E1_transport1emiss + waste_2_5$E1_processemiss + waste_2_5$E1_enduseemiss + waste_2_5$E1_dispemiss
#E2
#electricity T&D loss - 6.5%, heat loss - 20%
# E2 pathway: same structure as E1 with E2-specific coefficients.
waste_2_5$E2_elec <- waste_2_5$Prod_ww * waste_2_5$E2.e.out * (1-0.065)
waste_2_5$E2_heat <- waste_2_5$Prod_ww * waste_2_5$E2.h.out * (1-0.2)
waste_2_5$E2_energy <- waste_2_5$E2_elec + waste_2_5$E2_heat
waste_2_5$E2_energymain <- waste_2_5$E2_elec
waste_2_5$E2_energyco <- waste_2_5$E2_heat
waste_2_5$E2_netenergy <- (waste_2_5$E2_energy - waste_2_5$Prod_ww * (waste_2_5$E2.e.in + waste_2_5$E2.h.in + waste_2_5$E2.d.in) - waste_2_5$collection_en - waste_2_5$transport1_en) * waste_2_5$E2_tech
waste_2_5$E2_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$E2_tech
waste_2_5$E2_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$E2_tech
waste_2_5$E2_processemiss <- waste_2_5$Prod_ww * (waste_2_5$E2.e.in * waste_2_5$Powergen_GHG
                                                  + waste_2_5$E2.h.in * waste_2_5$Heatgen_GHG
                                                  + waste_2_5$E2.d.in * waste_2_5$Diesel_GHG
                                                  + waste_2_5$Nonbio_emiss1) * waste_2_5$E2_tech
waste_2_5$E2_enduseemiss <- 0
waste_2_5$E2_dispemiss <- 0 - waste_2_5$E2_elec* waste_2_5$Powergen_GHG - waste_2_5$E2_heat* waste_2_5$Heatgen_GHG
waste_2_5$E2_dispemissmain <- 0 - waste_2_5$E2_elec* waste_2_5$Powergen_GHG
waste_2_5$E2_dispemissco <- 0 - waste_2_5$E2_heat* waste_2_5$Heatgen_GHG
waste_2_5$E2_netemiss <- waste_2_5$E2_collectionemiss + waste_2_5$E2_transport1emiss + waste_2_5$E2_processemiss + waste_2_5$E2_enduseemiss + waste_2_5$E2_dispemiss
#E3
#electricity T&D loss - 6.5%
# E3 pathway: electricity only (no heat co-product).
waste_2_5$E3_energy <- waste_2_5$Prod_ww * waste_2_5$E3.e.out * (1-0.065)
waste_2_5$E3_energymain <- waste_2_5$E3_energy
waste_2_5$E3_netenergy <- (waste_2_5$E3_energy - waste_2_5$Prod_ww * (waste_2_5$E3.e.in + waste_2_5$E3.h.in + waste_2_5$E3.d.in) - waste_2_5$collection_en - waste_2_5$transport1_en) * waste_2_5$E3_tech
waste_2_5$E3_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$E3_tech
waste_2_5$E3_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$E3_tech
waste_2_5$E3_processemiss <- waste_2_5$Prod_ww * (waste_2_5$E3.e.in * waste_2_5$Powergen_GHG
                                                  + waste_2_5$E3.h.in * waste_2_5$Heatgen_GHG
                                                  + waste_2_5$E3.d.in * waste_2_5$Diesel_GHG
                                                  + waste_2_5$Nonbio_emiss1) * waste_2_5$E3_tech
waste_2_5$E3_enduseemiss <- 0
waste_2_5$E3_dispemiss <- 0 - waste_2_5$E3_energy* waste_2_5$Powergen_GHG
waste_2_5$E3_dispemissmain <- 0 - waste_2_5$E3_energy* waste_2_5$Powergen_GHG
waste_2_5$E3_netemiss <- waste_2_5$E3_collectionemiss + waste_2_5$E3_transport1emiss + waste_2_5$E3_processemiss + waste_2_5$E3_enduseemiss + waste_2_5$E3_dispemiss
#E4
#electricity T&D loss - 6.5%, heat loss - 20%
# E4 pathway: electricity main + heat co-product; natural gas and diesel
# process inputs.
waste_2_5$E4_elec <- waste_2_5$Prod_ww * waste_2_5$E4.e.out * (1-0.065)
waste_2_5$E4_heat <- waste_2_5$Prod_ww * waste_2_5$E4.h.out * (1-0.2)
waste_2_5$E4_energy <- waste_2_5$E4_elec + waste_2_5$E4_heat
waste_2_5$E4_energymain <- waste_2_5$E4_elec
waste_2_5$E4_energyco <- waste_2_5$E4_heat
waste_2_5$E4_netenergy <- (waste_2_5$E4_energy - waste_2_5$Prod_ww * (waste_2_5$E4.ng.in + waste_2_5$E4.d.in) - waste_2_5$collection_en - waste_2_5$transport1_en) * waste_2_5$E4_tech
waste_2_5$E4_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$E4_tech
waste_2_5$E4_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$E4_tech
waste_2_5$E4_processemiss <- waste_2_5$Prod_ww * (waste_2_5$E4.ng.in * waste_2_5$NG_GHG
                                                  + waste_2_5$E4.d.in * waste_2_5$Diesel_GHG
                                                  + waste_2_5$Nonbio_emiss1) * waste_2_5$E4_tech
waste_2_5$E4_enduseemiss <- 0
waste_2_5$E4_dispemiss <- 0 - waste_2_5$E4_elec* waste_2_5$Powergen_GHG - waste_2_5$E4_heat* waste_2_5$Heatgen_GHG
waste_2_5$E4_dispemissmain <- 0 - waste_2_5$E4_elec* waste_2_5$Powergen_GHG
waste_2_5$E4_dispemissco <- 0 - waste_2_5$E4_heat* waste_2_5$Heatgen_GHG
waste_2_5$E4_netemiss <- waste_2_5$E4_collectionemiss + waste_2_5$E4_transport1emiss + waste_2_5$E4_processemiss + waste_2_5$E4_enduseemiss + waste_2_5$E4_dispemiss
#M1
#methane leakage - 2%
# M1 pathway: pipeline methane; 2% fugitive leakage of gross output.
waste_2_5$M1_energy <- waste_2_5$Prod_ww * waste_2_5$M1.m.out * (1-0.02)
waste_2_5$M1_energymain <- waste_2_5$M1_energy
waste_2_5$M1_netenergy <- (waste_2_5$M1_energy - waste_2_5$Prod_ww * (waste_2_5$M1.e.in + waste_2_5$M1.h.in) - waste_2_5$collection_en - waste_2_5$transport1_en) * waste_2_5$M1_tech
waste_2_5$M1_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$M1_tech
waste_2_5$M1_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$M1_tech
waste_2_5$M1_processemiss <- waste_2_5$Prod_ww * (waste_2_5$M1.e.in * waste_2_5$Powergen_GHG + waste_2_5$M1.h.in * waste_2_5$Heatgen_GHG)* waste_2_5$M1_tech
# Fugitive CH4: 2% of output x GWP100 of 28; the /50 factor presumably
# converts energy to mass via CH4 LHV ~50 MJ/kg -- TODO confirm units.
waste_2_5$M1_transport2emiss <- waste_2_5$Prod_ww * waste_2_5$M1.m.out *0.02 /50 *28 * waste_2_5$M1_tech
waste_2_5$M1_enduseemiss <- waste_2_5$Prod_ww * waste_2_5$Nonbio_emiss2 * waste_2_5$M1_tech
# Displacement credit: produced methane offsets fossil natural gas.
waste_2_5$M1_dispemiss <- 0 - waste_2_5$M1_energy* waste_2_5$NG_GHG
waste_2_5$M1_dispemissmain <- waste_2_5$M1_dispemiss
waste_2_5$M1_netemiss <- waste_2_5$M1_collectionemiss + waste_2_5$M1_transport1emiss + waste_2_5$M1_processemiss + waste_2_5$M1_transport2emiss + waste_2_5$M1_enduseemiss + waste_2_5$M1_dispemiss
#M2
#methane leakage - 2%
# M2 pathway (100 km scenario): pipeline methane as the sole product; 2% of
# gross output is lost as fugitive leakage. All terms are gated by M2_tech.
waste_2_5$M2_energy <- waste_2_5$Prod_ww * waste_2_5$M2.m.out * (1-0.02)
waste_2_5$M2_energymain <- waste_2_5$M2_energy
waste_2_5$M2_netenergy <- (waste_2_5$M2_energy - waste_2_5$Prod_ww * (waste_2_5$M2.e.in + waste_2_5$M2.h.in + waste_2_5$M2.d.in) - waste_2_5$collection_en - waste_2_5$transport1_en) * waste_2_5$M2_tech
waste_2_5$M2_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$M2_tech
waste_2_5$M2_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$M2_tech
waste_2_5$M2_processemiss <- waste_2_5$Prod_ww * (waste_2_5$M2.e.in * waste_2_5$Powergen_GHG +
                                                  waste_2_5$M2.h.in * waste_2_5$Heatgen_GHG +
                                                  waste_2_5$M2.d.in * waste_2_5$Diesel_GHG) * waste_2_5$M2_tech
# Fugitive CH4: 2% of output x GWP100 of 28; the /50 factor presumably
# converts energy to mass via CH4 LHV ~50 MJ/kg -- TODO confirm units.
# BUG FIX: this term was gated by M1_tech (copy-paste from the M1 section);
# it must use M2_tech like every other M2 term.
waste_2_5$M2_transport2emiss <- waste_2_5$Prod_ww * waste_2_5$M2.m.out * 0.02 / 50 * 28 * waste_2_5$M2_tech
waste_2_5$M2_enduseemiss <- waste_2_5$Prod_ww * waste_2_5$Nonbio_emiss2 * waste_2_5$M2_tech
# Displacement credit: produced methane offsets fossil natural gas.
waste_2_5$M2_dispemiss <- 0 - waste_2_5$M2_energy * waste_2_5$NG_GHG
waste_2_5$M2_dispemissmain <- waste_2_5$M2_dispemiss
waste_2_5$M2_netemiss <- waste_2_5$M2_collectionemiss + waste_2_5$M2_transport1emiss + waste_2_5$M2_processemiss + waste_2_5$M2_transport2emiss + waste_2_5$M2_enduseemiss + waste_2_5$M2_dispemiss
#Eth1
#energy intensity of ethanol - 26.95 MJ/kg
# Eth1 pathway (100 km scenario): ethanol (main product) + electricity
# (co-product). Fuel mass for the second transport leg is derived from
# ethanol energy via the 26.95 MJ/kg energy intensity.
waste_2_5$Eth1_elec <- waste_2_5$Prod_ww * waste_2_5$Eth1.e.out * (1-0.065)
waste_2_5$Eth1_eth <- waste_2_5$Prod_ww * waste_2_5$Eth1.eth.out
waste_2_5$Eth1_energy <- waste_2_5$Eth1_elec + waste_2_5$Eth1_eth
waste_2_5$Eth1_energymain <- waste_2_5$Eth1_eth
waste_2_5$Eth1_energyco <- waste_2_5$Eth1_elec
# BUG FIX: the collection/transport term was spuriously multiplied by
# M1_tech (copy-paste from the M1 section); no other pathway applies a tech
# factor inside this subtraction (cf. Rd1/Bj1), and M1_tech is the wrong
# pathway flag in any case.
waste_2_5$Eth1_netenergy <- (waste_2_5$Eth1_energy - waste_2_5$Prod_ww * (waste_2_5$Eth1.ng.in + waste_2_5$Eth1.d.in) -
                             (waste_2_5$collection_en + waste_2_5$transport1_en) -
                             waste_2_5$Eth1_eth / 26.95 * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel) * waste_2_5$Eth1_tech
waste_2_5$Eth1_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$Eth1_tech
waste_2_5$Eth1_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$Eth1_tech
# NOTE(review): Eth1.ng.in is priced at Heatgen_GHG here while E4 prices
# ng.in at NG_GHG -- confirm which factor is intended.
waste_2_5$Eth1_processemiss <- waste_2_5$Prod_ww * (waste_2_5$Eth1.ng.in * waste_2_5$Heatgen_GHG + waste_2_5$Eth1.d.in * waste_2_5$Diesel_GHG) * waste_2_5$Eth1_tech
waste_2_5$Eth1_transport2emiss <- waste_2_5$Eth1_eth / 26.95 * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel * waste_2_5$Diesel_GHG
waste_2_5$Eth1_enduseemiss <- waste_2_5$Prod_ww * waste_2_5$Nonbio_emiss2 * waste_2_5$Eth1_tech
# Displacement credit: ethanol offsets gasoline, co-product electricity
# offsets grid power.
waste_2_5$Eth1_dispemiss <- 0 - waste_2_5$Eth1_eth * waste_2_5$Gasoline_GHG - waste_2_5$Eth1_elec * waste_2_5$Powergen_GHG
waste_2_5$Eth1_dispemissmain <- 0 - waste_2_5$Eth1_eth * waste_2_5$Gasoline_GHG
waste_2_5$Eth1_dispemissco <- 0 - waste_2_5$Eth1_elec * waste_2_5$Powergen_GHG
waste_2_5$Eth1_netemiss <- waste_2_5$Eth1_collectionemiss + waste_2_5$Eth1_transport1emiss + waste_2_5$Eth1_processemiss + waste_2_5$Eth1_transport2emiss + waste_2_5$Eth1_enduseemiss + waste_2_5$Eth1_dispemiss
#Rd1
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
#electricity T&D loss - 6.5%, methane leakage - 2%
# Rd1: renewable-fuel pathway 1. Products (from the Rd1.*.out coefficients):
# diesel (main) plus gasoline, jet, methane and electricity co-products; the
# methane and electricity streams are discounted for leakage / T&D losses.
waste_2_5$Rd1_d <- waste_2_5$Prod_ww * waste_2_5$Rd1.d.out
waste_2_5$Rd1_g <- waste_2_5$Prod_ww * waste_2_5$Rd1.g.out
waste_2_5$Rd1_j <- waste_2_5$Prod_ww * waste_2_5$Rd1.j.out
waste_2_5$Rd1_m <- waste_2_5$Prod_ww * waste_2_5$Rd1.m.out * (1-0.02)
waste_2_5$Rd1_elec <- waste_2_5$Prod_ww * waste_2_5$Rd1.e.out * (1-0.065)
waste_2_5$Rd1_energy <- waste_2_5$Rd1_d + waste_2_5$Rd1_g + waste_2_5$Rd1_j + waste_2_5$Rd1_m + waste_2_5$Rd1_elec
waste_2_5$Rd1_energymain <- waste_2_5$Rd1_d
waste_2_5$Rd1_energyco <- waste_2_5$Rd1_g + waste_2_5$Rd1_j + waste_2_5$Rd1_m + waste_2_5$Rd1_elec
# Net energy = gross output - electricity input - collection/first-stage
# transport - fuel-distribution transport (liquid fuels converted MJ -> kg by
# their energy intensities above), scaled by the Rd1 adoption share.
waste_2_5$Rd1_netenergy <-(waste_2_5$Rd1_energy - waste_2_5$Prod_ww * waste_2_5$Rd1.e.in -
                             (waste_2_5$collection_en + waste_2_5$transport1_en) -
                             waste_2_5$Rd1_d / 42.79 * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel -
                             waste_2_5$Rd1_g / 41.74 * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel -
                             waste_2_5$Rd1_j / 43.10 * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel ) * waste_2_5$Rd1_tech
waste_2_5$Rd1_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$Rd1_tech
waste_2_5$Rd1_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$Rd1_tech
waste_2_5$Rd1_processemiss <- waste_2_5$Prod_ww * waste_2_5$Rd1.e.in * waste_2_5$Powergen_GHG * waste_2_5$Rd1_tech
waste_2_5$Rd1_transport2emiss <- (waste_2_5$Rd1_d / 42.79 + waste_2_5$Rd1_g / 41.74 + waste_2_5$Rd1_j / 43.10) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel * waste_2_5$Diesel_GHG
waste_2_5$Rd1_enduseemiss <- waste_2_5$Prod_ww * waste_2_5$Nonbio_emiss2 * waste_2_5$Rd1_tech
# Displacement credits: each product displaces its fossil counterpart.
waste_2_5$Rd1_dispemiss <- 0 - waste_2_5$Rd1_d *waste_2_5$Diesel_GHG - waste_2_5$Rd1_g * waste_2_5$Gasoline_GHG - waste_2_5$Rd1_j * waste_2_5$Jet_GHG -
  waste_2_5$Rd1_m * waste_2_5$NG_GHG - waste_2_5$Rd1_elec * waste_2_5$Powergen_GHG
waste_2_5$Rd1_dispemissmain <- 0 - waste_2_5$Rd1_d *waste_2_5$Diesel_GHG
waste_2_5$Rd1_dispemissco <- 0 - waste_2_5$Rd1_g * waste_2_5$Gasoline_GHG - waste_2_5$Rd1_j * waste_2_5$Jet_GHG -
  waste_2_5$Rd1_m * waste_2_5$NG_GHG - waste_2_5$Rd1_elec * waste_2_5$Powergen_GHG
# NOTE(review): this sum lists enduse before transport2, unlike the other
# pathways; the total is unaffected by the ordering.
waste_2_5$Rd1_netemiss <- waste_2_5$Rd1_collectionemiss + waste_2_5$Rd1_transport1emiss + waste_2_5$Rd1_processemiss + waste_2_5$Rd1_enduseemiss + waste_2_5$Rd1_transport2emiss + waste_2_5$Rd1_dispemiss
#Rd2
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74
# Rd2: renewable-fuel pathway 2 (diesel main product, gasoline co-product;
# electricity and a natural-gas input charged at the H2 emission factor).
waste_2_5$Rd2_d <- waste_2_5$Prod_ww * waste_2_5$Rd2.d.out
waste_2_5$Rd2_g <- waste_2_5$Prod_ww * waste_2_5$Rd2.g.out
waste_2_5$Rd2_energy <- waste_2_5$Rd2_d + waste_2_5$Rd2_g
waste_2_5$Rd2_energymain <- waste_2_5$Rd2_d
waste_2_5$Rd2_energyco <- waste_2_5$Rd2_g
waste_2_5$Rd2_netenergy <-(waste_2_5$Rd2_energy - waste_2_5$Prod_ww * (waste_2_5$Rd2.e.in + waste_2_5$Rd2.ng.in) -
                             (waste_2_5$collection_en + waste_2_5$transport1_en) -
                             waste_2_5$Rd2_d / 42.79 * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel -
                             waste_2_5$Rd2_g / 41.74 * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel ) * waste_2_5$Rd2_tech
waste_2_5$Rd2_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$Rd2_tech
waste_2_5$Rd2_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$Rd2_tech
# The NG input is charged at the hydrogen-production emission factor (H2_GHG).
waste_2_5$Rd2_processemiss <- waste_2_5$Prod_ww * (waste_2_5$Rd2.e.in * waste_2_5$Powergen_GHG + waste_2_5$Rd2.ng.in * waste_2_5$H2_GHG) * waste_2_5$Rd2_tech
waste_2_5$Rd2_transport2emiss <- (waste_2_5$Rd2_d / 42.79 + waste_2_5$Rd2_g / 41.74) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel * waste_2_5$Diesel_GHG
waste_2_5$Rd2_enduseemiss <- waste_2_5$Prod_ww * waste_2_5$Nonbio_emiss2 * waste_2_5$Rd2_tech
waste_2_5$Rd2_dispemiss <- 0 - waste_2_5$Rd2_d *waste_2_5$Diesel_GHG - waste_2_5$Rd2_g* waste_2_5$Gasoline_GHG
waste_2_5$Rd2_dispemissmain <- 0 - waste_2_5$Rd2_d *waste_2_5$Diesel_GHG
waste_2_5$Rd2_dispemissco <- 0 - waste_2_5$Rd2_g* waste_2_5$Gasoline_GHG
waste_2_5$Rd2_netemiss <- waste_2_5$Rd2_collectionemiss + waste_2_5$Rd2_transport1emiss + waste_2_5$Rd2_processemiss + waste_2_5$Rd2_transport2emiss + waste_2_5$Rd2_enduseemiss + waste_2_5$Rd2_dispemiss
#Bj1
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bj1: bio-jet pathway 1 (diesel main product; gasoline and jet co-products;
# hydrogen process input).
waste_2_5$Bj1_d <- waste_2_5$Prod_ww * waste_2_5$Bj1.d.out
waste_2_5$Bj1_g <- waste_2_5$Prod_ww * waste_2_5$Bj1.g.out
waste_2_5$Bj1_j <- waste_2_5$Prod_ww * waste_2_5$Bj1.j.out
waste_2_5$Bj1_energy <- waste_2_5$Bj1_d + waste_2_5$Bj1_g + waste_2_5$Bj1_j
waste_2_5$Bj1_energymain <- waste_2_5$Bj1_d
waste_2_5$Bj1_energyco <- waste_2_5$Bj1_g + waste_2_5$Bj1_j
waste_2_5$Bj1_netenergy <-(waste_2_5$Bj1_energy - waste_2_5$Prod_ww * waste_2_5$Bj1.h2.in -
                             (waste_2_5$collection_en + waste_2_5$transport1_en) -
                             (waste_2_5$Bj1_d / 42.79 + waste_2_5$Bj1_g / 41.74 +waste_2_5$Bj1_j / 43.10) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel) * waste_2_5$Bj1_tech
waste_2_5$Bj1_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$Bj1_tech
waste_2_5$Bj1_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$Bj1_tech
waste_2_5$Bj1_processemiss <- waste_2_5$Prod_ww * waste_2_5$Bj1.h2.in * waste_2_5$H2_GHG * waste_2_5$Bj1_tech
waste_2_5$Bj1_transport2emiss <- (waste_2_5$Bj1_d / 42.79 + waste_2_5$Bj1_g / 41.74 + waste_2_5$Bj1_j / 43.10) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel * waste_2_5$Diesel_GHG
waste_2_5$Bj1_enduseemiss <- waste_2_5$Prod_ww * waste_2_5$Nonbio_emiss2 * waste_2_5$Bj1_tech
waste_2_5$Bj1_dispemiss <- 0 - waste_2_5$Bj1_d *waste_2_5$Diesel_GHG - waste_2_5$Bj1_g* waste_2_5$Gasoline_GHG - waste_2_5$Bj1_j * waste_2_5$Jet_GHG
waste_2_5$Bj1_dispemissmain <- 0 - waste_2_5$Bj1_d *waste_2_5$Diesel_GHG
waste_2_5$Bj1_dispemissco <- 0 - waste_2_5$Bj1_g* waste_2_5$Gasoline_GHG - waste_2_5$Bj1_j * waste_2_5$Jet_GHG
waste_2_5$Bj1_netemiss <- waste_2_5$Bj1_collectionemiss + waste_2_5$Bj1_transport1emiss + waste_2_5$Bj1_processemiss + waste_2_5$Bj1_transport2emiss + waste_2_5$Bj1_enduseemiss + waste_2_5$Bj1_dispemiss
#Bj2: bio-jet pathway 2 (single product: jet fuel; hydrogen process input).
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
waste_2_5$Bj2_energy <- waste_2_5$Prod_ww * waste_2_5$Bj2.j.out
# BUG FIX: energymain previously copied Bj1_energy; Bj2 produces a single
# product, so its main-product energy is its own total output.
waste_2_5$Bj2_energymain <- waste_2_5$Bj2_energy
# Net energy = jet output - hydrogen input - collection/first-stage transport -
# fuel-distribution transport, scaled by the Bj2 adoption share.
waste_2_5$Bj2_netenergy <-(waste_2_5$Bj2_energy - waste_2_5$Prod_ww * waste_2_5$Bj2.h2.in -
                             (waste_2_5$collection_en + waste_2_5$transport1_en) -
                             waste_2_5$Bj2_energy / 43.10 * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel ) * waste_2_5$Bj2_tech
waste_2_5$Bj2_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$Bj2_tech
waste_2_5$Bj2_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$Bj2_tech
# BUG FIX: process emissions were not scaled by the Bj2_tech adoption share,
# unlike every other pathway's processemiss (cf. Bj1, Bj3-Bj6).
waste_2_5$Bj2_processemiss <- waste_2_5$Prod_ww * waste_2_5$Bj2.h2.in * waste_2_5$H2_GHG * waste_2_5$Bj2_tech
waste_2_5$Bj2_transport2emiss <- waste_2_5$Bj2_energy / 43.10 * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel * waste_2_5$Diesel_GHG
waste_2_5$Bj2_enduseemiss <- waste_2_5$Prod_ww * waste_2_5$Nonbio_emiss2 * waste_2_5$Bj2_tech
# Displacement credit: produced jet fuel displaces conventional jet fuel.
waste_2_5$Bj2_dispemiss <- 0 - waste_2_5$Bj2_energy * waste_2_5$Jet_GHG
waste_2_5$Bj2_dispemissmain <- waste_2_5$Bj2_dispemiss
waste_2_5$Bj2_netemiss <- waste_2_5$Bj2_collectionemiss + waste_2_5$Bj2_transport1emiss + waste_2_5$Bj2_processemiss + waste_2_5$Bj2_transport2emiss + waste_2_5$Bj2_enduseemiss + waste_2_5$Bj2_dispemiss
#Bj3
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bj3: bio-jet pathway 3 (jet main product; diesel and gasoline co-products;
# electricity process input).
waste_2_5$Bj3_d <- waste_2_5$Prod_ww * waste_2_5$Bj3.d.out
waste_2_5$Bj3_g <- waste_2_5$Prod_ww * waste_2_5$Bj3.g.out
waste_2_5$Bj3_j <- waste_2_5$Prod_ww * waste_2_5$Bj3.j.out
waste_2_5$Bj3_energy <- waste_2_5$Bj3_d + waste_2_5$Bj3_g + waste_2_5$Bj3_j
waste_2_5$Bj3_energymain <- waste_2_5$Bj3_j
waste_2_5$Bj3_energyco <- waste_2_5$Bj3_d + waste_2_5$Bj3_g
# Net energy = gross output - electricity input - collection/first-stage
# transport - fuel-distribution transport, scaled by the Bj3 adoption share.
waste_2_5$Bj3_netenergy <-(waste_2_5$Bj3_energy - waste_2_5$Prod_ww * waste_2_5$Bj3.e.in -
                             (waste_2_5$collection_en + waste_2_5$transport1_en) -
                             (waste_2_5$Bj3_d / 42.79 + waste_2_5$Bj3_g / 41.74 +waste_2_5$Bj3_j / 43.10) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel) * waste_2_5$Bj3_tech
waste_2_5$Bj3_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$Bj3_tech
waste_2_5$Bj3_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$Bj3_tech
waste_2_5$Bj3_processemiss <- waste_2_5$Prod_ww * waste_2_5$Bj3.e.in * waste_2_5$Powergen_GHG * waste_2_5$Bj3_tech
waste_2_5$Bj3_transport2emiss <- (waste_2_5$Bj3_d / 42.79 + waste_2_5$Bj3_g / 41.74 + waste_2_5$Bj3_j / 43.10) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel * waste_2_5$Diesel_GHG
waste_2_5$Bj3_enduseemiss <- waste_2_5$Prod_ww * waste_2_5$Nonbio_emiss2 * waste_2_5$Bj3_tech
# Displacement credits: each fuel displaces its fossil counterpart.
waste_2_5$Bj3_dispemiss <- 0 - waste_2_5$Bj3_d *waste_2_5$Diesel_GHG - waste_2_5$Bj3_g* waste_2_5$Gasoline_GHG - waste_2_5$Bj3_j * waste_2_5$Jet_GHG
waste_2_5$Bj3_dispemissmain <- 0 - waste_2_5$Bj3_j * waste_2_5$Jet_GHG
waste_2_5$Bj3_dispemissco <- 0 - waste_2_5$Bj3_d * waste_2_5$Diesel_GHG - waste_2_5$Bj3_g * waste_2_5$Gasoline_GHG
waste_2_5$Bj3_netemiss <- waste_2_5$Bj3_collectionemiss + waste_2_5$Bj3_transport1emiss + waste_2_5$Bj3_processemiss + waste_2_5$Bj3_transport2emiss + waste_2_5$Bj3_enduseemiss + waste_2_5$Bj3_dispemiss
#Bj4
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bj4: like Bj3 but with a hydrogen process input instead of electricity.
waste_2_5$Bj4_d <- waste_2_5$Prod_ww * waste_2_5$Bj4.d.out
waste_2_5$Bj4_g <- waste_2_5$Prod_ww * waste_2_5$Bj4.g.out
waste_2_5$Bj4_j <- waste_2_5$Prod_ww * waste_2_5$Bj4.j.out
waste_2_5$Bj4_energy <- waste_2_5$Bj4_d + waste_2_5$Bj4_g + waste_2_5$Bj4_j
waste_2_5$Bj4_energymain <- waste_2_5$Bj4_j
waste_2_5$Bj4_energyco <- waste_2_5$Bj4_d + waste_2_5$Bj4_g
waste_2_5$Bj4_netenergy <-(waste_2_5$Bj4_energy - waste_2_5$Prod_ww * waste_2_5$Bj4.h2.in -
                             (waste_2_5$collection_en + waste_2_5$transport1_en) -
                             (waste_2_5$Bj4_d / 42.79 + waste_2_5$Bj4_g / 41.74 +waste_2_5$Bj4_j / 43.10) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel) * waste_2_5$Bj4_tech
waste_2_5$Bj4_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$Bj4_tech
waste_2_5$Bj4_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$Bj4_tech
waste_2_5$Bj4_processemiss <- waste_2_5$Prod_ww * waste_2_5$Bj4.h2.in * waste_2_5$H2_GHG * waste_2_5$Bj4_tech
waste_2_5$Bj4_transport2emiss <- (waste_2_5$Bj4_d / 42.79 + waste_2_5$Bj4_g / 41.74 + waste_2_5$Bj4_j / 43.10) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel * waste_2_5$Diesel_GHG
waste_2_5$Bj4_enduseemiss <- waste_2_5$Prod_ww * waste_2_5$Nonbio_emiss2 * waste_2_5$Bj4_tech
waste_2_5$Bj4_dispemiss <- 0 - waste_2_5$Bj4_d *waste_2_5$Diesel_GHG - waste_2_5$Bj4_g* waste_2_5$Gasoline_GHG - waste_2_5$Bj4_j * waste_2_5$Jet_GHG
waste_2_5$Bj4_dispemissmain <- 0 - waste_2_5$Bj4_j * waste_2_5$Jet_GHG
waste_2_5$Bj4_dispemissco <- 0 - waste_2_5$Bj4_d * waste_2_5$Diesel_GHG - waste_2_5$Bj4_g * waste_2_5$Gasoline_GHG
waste_2_5$Bj4_netemiss <- waste_2_5$Bj4_collectionemiss + waste_2_5$Bj4_transport1emiss + waste_2_5$Bj4_processemiss + waste_2_5$Bj4_transport2emiss+ waste_2_5$Bj4_enduseemiss + waste_2_5$Bj4_dispemiss
#Bj5
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bj5: like Bj3 (electricity input) with its own coefficients.
waste_2_5$Bj5_d <- waste_2_5$Prod_ww * waste_2_5$Bj5.d.out
waste_2_5$Bj5_g <- waste_2_5$Prod_ww * waste_2_5$Bj5.g.out
waste_2_5$Bj5_j <- waste_2_5$Prod_ww * waste_2_5$Bj5.j.out
waste_2_5$Bj5_energy <- waste_2_5$Bj5_d + waste_2_5$Bj5_g + waste_2_5$Bj5_j
waste_2_5$Bj5_energymain <- waste_2_5$Bj5_j
waste_2_5$Bj5_energyco <- waste_2_5$Bj5_d + waste_2_5$Bj5_g
waste_2_5$Bj5_netenergy <-(waste_2_5$Bj5_energy - waste_2_5$Prod_ww * waste_2_5$Bj5.e.in -
                             (waste_2_5$collection_en + waste_2_5$transport1_en) -
                             (waste_2_5$Bj5_d / 42.79 + waste_2_5$Bj5_g / 41.74 +waste_2_5$Bj5_j / 43.10) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel) * waste_2_5$Bj5_tech
waste_2_5$Bj5_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$Bj5_tech
waste_2_5$Bj5_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$Bj5_tech
waste_2_5$Bj5_processemiss <- waste_2_5$Prod_ww * waste_2_5$Bj5.e.in * waste_2_5$Powergen_GHG * waste_2_5$Bj5_tech
waste_2_5$Bj5_transport2emiss <- (waste_2_5$Bj5_d / 42.79 + waste_2_5$Bj5_g / 41.74 + waste_2_5$Bj5_j / 43.10) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel * waste_2_5$Diesel_GHG
waste_2_5$Bj5_enduseemiss <- waste_2_5$Prod_ww * waste_2_5$Nonbio_emiss2 * waste_2_5$Bj5_tech
waste_2_5$Bj5_dispemiss <- 0 - waste_2_5$Bj5_d *waste_2_5$Diesel_GHG - waste_2_5$Bj5_g* waste_2_5$Gasoline_GHG - waste_2_5$Bj5_j * waste_2_5$Jet_GHG
waste_2_5$Bj5_dispemissmain <- 0 - waste_2_5$Bj5_j * waste_2_5$Jet_GHG
waste_2_5$Bj5_dispemissco <- 0 - waste_2_5$Bj5_d * waste_2_5$Diesel_GHG - waste_2_5$Bj5_g * waste_2_5$Gasoline_GHG
waste_2_5$Bj5_netemiss <- waste_2_5$Bj5_collectionemiss + waste_2_5$Bj5_transport1emiss + waste_2_5$Bj5_processemiss + waste_2_5$Bj5_transport2emiss + waste_2_5$Bj5_enduseemiss + waste_2_5$Bj5_dispemiss
#Bj6
#energy intensity of diesel - 42.79 MJ/kg, gasoline - 41.74, jet - 43.10
# Bj6: bio-jet pathway 6 with both hydrogen and electricity process inputs.
waste_2_5$Bj6_d <- waste_2_5$Prod_ww * waste_2_5$Bj6.d.out
waste_2_5$Bj6_g <- waste_2_5$Prod_ww * waste_2_5$Bj6.g.out
waste_2_5$Bj6_j <- waste_2_5$Prod_ww * waste_2_5$Bj6.j.out
waste_2_5$Bj6_energy <- waste_2_5$Bj6_d + waste_2_5$Bj6_g + waste_2_5$Bj6_j
waste_2_5$Bj6_energymain <- waste_2_5$Bj6_j
waste_2_5$Bj6_energyco <- waste_2_5$Bj6_d + waste_2_5$Bj6_g
waste_2_5$Bj6_netenergy <-(waste_2_5$Bj6_energy - waste_2_5$Prod_ww * waste_2_5$Bj6.h2.in - waste_2_5$Prod_ww * waste_2_5$Bj6.e.in -
                             (waste_2_5$collection_en + waste_2_5$transport1_en) -
                             (waste_2_5$Bj6_d / 42.79 + waste_2_5$Bj6_g / 41.74 +waste_2_5$Bj6_j / 43.10) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel) * waste_2_5$Bj6_tech
waste_2_5$Bj6_collectionemiss <- waste_2_5$collection_emiss * waste_2_5$Bj6_tech
waste_2_5$Bj6_transport1emiss <- waste_2_5$transport1_emiss * waste_2_5$Bj6_tech
waste_2_5$Bj6_processemiss <- waste_2_5$Prod_ww * (waste_2_5$Bj6.h2.in * waste_2_5$H2_GHG + waste_2_5$Bj6.e.in * waste_2_5$Powergen_GHG) * waste_2_5$Bj6_tech
waste_2_5$Bj6_transport2emiss <- (waste_2_5$Bj6_d / 42.79 + waste_2_5$Bj6_g / 41.74 + waste_2_5$Bj6_j / 43.10) * waste_2_5$Transport_km_2 * waste_2_5$Transport_Diesel * waste_2_5$Diesel_GHG
waste_2_5$Bj6_enduseemiss <- waste_2_5$Prod_ww * waste_2_5$Nonbio_emiss2 * waste_2_5$Bj6_tech
waste_2_5$Bj6_dispemiss <- 0 - waste_2_5$Bj6_d *waste_2_5$Diesel_GHG - waste_2_5$Bj6_g* waste_2_5$Gasoline_GHG - waste_2_5$Bj6_j * waste_2_5$Jet_GHG
waste_2_5$Bj6_dispemissmain <- 0 - waste_2_5$Bj6_j * waste_2_5$Jet_GHG
waste_2_5$Bj6_dispemissco <- 0 - waste_2_5$Bj6_d * waste_2_5$Diesel_GHG - waste_2_5$Bj6_g * waste_2_5$Gasoline_GHG
waste_2_5$Bj6_netemiss <- waste_2_5$Bj6_collectionemiss + waste_2_5$Bj6_transport1emiss + waste_2_5$Bj6_processemiss + waste_2_5$Bj6_transport2emiss + waste_2_5$Bj6_enduseemiss + waste_2_5$Bj6_dispemiss
##extracting net emissions
# Four transport-distance scenarios are summarized identically:
#   waste_sen4 <- waste_2_3 ("25km"),  waste_sen5 <- waste_2_4 ("50km"),
#   waste_sen6 <- waste_2_5 ("100km"), waste_sen7 <- waste_2   ("150km").
# Each block: pull the per-tech net-emission columns, sum by waste type,
# reshape to long, split "<Tech>_net" into Tech/Stage (after stripping "emiss"),
# convert kg to metric tonnes per tonne wet waste, and order the Tech factor.
waste_sen4 <- subset(waste_2_3, select = c(Waste_type, Prod_ww, E1_netemiss,E2_netemiss,E3_netemiss,E4_netemiss,M1_netemiss,
                                           M2_netemiss,Eth1_netemiss, Rd1_netemiss,Rd2_netemiss,Bj1_netemiss,Bj2_netemiss,
                                           Bj3_netemiss,Bj4_netemiss,Bj5_netemiss, Bj6_netemiss))
waste_sen4_1 <- aggregate(.~Waste_type, waste_sen4, sum)
waste_sen4_1 <- gather(waste_sen4_1, category, emiss_kg, E1_netemiss:Bj6_netemiss)
waste_sen4_1$category <- gsub("emiss", "", waste_sen4_1$category)
waste_sen4_1 <- separate(data = waste_sen4_1, col = category, into = c("Tech", "Stage"), sep = "\\_")
waste_sen4_1$emiss_MT_per <- waste_sen4_1$emiss_kg / waste_sen4_1$Prod_ww / 1000
waste_sen4_1$Tech <- factor(waste_sen4_1$Tech,
                            levels = c("E1", "E2","E3","E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                       "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6"))
waste_sen4_1$TD <- "25km"
# 50 km scenario (waste_2_4).
waste_sen5 <- subset(waste_2_4, select = c(Waste_type, Prod_ww, E1_netemiss,E2_netemiss,E3_netemiss,E4_netemiss,M1_netemiss,
                                           M2_netemiss,Eth1_netemiss, Rd1_netemiss,Rd2_netemiss,Bj1_netemiss,Bj2_netemiss,
                                           Bj3_netemiss,Bj4_netemiss,Bj5_netemiss, Bj6_netemiss))
waste_sen5_1 <- aggregate(.~Waste_type, waste_sen5, sum)
waste_sen5_1 <- gather(waste_sen5_1, category, emiss_kg, E1_netemiss:Bj6_netemiss)
waste_sen5_1$category <- gsub("emiss", "", waste_sen5_1$category)
waste_sen5_1 <- separate(data = waste_sen5_1, col = category, into = c("Tech", "Stage"), sep = "\\_")
waste_sen5_1$emiss_MT_per <- waste_sen5_1$emiss_kg / waste_sen5_1$Prod_ww / 1000
waste_sen5_1$Tech <- factor(waste_sen5_1$Tech,
                            levels = c("E1", "E2","E3","E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                       "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6"))
waste_sen5_1$TD <- "50km"
# 100 km scenario (waste_2_5).
waste_sen6 <- subset(waste_2_5, select = c(Waste_type, Prod_ww, E1_netemiss,E2_netemiss,E3_netemiss,E4_netemiss,M1_netemiss,
                                           M2_netemiss,Eth1_netemiss, Rd1_netemiss,Rd2_netemiss,Bj1_netemiss,Bj2_netemiss,
                                           Bj3_netemiss,Bj4_netemiss,Bj5_netemiss, Bj6_netemiss))
waste_sen6_1 <- aggregate(.~Waste_type, waste_sen6, sum)
waste_sen6_1 <- gather(waste_sen6_1, category, emiss_kg, E1_netemiss:Bj6_netemiss)
waste_sen6_1$category <- gsub("emiss", "", waste_sen6_1$category)
waste_sen6_1 <- separate(data = waste_sen6_1, col = category, into = c("Tech", "Stage"), sep = "\\_")
waste_sen6_1$emiss_MT_per <- waste_sen6_1$emiss_kg / waste_sen6_1$Prod_ww / 1000
waste_sen6_1$Tech <- factor(waste_sen6_1$Tech,
                            levels = c("E1", "E2","E3","E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                       "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6"))
waste_sen6_1$TD <- "100km"
# 150 km scenario (base data frame waste_2).
waste_sen7 <- subset(waste_2, select = c(Waste_type, Prod_ww, E1_netemiss,E2_netemiss,E3_netemiss,E4_netemiss,M1_netemiss,
                                         M2_netemiss,Eth1_netemiss, Rd1_netemiss,Rd2_netemiss,Bj1_netemiss,Bj2_netemiss,
                                         Bj3_netemiss,Bj4_netemiss,Bj5_netemiss, Bj6_netemiss))
waste_sen7_1 <- aggregate(.~Waste_type, waste_sen7, sum)
waste_sen7_1 <- gather(waste_sen7_1, category, emiss_kg, E1_netemiss:Bj6_netemiss)
waste_sen7_1$category <- gsub("emiss", "", waste_sen7_1$category)
waste_sen7_1 <- separate(data = waste_sen7_1, col = category, into = c("Tech", "Stage"), sep = "\\_")
waste_sen7_1$emiss_MT_per <- waste_sen7_1$emiss_kg / waste_sen7_1$Prod_ww / 1000
waste_sen7_1$Tech <- factor(waste_sen7_1$Tech,
                            levels = c("E1", "E2","E3","E4", "M1", "M2", "Eth1", "Rd1", "Rd2",
                                       "Bj1", "Bj2", "Bj3", "Bj4", "Bj5", "Bj6"))
waste_sen7_1$TD <- "150km"
# Combine the four transport-distance scenarios, dropping the constant "Stage"
# column before stacking.
# BUG FIX: each subset previously located the "Stage" column via the names of a
# *different* data frame (waste_sen1_1..waste_sen4_1, left over from an earlier
# sensitivity block); that only worked because all the frames happen to share a
# layout, and would silently misbehave if any layout diverged. Each frame now
# indexes its own names.
waste_sen_td <- rbind(waste_sen4_1[ , -which(names(waste_sen4_1) == "Stage")],
                      waste_sen5_1[ , -which(names(waste_sen5_1) == "Stage")],
                      waste_sen6_1[ , -which(names(waste_sen6_1) == "Stage")],
                      waste_sen7_1[ , -which(names(waste_sen7_1) == "Stage")])
# Order the TD factor so the plot legend/bars run shortest to longest distance.
waste_sen_td$TD <- factor(waste_sen_td$TD,
                          levels = c("25km", "50km", "100km", "150km"))
#sensitivity chart
# One shade of blue per transport-distance scenario.
colors_power <- c("25km" = "#6BAED6",
                  "50km" = "#4292C6",
                  "100km" = "#2171B5",
                  "150km" = "#08519C")
# Grouped (dodged) bars of net emissions per technology, colored by transport
# distance and faceted by waste type; zero-valued bars are dropped so unused
# tech/waste combinations do not appear.
p <- ggplot()+
  geom_bar(data = waste_sen_td[which(waste_sen_td$emiss_MT_per != 0),],
           aes(x=Tech, y=emiss_MT_per, fill=TD), stat="identity", position = "dodge") +
  geom_hline(yintercept=0, size=0.05)+
  theme_bw() +
  theme(text = element_text(size=20)) +
  scale_fill_manual(values=colors_power) +
  guides(fill = guide_legend(title = "", label.theme = element_text(size = 20, angle = 0))) +
  scale_y_continuous(name="Metric tonne CO2e/tonne ww", limits = c(-1.5, 1)) +
  labs(x = '') +
  theme(legend.position="top",
        axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.3),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
  facet_grid(~Waste_type, scales = "free_x", space = "free_x")+
  theme(strip.background =element_rect(fill="white"), strip.text = element_text(size = 20, face = "bold"))
# ggtitle("Net GHG emissions - sensitivity analysis on transportation distance") +
# theme(plot.title = element_text(face = "bold", size = 24, hjust = 0))
# Write the figure next to the other county-level outputs.
ggsave(paste(County_FOLDER, "/US_netemiss_SA_TD.png", sep=""), plot=p, width=20,height=6,units="in",dpi=300)
|
b384fbf4232b55f5be0d24fcf09a528be940db6d
|
645e6f2cf029b8ed0a497d7bd19c443b7c54ba8d
|
/Locations_App/ui.R
|
bce35270455ca5f5952c91f22945f3c56a0141a6
|
[] |
no_license
|
ismorozov/minor_project
|
9e4078dafada41530e623b7ed5720360fcd1c61c
|
14326efacc15ba72c2b8b2d252ea90d09dc74ae0
|
refs/heads/master
| 2021-01-19T13:03:26.055701
| 2017-05-23T18:42:00
| 2017-05-23T18:42:00
| 82,376,201
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 888
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# leaflet provides leafletOutput() used below; previously this file relied on
# the package being attached elsewhere (e.g. in global.R or server.R), which
# breaks if ui.R is evaluated on its own.
library(leaflet)
# Load the ggplot2 package which provides
# the 'mpg' dataset.
library(ggplot2)
# NOTE(review): `film_df` is not defined in this file; it is presumably created
# in global.R before the UI is built -- confirm.
fluidPage(
  # Interactive map rendered by the server as output$mymap.
  leafletOutput("mymap"),
  titlePanel("Basic DataTable"),
  # Create a new Row in the UI for selectInputs
  fluidRow(
    column(4,
           # Filter the table by film title ("All" disables the filter).
           selectInput("f1",
                       "Film:",
                       c("All",
                         unique(film_df$FilmName)))
    ) ,
    column(4,
           # Filter the table by location ("All" disables the filter).
           selectInput("f2",
                       "Location:",
                       c("All",
                         unique(film_df$Location)))
    )
  ),
  # Create a new row for the table.
  fluidRow(
    DT::dataTableOutput("table")
  )
)
|
acfc2c72dacee5eaac491272d22e59a21aba2b68
|
1e732ce310d34a91d011ec609769fccbf9fb1160
|
/cachematrix.R
|
dcf8011f4696950ed6924055d03cc845feb4d89c
|
[] |
no_license
|
jamesasselin/ProgrammingAssignment2
|
0755e2f5f233aea1f13e2582f47f557cefb22261
|
e1042c626c8967efcc526dfa2c3d63126a685e63
|
refs/heads/master
| 2021-01-17T21:51:20.032987
| 2016-06-07T04:18:40
| 2016-06-07T04:18:40
| 60,581,454
| 0
| 0
| null | 2016-06-07T04:12:06
| 2016-06-07T04:12:06
| null |
UTF-8
|
R
| false
| false
| 876
|
r
|
cachematrix.R
|
## Functions to compute the inverse of a matrix and cache the result so that
## repeated requests do not recompute it.

## makeCacheMatrix: create a special "matrix" object that can cache its inverse.
## Returns a list of accessors:
##   set(y)  -- replace the stored matrix (and invalidate the cache)
##   get()   -- return the stored matrix
##   seti(i) -- store a computed inverse
##   geti()  -- return the cached inverse, or NULL if none
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL                      # cached inverse (NULL until computed)
  set <- function(y) {
    x <<- y
    i <<- NULL                   # matrix changed: drop the stale inverse
  }
  get <- function() x
  seti <- function(ii) i <<- ii
  # BUG FIX: previously returned `ii`, which is not defined in this
  # environment and would error (or leak an unrelated global) when called.
  geti <- function() i
  list(set = set, get = get, seti = seti, geti = geti)
}

## cacheSolve: return the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), the cached inverse is returned instead of being
## recomputed. Extra arguments in `...` are passed on to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  i <- x$geti()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  # BUG FIX: previously `data <- x$get` stored the accessor *function* itself
  # rather than calling it, so solve() received a closure instead of a matrix.
  data <- x$get()
  i <- solve(data, ...)
  # BUG FIX: previously `x$seti(ii)` referenced an undefined name; the
  # freshly computed inverse is `i`.
  x$seti(i)
  i
}
|
e9254c76937dfd9823e1bdccdca4f0e0ddbb9a5e
|
68fe900eda00733f3a20312556dbf7e246b277ed
|
/Introduction to linear regression/Ex7_20.R
|
969f951a15108a50a7510709a165ff54880f4d74
|
[] |
no_license
|
thesaltree/TBC_R
|
299e6f697294c1ddd51f128cfc46627a9ee4a178
|
e3f35a7209adf77f1eb75421e5f2e80b105ade83
|
refs/heads/master
| 2021-07-14T08:40:57.482143
| 2019-10-15T20:58:40
| 2019-10-15T20:58:40
| 215,079,409
| 0
| 1
| null | 2023-01-02T18:37:52
| 2019-10-14T15:29:37
|
R
|
UTF-8
|
R
| false
| false
| 280
|
r
|
Ex7_20.R
|
## Section: 7.2.5, Page No.: 346
## Least-squares prediction from the fitted line:
##   aid = 24.32758 - 0.0431 * family_income, with income in $1000 units.
## Predict the aid for a freshman whose family income is $1,000,000.
income_in_thousands <- 1000000 / 1000  # $1,000,000 expressed in $1000 units
beta0 <- 24.32758                      # intercept of the least-squares line
beta1 <- -0.0431                       # slope: change in aid per extra $1000
aid <- beta0 + beta1 * income_in_thousands
aid
cat("The aid of another freshman student is",aid*1000,"$")
|
93b3747d173a9de30236717f1be0d61f9fd6b582
|
6d8572fb50a9ba39e6372ff0de70aac877d50ec7
|
/R/plot_pc_simple_complex.R
|
5ab7c2e6ced16d394c9d27dad647bef71ac4b5ee
|
[] |
no_license
|
erikerhardt/isogasex
|
aed346bf689f28dce3d8500dc799e80b7354c037
|
2e3fc9c21c1d3d8e2348b7bff28954b5a169b0e8
|
refs/heads/master
| 2020-05-22T00:32:30.670300
| 2019-07-16T04:43:20
| 2019-07-16T04:43:20
| 186,173,267
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,086
|
r
|
plot_pc_simple_complex.R
|
#' Plot pc, simple and complex
#'
#' Draws a four-panel time-series figure of total pc under four Delta models,
#' writing one figure per requested plot format.
#'
#' @param chamber_Totalpc_using_simple_Delta_for_gm Total pc series, simple Delta for gm.
#' @param chamber_Totalpc_using_simple_Delta_for_modeling Total pc series, simple Delta for modeling.
#' @param chamber_Totalpc_using_complex_Delta_no_decarboxylation Total pc series, complex Delta, no decarboxylation.
#' @param chamber_Totalpc_using_complex_Delta_full_model Total pc series, complex Delta, full model.
#' @param x_time Time values for the x-axis.
#' @param plot_format_list Plot formats to render (one output file per format).
#' @param output_fn_prefix Filename prefix for the output plots.
#'
#' @return NULL, invisibly.
#' @importFrom graphics par
#'
plot_pc_simple_complex <- function(chamber_Totalpc_using_simple_Delta_for_gm,
                                   chamber_Totalpc_using_simple_Delta_for_modeling,
                                   chamber_Totalpc_using_complex_Delta_no_decarboxylation,
                                   chamber_Totalpc_using_complex_Delta_full_model,
                                   x_time,
                                   plot_format_list,
                                   output_fn_prefix) {
  plot_filename <- "plot_pc_simple_complex"
  for (fmt in plot_format_list) {
    s_plot_settings_begin_end(output_fn_prefix, plot_filename, plot_mode = "begin", plot_format = fmt)
    # Four stacked panels; mar lets the panels touch top-bottom c(bot, lef, top, rig).
    graphics::par(mfrow = c(4, 1), mar = c(4, 4, 2, 2), oma = c(1, 1, 1, 1))
    plot_not_na(x_time, chamber_Totalpc_using_simple_Delta_for_gm, pch = 20, type = "l", ylab = "", main = "Total pc using simple D for gm, includes boundary layer")
    plot_not_na(x_time, chamber_Totalpc_using_simple_Delta_for_modeling, pch = 20, type = "l", ylab = "", main = "Total pc using simple D for modeling")
    plot_not_na(x_time, chamber_Totalpc_using_complex_Delta_no_decarboxylation, pch = 20, type = "l", ylab = "", main = "Total pc using complex D, no decarboxylation")
    plot_not_na(x_time, chamber_Totalpc_using_complex_Delta_full_model, pch = 20, type = "l", ylab = "", main = "Total pc using complex D, full model")
    #axis(3); axis(4); # add axis labels to top and right sides
    s_plot_settings_begin_end(output_fn_prefix, plot_filename, plot_mode = "end", fmt)
  }
  invisible(NULL)
}
|
c13f1b6c7de1e77b0cfa21a5be2dde0ba073f168
|
fa1d5ee8e2c1d497ee6a20bb0a00d6224bfc8d71
|
/Week4/plot2.R
|
428976a41fa74b6e899abd8a5661a82e75a6214d
|
[] |
no_license
|
remembrance1/Coursera_Exploratory-Data-Analysis
|
57a4c80c70492c10e92c4f7812aac40edf25d398
|
b4f16038a26c81e15c45690915f573b4b117a858
|
refs/heads/master
| 2020-03-24T04:02:58.228211
| 2018-08-04T04:38:36
| 2018-08-04T04:38:36
| 142,442,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 446
|
r
|
plot2.R
|
# Question: have total PM2.5 emissions in Baltimore City, Maryland
# (fips == "24510") decreased from 1999 to 2008?

# Keep only Baltimore City records.
baltimore <- subset(NEI, fips == "24510")
# Sum emissions within each year.
yearly_totals <- aggregate(Emissions ~ year, baltimore, sum)
# One bar per year of total PM2.5 emissions.
barplot(yearly_totals$Emissions,
        names = yearly_totals$year,
        xlab = "Year",
        ylab = "PM2.5 Emissions",
        main = "PM2.5 Emission Totals (Baltimore City)")
|
5f15af23b31d6876189ed28992323442af1af7d0
|
465612160cef5a59e19057c282b10b1e23a15357
|
/scripts/evictions_boundaries.R
|
cf58fd2fa590f1fc47ff98ea036b40b2c0e6299d
|
[] |
no_license
|
amykouch/sc-evictions
|
7d9a2e35990066fe01cb16ffa25be0c5e9471c9c
|
cf450a62f9d1dfa4d0d7476765143649b0d20134
|
refs/heads/master
| 2020-12-23T07:28:24.512694
| 2020-01-31T01:46:45
| 2020-01-31T01:46:45
| 237,083,537
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 767
|
r
|
evictions_boundaries.R
|
# Description
# Join South Carolina city eviction records to their city boundary geometries.
# Author: Amy Kouch
# Version: 2020-01-30

# Libraries
library(tidyverse)
library(vroom)
library(utils)
library(sf)

# Parameters

# Input files from data
file_evictions <- here::here("data/evictions.rds")
file_boundaries <- here::here("data/boundaries.rds")

# Output files from data
file_aggregate_rds <- here::here("data/evictions_boundaries.rds")

#===============================================================================

evictions_data <- read_rds(file_evictions)
boundary_data <- read_rds(file_boundaries)

# Attach each city's boundary geometry to its eviction records, keep the
# columns used downstream, and persist the combined table (gz-compressed).
evictions_data %>%
  left_join(boundary_data, by = c("geoid" = "city_id")) %>%
  select(geoid, year, name, evictions, geometry) %>%
  write_rds(file_aggregate_rds, compress = "gz")
|
5bf0ffc26f173be3a4daa25b6427359be00e225f
|
87115a53263db358e2b3e3bea6651763b3728624
|
/heatmap/cluster03.R
|
0ef0b922ac6a146a3cf1549f75efc710cf9e01a7
|
[] |
no_license
|
liuxm0239/demo_R
|
e762f841053af972d99792b9a8c20d46311ee754
|
79814540b9bf85fa52c7cfa398bd2aab6417bfb4
|
refs/heads/master
| 2020-03-20T21:40:19.751634
| 2018-08-29T01:12:06
| 2018-08-29T01:12:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 685
|
r
|
cluster03.R
|
# Usage: R --slave --args <input table> <output prefix> < cluster03.R
# Reads a tab-delimited table (header row, row names in the first column),
# transposes it, and writes a clustered heatmap to <output prefix>.pdf.
library("gplots")

args <- commandArgs(TRUE)

# args[1]: tab-separated input table with header and row names.
a <- read.table(args[1], header = TRUE, row.names = 1, sep = "\t")
# Transpose so the table's columns become heatmap rows.
data <- as.matrix(t(a))

# NOTE: a dead `colbar` assignment (50 green / 50 yellow side colors) was
# removed; it was hard-coded for 100 columns and never passed to heatmap.2().

# args[2]: output file prefix for the PDF.
pdf(paste(args[2], ".pdf", sep = ""))
heatmap.2(data,
          # Alternative palette kept for reference:
          # col = colorRampPalette(c("navy", "white", "firebrick3"))(100),
          col = colorRampPalette(c("seashell", "navy", "firebrick3"))(100), revC = FALSE,
          Rowv = TRUE, Colv = TRUE,
          scale = "none", key = TRUE, symkey = FALSE, trace = "none", labRow = NULL, labCol = NULL,
          density.info = "none", cexRow = 0.5, cexCol = 0.7, keysize = 1.2)
dev.off()
|
144d38dee125a0470a31a1679f6ada65ae368b11
|
a73f54821b6cee9f343e053b775ce5f6d6ac19a5
|
/analyses/ELgraphs.R
|
555e28eb69a408209910e5a82222981a0ad58045
|
[] |
no_license
|
lizzieinvancouver/heattolerance
|
94d6b6280ce7076ffbb66b18e9ef1ac2d5d43908
|
6847234de9b498cafd68636f95b9d06a9b6f6738
|
refs/heads/master
| 2021-07-11T19:23:33.346418
| 2020-10-16T18:54:25
| 2020-10-16T18:54:25
| 73,869,765
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,897
|
r
|
ELgraphs.R
|
## Started 27 January 2017 ##
## By Nicole ##
## EL stage: grape phenology (modified E-L scale) against soil moisture,
## spur diameter and node size; plus estimated budburst dates by variety.

## general housekeeping ##
rm(list=ls())
options(stringsAsFactors = FALSE)

## libraries
library(ggplot2)
require(plyr); require(dplyr); require(tidyr)

# Set working directory:
# (fixed misplaced paren: the `> 0` belongs outside length(); behavior is the
# same but the original `length(grep(...) > 0)` only worked by accident)
if(length(grep("Lizzie", getwd())) > 0) { setwd("~/Documents/git/projects/vinmisc/heattolerance/analyses")
} else
setwd("/Users/Nicole/Desktop/Wolkovich/analysis/")

# get one f(x)
source("source/estimatephen.R") # more notes in this file, check it out!

## get data
nodespurdater <- read.csv("input/baselinespursize.csv", header=TRUE)
dater <- read.csv("input/phenmoist_grapes2016.csv", header=TRUE)
ids <- read.csv("input/VitisExpReps2.csv", header=TRUE)

## change header names here so the join keys line up across files
names(ids)[names(ids)=="Row"] <- "RowNum"
names(ids)[names(ids)=="Plant"] <- "Num"
names(ids)[names(ids)=="Variety"] <- "Var"
ids.sm <- subset(ids, select=c("RowNum", "Num", "Var"))

## join dfs (plyr::join keeps the row order of the left data frame)
dats <- join(dater, ids.sm, by=c("RowNum", "Num"))
nsdats <- join(nodespurdater, ids.sm, by=c("RowNum", "Num"))

## format date (see http://www.statmethods.net/input/dates.html)
# BUG FIX: parse the Date column of dats itself, not of dater -- the two only
# agreed because join() happens to preserve row order.
dats$Date <- as.Date(dats$Date, format="%m/%d/%Y")
dats$days <- as.numeric(format(dats$Date, "%j"))-228 # 245 is around the start of September

## get averages: EL stage (cols 6-7) and soil moisture (cols 8-10)
dats$EL_mean <- rowMeans(dats[,6:7], na.rm=TRUE)
dats$sm_mean <- rowMeans(dats[,8:10], na.rm=TRUE)
## fix this
nsdats$diam_mean <- rowMeans(nsdats[,10:11], na.rm=TRUE)
nsdats$ndsz_mean <- rowMeans(nsdats[6:9], na.rm=TRUE)

## histogram soil moisture
ggplot(dats, aes(sm_mean), na.rm=TRUE) + geom_histogram(breaks=seq(0, 35, by = 1))

## categorize by soil moisture (quartile-ish cut points chosen from histogram)
dats <- within(dats, {
sm_cat <- NA
sm_cat[sm_mean<=11.2] <- "driest"
sm_cat[sm_mean>11.2 & sm_mean<=13.1] <- "dry"
sm_cat[sm_mean>13.1 & sm_mean<=15.1] <- "moist"
sm_cat[sm_mean>15.1] <- "very moist"
})

## histogram diam_mean
ggplot(nsdats, aes(diam_mean), na.rm=TRUE) + geom_histogram(breaks=seq(0, 12, by = 1))

## categorize by spur diameter
nsdats <- within(nsdats, {
diam_cat <- NA
diam_cat[diam_mean<3] <- "1.00-3.00cm"
diam_cat[diam_mean>3 & diam_mean<=5] <- "3.00-5.00cm"
diam_cat[diam_mean>5 & diam_mean<=7] <- "5.00-7.00cm"
diam_cat[diam_mean>7 & diam_mean<=9] <- "7.00-9.00cm"
diam_cat[diam_mean>9] <- "9.00-11.00cm"
})

## histogram node size
ggplot(nsdats, aes(ndsz_mean), na.rm=TRUE) + geom_histogram(breaks=seq(0, 8, by = 1))

## categorize node size
nsdats <- within(nsdats, {
ndsz_cat <- NA
ndsz_cat[ndsz_mean<3] <- "1.00-3.00cm"
ndsz_cat[ndsz_mean>3 & ndsz_mean<=4] <- "3.00-4.00cm"
ndsz_cat[ndsz_mean>4 & ndsz_mean<=5] <- "4.00-5.00cm"
ndsz_cat[ndsz_mean>5] <- "5.00-6.00cm"
})

## subset nsdats to the category columns and merge onto the phenology data
ns <- subset(nsdats, select=c("RowNumNumRep", "diam_cat", "ndsz_cat"))
ds <- join(dats, ns, by=c("RowNumNumRep"))
ds.sm <- subset(ds, select=c(
"Date",
"RowNumNumRep",
"Var",
"days",
"EL_mean",
"sm_cat",
"diam_cat",
"ndsz_cat"))
ds.om <- na.omit(ds.sm)

## exploratory plots: EL stage trajectories per plant, styled by each covariate
ggplot(ds.om, aes(days, EL_mean, color=Var, group=RowNumNumRep)) + geom_point(aes(size=sm_cat), shape=1) + geom_line() + labs(x = "Time (days)", y = "EL Stage")
ggplot(ds.om, aes(days, EL_mean, color=Var, group=RowNumNumRep)) + geom_point(shape=1) + geom_line(aes(linetype=ndsz_cat)) + labs(x = "Time (days)", y = "EL Stage")
ggplot(ds.om, aes(days, EL_mean, color=Var, group=RowNumNumRep)) + geom_point(shape=1) + geom_line(aes(linetype=diam_cat)) + labs(x = "Time (days)", y = "EL Stage")

# alternative version of one of the above plots
ggplot(ds.om, aes(days, EL_mean, color=sm_cat)) +
geom_point() +
facet_wrap(~Var) +
geom_line() + labs(x = "Time (days)", y = "EL Stage")

#################################
## Estimating dates of events! ##
#################################
##
## Working on getting estimates of day each ind reached a certain stage
bbday <- get_pheno_est(ds.om, "budburst", 5, NA) # should double-check what we call budburst with EL paper

# hmm, some weird Tempranillo data; how can EL go from 10 to 0?
# BUG FIX: the original referenced `dat`, which is never defined and errored;
# the raw per-plant records live in `dats`.
weird <- dats[which(dats$RowNumNumRep=="35.3.R5"),]
# exclude for now
bbday <- subset(bbday, RowNumNumRep != "35.3.R5")

## get mean bbday by var (this is not the most efficient way!)
# and SD and SE (std deviation and error, respectively)
bb.mean <-
ddply(bbday, c("Var"), summarise,
mean.bb = mean(days),
sd = sd(days),
sem = sd(days)/sqrt(length(days)))

# now merge this back in
bbday.full <- merge(bb.mean, bbday, by="Var", all.y=TRUE)

# We should figure out how to order this plot by the bbday of each var ... figure out how to suppress the legend
pdf(file="graphs/bbday_byvar.pdf")
ggplot(bbday, aes(days, Var, color=Var)) +
geom_point()
dev.off()

# Colors not the best ... need to change to rainbow or heat or such, I think
ggplot(bbday.full, aes(days, Var, color=mean.bb)) +
geom_point()
|
138b5fd3142df88ecaa79957c906f7fa7c85b299
|
5ca8793fd39a818675306047c861e8c32965022a
|
/website/Old_source/Function/man/index2CountryFeature.Rd
|
9491f8d9c40c86d4f4f95b76f751f178a858725a
|
[] |
no_license
|
SOCR/TCIU
|
a0dfac068670fa63703b8e9a48236883ec167e06
|
85076ae775a32d89676679cfa6050e683da44d1d
|
refs/heads/master
| 2023-03-09T01:28:28.366831
| 2023-02-27T20:27:26
| 2023-02-27T20:27:26
| 188,899,192
| 8
| 5
| null | 2022-11-12T01:56:25
| 2019-05-27T19:33:38
|
R
|
UTF-8
|
R
| false
| true
| 744
|
rd
|
index2CountryFeature.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/index2CountryFeature.R
\name{index2CountryFeature}
\alias{index2CountryFeature}
\title{index2CountryFeature}
\usage{
index2CountryFeature(indx = 1)
}
\arguments{
\item{indx}{The 1D index of the country--feature combination you wish to look up}
}
\value{
return the original country and feature code of the feature you wish to show
}
\description{
Convert index to country name and feature code
}
\details{
This function converts a 1D index back into its country and feature code.
\strong{Notice:} This function mainly serves as a helper for other functions, so it is seldom
applied by itself.
}
\author{
SOCR team <http://socr.umich.edu/people/>
}
|
0a9e3ed9d09dcb86053382975dd7b9621f5b0bd9
|
940047d7eb38f464e0ae201a57dbf0bf49aef839
|
/042_Gompertz.R
|
8cbe8baab34ab647a09880cf842ced61e3a66eb6
|
[] |
no_license
|
mvoigt87/ICP2017_SU1
|
da7d8717548f580f823c8b7d2fbf1d2d44316c53
|
18c0379f7a1653077dc7a97c5241e148764be333
|
refs/heads/master
| 2021-01-01T04:47:22.963892
| 2018-08-24T12:07:21
| 2018-08-24T12:07:21
| 97,245,349
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,705
|
r
|
042_Gompertz.R
|
### Playing with the Gompertz model
#### Fit Gompertz survival models of old-age mortality with log household income

## 1) Parametric survival model - log income, men
# BUG FIX: subset() needs `==` for a logical filter. The original
# `subset(pen.coupl, SEXO="male")` passed SEXO as an (ignored) named argument,
# so NO subsetting happened and both "male" and "female" models were fit on
# the full data. Same fix applied to all three models below.
GOMP.MALE.A.C <- flexsurvreg(Surv(time=entry.age.r,
time2=exit.age,
event=event) ~ log(hhincome), data = subset(pen.coupl, SEXO == "male"),
dist = "gompertz")
GOMP.MALE.A.C

## 2) Parametric survival model - log income, women
GOMP.FEMALE.A.C <- flexsurvreg(Surv(time=entry.age.r,
time2=exit.age,
event=event) ~ log(hhincome), data = subset(pen.coupl, SEXO == "female"),
dist = "gompertz")
GOMP.FEMALE.A.C

# survival curve
plot(GOMP.MALE.A.C, xlim=c(65,100))
lines(GOMP.FEMALE.A.C)
# hazard
plot(GOMP.MALE.A.C, xlim=c(65,100),ylim=c(-0.1,1),type = "hazard")

##### FULL MODELS
## 3d) Parametric survival model with log(income) - there seem to be some kind of problem with the log income
GOMP.FEMALE.C.C <- flexsurvreg(Surv(time=entry.age.r,
time2=exit.age,
event=event) ~ log(hhincome) + ESREAL5 + mobil + HousReg + p.surv + DIS + FNAC +
DIS_p + ESREAL5_p + hijo + bw, data = subset(pen.coupl, SEXO == "female"),
dist = "gompertz")
GOMP.FEMALE.C.C

### For Males - model fit
GOMP.MALE.A.C

# estimated shape and rate taken from the fitted male model above
# (FIX: a dangling `a <-` previously chained into the shape assignment,
# silently making a == shape)
shape <- 0.1308294
rate <- 0.00000731
# vector of quantiles (ages 65-100)
time <- seq(65,100,0.1)

### Base survival - Gompertz function (http://www.statsathome.com/2017/06/07/fitting-non-linear-groth-curves-in-r/)
# Gompertz growth curve: a = asymptote, mu = max growth rate, lambda = lag.
gompertz <- function(time, a, mu, lambda){
y <- a*exp(-exp(mu*exp(1)/a*(lambda-time)+1))
return(data.frame(time=time, y=y))
}
fit <- gompertz(time = time, a = 2 ,mu = shape ,lambda = rate)
plot(fit)

# Gompertz survival function: y(t) = a * exp(-b * exp(-c * t))
# BUG FIX: the original body `y(t) <- ...` assigned to a function call
# (invalid R) and returned an undefined `y`.
Gomp.Surv <- function(time, a = 0, b = 0, c = 0){
y <- a * exp(-b * exp(-c * time))
return(y)
}
# BUG FIX: the original called an undefined `Gomp.Mod` with argument `t=`.
# NOTE(review): with the default a = 0 this curve is identically zero --
# supply a sensible `a` when using it in earnest.
fit <- Gomp.Surv(time = pen.coupl$exit.age, b = 0.131, c = 0.000007)
### model fit
# Fit a Gompertz growth curve y ~ a*exp(-exp(mu*e/a*(lambda - t) + 1)) via nls().
#   data: numeric response values (y); time: matching time points (t).
# Returns the nls fit object; prints the heuristic starting values as a side
# effect. Stops if fewer than 3 distinct time points are supplied.
fit.gompertz <- function(data, time){
d <- data.frame(y=data, t=time)
# Must have at least 3 datapoints at different times
if (length(unique(d$t)) < 3) stop("too few data points to fit curve")
# Pick starting values ###
# Heuristic: a = observed maximum; mu = steepest observed slope between
# consecutive points; lambda = index of that steepest step.
# NOTE(review): assumes `data` is ordered by time and i+1 <= nrow(d);
# an all-decreasing series would give a non-positive mu -- confirm inputs.
i <- which.max(diff(d$y))
starting.values <- c(a=max(d$y),
mu=max(diff(d$y))/(d[i+1,"t"]-d[i, "t"]),
lambda=i)
print("Starting Values for Optimization: ")
print(starting.values)
##########################
formula.gompertz <- "y~a*exp(-exp(mu*exp(1)/a*(lambda-t)+1))"
nls(formula.gompertz, d, starting.values)
}
|
f9e45ff674a41e6b1537991eeeea9238c4485d40
|
965fd77fcb7c7bcc179af685c6f8bec101f33fc1
|
/man/heightMatcher.Rd
|
3a1fb6d87df4d29ab5c9db3ecd868a3aeb560a30
|
[] |
no_license
|
DrRoad/spsComps
|
f4df3e9b175b020a9f2214fc82b61a4c8e455165
|
39bcbb7f97b4b886b262aa8bee7f2a8818699307
|
refs/heads/master
| 2023-03-20T11:24:44.053001
| 2021-03-12T08:07:33
| 2021-03-12T08:07:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,657
|
rd
|
heightMatcher.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spsUIcollections.R
\name{heightMatcher}
\alias{heightMatcher}
\title{Match height of one element to the other element}
\usage{
heightMatcher(div1, div2, isID = TRUE)
}
\arguments{
\item{div1}{element ID, or jquery selector if \code{isID = FALSE}. The element
whose height you want to match to the other element}
\item{div2}{matched element ID or selector, the other element}
\item{isID}{bool, if \code{TRUE}, \code{div1} and \code{div2} will be treated as ID, otherwise
you can use complex jquery selector}
}
\value{
will be run as javascript
}
\description{
Match the height of one element to that of a second element.
If the height of the second element changes, the height of the first element will change
automatically.
}
\examples{
if(interactive()){
library(shiny)
library(shinyjqui)
ui <- fluidPage(
column(
3, id = "a",
style = "border: 1px black solid; background-color: gray;",
p("This block's height is matched with orange one")
),
shinyjqui::jqui_resizable(column(
2, id ="b",
style = "border: 1px black solid; background-color: orange;",
p("drag the bottom-right corner")
)),
column(
3, id = "c",
style = "border: 1px black solid; background-color: red;",
p("This block's is not matched with others")
),
heightMatcher("a", "b")
)
server <- function(input, output, session) {
}
# Try to drag `b` from bottom right corner and see what happens to `a`
shinyApp(ui, server)
}
}
|
3bb3690069403a941cf7c1875166cbfd3abbad59
|
e7d6cb14f42e01e0d9255717f851bc926c440700
|
/quantviews_functions.R
|
b15d29d18d65cf46a93079ff120d3746bc41c690
|
[] |
no_license
|
quantviews/quantviews_functions
|
b42984cbeadaafa8cb4803cb9b2b7ef174967094
|
c6bf5ef62c69123e529a482aadf6a8d3932c7807
|
refs/heads/master
| 2021-05-04T01:01:37.708575
| 2016-10-17T06:44:03
| 2016-10-17T06:44:03
| 71,108,012
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 127
|
r
|
quantviews_functions.R
|
# Load all helper functions (data, chart, and theme utilities)
source('data_functions.R')
source('chart_functions.R')
source('theme_functions.R')
|
2b8948e0c4ada3bb916616e958f784899d627ae2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/popRange/examples/popRangeSim.Rd.R
|
a6f6ce0c5eaf2d43c2e755ba55093fa4e7d4cd99
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 739
|
r
|
popRangeSim.Rd.R
|
# Auto-extracted example for popRange::popRangeSim — runs a forward-in-time
# population simulation and writes result files to the working directory.
library(popRange)
### Name: popRangeSim
### Title: popRangeSim
### Aliases: popRangeSim
### ** Examples
## Example: In this scenario, we are simulating a 3x3 grid of populations for 50
## generations. Each population starts with 100 diploid individuals. Each individual
## has a 0.01 probability of migrating away from their populations. There are 100
## SNPs that all have a starting frequency of 0.5, and the program outputs the standard
## "results" file, as well as a PLINK file.
## Commands
# 3x3 world grid; a value of 1 marks a habitable cell
mat = matrix(1,nrow=3,ncol=3)
popRangeSim(world = mat, popSize = 100, diploid = TRUE, nGens = 50,
mig = 0.01, SNP_model = 0, nSNPs = 100, SNPs_starting_freq = 0.5,
outfile= "outFile1", PLINK=TRUE)
|
2b0c43e1d17b52f9bd951fddd9e226d4d2256cb7
|
20eccf57bfffc40c64eb9cd5ab36373fec502079
|
/plot3.R
|
f79b7fe95ee1fda7ae891ccec055b4cccf6158ca
|
[] |
no_license
|
mayakamay/Exploratory-data-analysis
|
97fa9383541470d243f9e161014a5466cffec77e
|
e910a0b1d097980e3b88bad1b32b380aeac341fd
|
refs/heads/main
| 2023-02-02T20:46:18.793483
| 2020-12-22T07:23:10
| 2020-12-22T07:23:10
| 320,191,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 460
|
r
|
plot3.R
|
##Plot 3
# Energy sub-metering over time: Sub_metering_1 (black), _2 (red), _3 (blue),
# written to plot3.png.
# NOTE(review): assumes `my_data` already exists in the workspace with a
# plottable (e.g. POSIXct) Datetime column -- confirm against the loading script.
png('plot3.png')
plot(my_data$Datetime, as.numeric(my_data$Sub_metering_1), type='l',
xlab='',
ylab='Energy sub metering')
lines(my_data$Datetime,as.numeric(my_data$Sub_metering_2),type='l',col='red')
lines(my_data$Datetime,as.numeric(my_data$Sub_metering_3),type='l',col='blue')
legend("topright",
legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),
lty=1,
col=c("black","red","blue"))
dev.off()
|
52bd663259c728b3a21db252bf9bb42e3e1944a6
|
54c86df6a27edb73185859342a331daac0cf9ce8
|
/man/PADManuscript.Rd
|
c42a533d15633821c839b4116290b35c230bb6e3
|
[] |
no_license
|
alondhe/PADManuscript
|
c2ad60b91c63144aa8e3fa92604cc4dc1b2c0bb2
|
5be31718a7ae155104f900c6a7c9f6cdc4e96951
|
refs/heads/master
| 2020-04-04T15:21:20.281592
| 2018-12-07T06:11:58
| 2018-12-07T06:11:58
| 156,034,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 231
|
rd
|
PADManuscript.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Package.R
\docType{package}
\name{PADManuscript}
\alias{PADManuscript}
\alias{PADManuscript-package}
\title{PADManuscript}
\description{
PADManuscript
}
|
dad2a1e2274fb033ea93d6714ac7c7a5b2746180
|
2340a0e540d04b2fb442e6606a35537b8d2fc1b1
|
/ABTesting/NextPageReport.R
|
1e7b5d154498d73dccc62cd7208c8897a87b7fe9
|
[
"BSD-2-Clause"
] |
permissive
|
CDC-Digital-Media/metRics
|
11887f348c2de5cded15eec46eedb5b87fac5c24
|
2b1cf076a9dbb896e1fb6e04ff04a0dfff50c376
|
refs/heads/master
| 2021-01-17T15:55:37.719909
| 2017-03-31T19:49:28
| 2017-03-31T19:49:28
| 84,117,818
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,513
|
r
|
NextPageReport.R
|
#########################################
############ Edit this block ############
#########################################
# Adobe Analytics (SiteCatalyst) next-page pathing report for a CDC A/B test
# page: queries pathing data, renders a Sankey diagram per page, screenshots
# it, and summarizes click-through/exit percentages.
SCUser <- Sys.getenv("SC_ID")
SCToken <- Sys.getenv("SC_KEY")
currentABTestSegment <- "s570_573c5ef1e4b0becdc3b444ce"
#ABTestPage <- "CDC Newsroom | CDC Online Newsroom | CDC"
ABTestPage <- "Pink Eye: Usually Mild and Easy to Treat | Features | CDC"
#ABTestPageURL <- "http://www.cdc.gov/media/index.html"
start <- "2016-04-22"
end <- "2016-06-22"
suite <- "cdcgov"
# NOTE(review): stored as a string and coerced with as.numeric() at each use --
# works, but a numeric 0.5 would be cleaner.
clickThreshold <- ".5"
#########################################
############ End Edit block ############
#########################################
#######################
###### Libraries ######
#######################
library("RSiteCatalyst")
library("networkD3")
library("webshot")
library("dplyr")
library("ggplot2")
library("stringr")
#######################
###### Set WD ######
#######################
# Best-effort: point the working directory at this script's folder when run
# inside RStudio; silently continues otherwise.
# NOTE(review): `result =` uses `=` at top level and the handlers just print --
# errors here are intentionally non-fatal.
result = tryCatch({
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
}, warning = function(w) {
print("warning")
}, error = function(e) {
print("error")
}, finally = {
print("")
} )
#### Authentication
SCAuth(SCUser, SCToken)
#### First find to 20 features #####
features <- QueueRanked(suite, start, end, c("pageviews"), c("page"), search = " | Features |", top = 40)
names(features)[2] <- "pageURL"
names(features)[1] <- "pageName"
features <- features[,1:3]
### initialize all pathing report data
returnedData <- data.frame()
#nrow(features)
# NOTE(review): the loop is pinned to a single iteration (1:1) and `checkpage`
# is immediately overridden with ABTestPage, so the `features` ranking above is
# effectively unused -- looks like leftover scaffolding for a multi-page run.
for(i in 1 : 1 ){
#checkpage <- features[i,1]
checkpage <- ABTestPage
# Build file/report names from the page title with punctuation and spaces stripped
rootName <- str_replace_all(str_replace_all(str_replace(checkpage, " \\| Features \\| CDC", ""),"([[:punct:]]+)",""),"([[:space:]]+)","")
fname <- paste(rootName,".html", sep = "")
imgName <- paste(rootName,"NextPage.png", sep = "")
reportPath <- paste("//cdc.gov/ahb_apps/prototyped_cdc_gov/abtest/", fname, sep = "")
viewURL <- paste("http://prototyped.cdc.gov/abtest/",fname, sep = "")
#### Get Pathing data: Single page, then ::anything:: pattern
pathpattern <- c( checkpage, "::anything::")
suite <- "cdcgov"
next_page <- QueuePathing(suite,
start,
end,
metric="pageviews",
element="page",
pathpattern,
top = 50000)
tot <- sum(next_page$count)
conrate <- round(next_page$count[2] / tot * 100, 1)
#write.csv(next_page, "3pages.csv")
# NOTE(review): this replaces the live QueuePathing result with a local CSV
# while the write.csv above is commented out -- almost certainly leftover
# debugging; the report is built from stale "3pages.csv" data, not the query.
next_page <- read.csv("3pages.csv")
page2 <- next_page[,3:4]
page2$X <- as.numeric(row.names(page2))
page2 <- page2[,c(3,1,2)]
names(page2)[1] <- "step.1"
names(page2)[2] <- "step.2"
next_page <- next_page[,1:3]
next_page <- rbind(next_page, page2)
#Optional step: Cleaning my pagename URLs to remove to domain for clarity
next_page$step.1 <- sub("http://www.cdc.gov/","",
next_page$step.1, ignore.case = TRUE)
next_page$step.2 <- sub("http://www.cdc.gov","",
next_page$step.2, ignore.case = TRUE)
next_page <- next_page[,1:3]
totClicks <- sum(next_page$count)
next_page$clickPercent <- round(next_page$count/totClicks * 100, 1)
##### store these values for summary later ######
if(nrow(returnedData) == 0){
returnedData <- next_page
} else {
returnedData <- rbind(returnedData, next_page)
}
# Drop paths below the click-percentage threshold before plotting
next_page <- next_page[as.numeric(next_page$clickPercent) >= as.numeric(clickThreshold), ]
###for display reasons ###
next_page$step.2 <- paste(next_page$step.2," - ", next_page$clickPercent, "%", sep = "")
next_page <- next_page[,1:3]
#Get unique values of page name to create nodes df
#Create an index value, starting at 0
nodes <- as.data.frame(unique(c(next_page$step.1, next_page$step.2)))
names(nodes) <- "name"
nodes$nodevalue <- as.numeric(row.names(nodes)) - 1
#Convert string to numeric nodeid
links <- merge(next_page, nodes, by.x="step.1", by.y="name")
names(links) <- c("step.1", "step.2", "value", "source")
links <- merge(links, nodes, by.x="step.2", by.y="name")
names(links) <- c("step.1", "step.2", "value", "source", "target")
#Create next page Sankey chart
d3output = reportPath
sankeyNetwork(Links = links, Nodes = nodes, Source = "source",
Target = "target", Value = "value", NodeID = "name",
nodeWidth = 30, fontSize = 12, width = 650, height = 900) %>%
saveNetwork(file = reportPath)
# Screenshot the published HTML report after a short render delay
webshot(viewURL, imgName, delay = 0.2)
}
returnedData <- returnedData[,1:4]
returnedData$reviewURL <- paste("http://prototyped.cdc.gov/abtest/",str_replace_all(str_replace_all(str_replace(returnedData$step.1, " \\| Features \\| CDC", ""),"([[:punct:]]+)",""),"([[:space:]]+)",""), ".html", sep="")
returnedData2 <- returnedData[as.numeric(returnedData$clickPercent) >= as.numeric(clickThreshold),]
write.csv(returnedData, "fullSet.csv")
write.csv(returnedData2, "smallSet.csv")
exits <- filter(returnedData2, step.2 == "Exited Site")
############### basic clickthrough plot
p = ggplot(data=exits,
aes(x=str_replace(step.1, " \\| Features \\| CDC", ""),
y=clickPercent
)
)
labelPrint <- paste("Site Exits by Feature Page\n")
p=p + geom_bar(stat = "identity", fill="blue") +
xlab("Page") + ylab("Percent Exited") +
ggtitle(labelPrint)
p=p + guides(fill=FALSE) + theme(axis.text.x = element_text(angle = 90, hjust = 1))
p
|
82e426529999205a6f6ed1ce9698da2e22a3ef6c
|
675992faf7562aa1bb2fc2e133012f0ac9ae1312
|
/Linkages_tests/linkages.test.butt.R
|
246377c97eb442e751a7a4441d3e4192cafd572e
|
[] |
no_license
|
bcow/Miscellaneous
|
6780908b91cf94074d0306dc7341532f0ba1e9e9
|
53c580082552f96a5c4de19b3e23297ac9da1d58
|
refs/heads/master
| 2021-01-20T11:46:40.565124
| 2018-11-20T01:22:03
| 2018-11-20T01:22:03
| 31,224,090
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,112
|
r
|
linkages.test.butt.R
|
# Scratch/debug script: manually stages the variables that PEcAn's met2model
# conversion for the LINKAGES model expects, by unpacking a PalEON pecan.xml.
# NOTE(review): this is interactive-session code, not a runnable script -- see
# the undefined-object notes below.
rm(list = setdiff(ls(), lsf.str())) # clear variables but not sourced functions
for (i in dbListConnections(PostgreSQL())) db.close(i) #close any stray database connections
require(PEcAn.all)
# PEcAn.data.atmosphere::
xml_file <- "/home/ecowdery/GitHub_Miscellaneous/PalEON/PalEON.pecan.xml"
# settings <- read.settings(xml_file)
settings <- xmlToList(xmlParse(xml_file))
# Unpack the settings list into the loose variables the downstream call expects
site = settings$run$site
input_met = settings$run$inputs$met
start_date = settings$run$start.date
end_date = settings$run$end.date
model = settings$model$type
host = settings$run$host
dbparms = settings$database$bety
dir = settings$run$dbfiles
browndog = settings$browndog
# NOTE(review): `stage` is never created before this element assignment --
# this line errors ("object 'stage' not found") in a fresh session.
stage$met2model = TRUE
ready.id <- 1000000478
input.id = ready.id
site.id=site$id
# NOTE(review): `con=con` and `lst=lst` are self-assignments that only work if
# those objects already exist in the workspace; `new.site` is likewise assumed
# to be pre-defined. Confirm the intended interactive setup.
con=con
hostname=host$name
write=TRUE
lst=lst
lat=new.site$lat
lon=new.site$lon
l <- list(lst=lst,
lat=new.site$lat,
lon=new.site$lon)
in.path <- "/fs/data1/pecan.data/input/PalEON_CF_site_1-650"
in.prefix <- "PalEON"
# NOTE(review): leading double slash in the output path looks accidental.
outfolder <- "//fs/data1/pecan.data/input/PalEON_LINKAGES_site_1-650/"
overwrite=FALSE
verbose=FALSE
|
574ebb248620b745bbdc751107642d8d8daf7749
|
f204357131766869e52d4c28052096fe0b78e04b
|
/R/shiny_app.R
|
5b1d44c76cbaa842d74d735ae750153070a9617d
|
[] |
no_license
|
innuo/KidsWindFarmDesign
|
95f342f2673d59f2a6f568be6cf0ffe9be5fb46d
|
915753cc623d0badddc5150f052dcc5610fab053
|
refs/heads/master
| 2020-04-24T13:09:19.828075
| 2019-04-19T17:23:24
| 2019-04-19T17:23:24
| 171,977,683
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,221
|
r
|
shiny_app.R
|
#' @import shiny
# Launch the wind-farm placement game.
# Initializes game-wide state via globals (`<<-`) read by `ui`/`server`:
#   seed (map counter), max_turbines, num_maps.
#   n_turbines: turbines the player may place per game (default 15).
#   n_maps: number of distinct wind maps to cycle through (default 5).
# Returns the running shiny app object.
start_game <- function(n_turbines = 15, n_maps = 5){
seed <<- 0
max_turbines <<- n_turbines
num_maps <<- n_maps
shiny::shinyApp(ui, server)
}
#' @import shiny
# UI for the wind-farm game: a clickable/hoverable wind-map plot on the left,
# a DT table of placed turbines on the right, and hover-info + score panels
# below. The inline CSS styles the #score output.
ui <- shiny::fluidPage(
shiny::tags$head(
shiny::tags$style(shiny::HTML("
pre, table.table {
font-size: smaller;
}
")),
shiny::tags$style(type = 'text/css', '#score
{font-size: 18px; font-family: Helvetica; background-color: rgba(255,255,255,0.40);
color: blue; border-style: none;}')
),
shiny::fluidRow(
shiny::column(width = 3,
# In a plotOutput, passing values for click, dblclick, hover, or brush
# will enable those interactions.
shiny::plotOutput("plot1", height = "600px", width = "250%",
# Equivalent to: click = clickOpts(id = "plot_click")
click = shiny::clickOpts(id = "plot_click"),
dblclick = shiny::dblclickOpts(
id = "plot_dblclick"
),
hover = shiny::hoverOpts(
id = "plot_hover",
delay=10
)
)
),
shiny::column(width = 4, offset = 4,
DT::dataTableOutput('table')
#DT::DTOutput('table')
)
),
shiny::fluidRow(
shiny::column(width = 3,
shiny::verbatimTextOutput("hover_info")
),
shiny::column(width = 3,
shiny::span(shiny::verbatimTextOutput("score"), style="color:blue")
)
)
)
#' @import shiny
# Server for the wind-farm game. Per session: picks the next wind map (cycling
# `seed` through num_maps), resets the turbine table, and on each plot click
# places a turbine, wakes the wind map (elementwise min with the turbine's
# conditional map), updates the table/plot/score, and ends the game after
# max_turbines placements.
# NOTE(review): game state (seed, cur_x/cur_y, turbine_array, cur_wind_map) is
# kept in globals via `<<-`, so concurrent sessions would share/clobber state.
server <- function(input, output) {
game_over <- FALSE
# Advance to the next map in [1, num_maps]
seed <<- seed %% num_maps + 1
init_wind_map <- make_initial_wind_map(seed)
cur_x <<- 1
cur_y <<- 1
turbine_array <<- data.frame(x = numeric(0), y=numeric(0), orig.ws = numeric(0), waked.ws=numeric(0))
cur_wind_map <<- init_wind_map
output$plot1 <- shiny::renderPlot({
plot_wind_map(cur_wind_map, main=sprintf("Arrange %d Turbines for Wind Map %d. ", max_turbines, seed))
})
shiny::observeEvent(input$plot_click, {
# Convert normalized click coords to grid cells, clamped to the domain
cur_x <<- round(input$plot_click$x * domain_dims[1])
cur_y <<- round(input$plot_click$y * domain_dims[2])
cur_x <<- max(min(cur_x, domain_dims[1]), 1)
cur_y <<- max(min(cur_y, domain_dims[2]), 1)
if(!game_over){
# Wind speed at the new site before any wake from this turbine
ws <- round(init_wind_map[cur_y, cur_x], 1)
tw = turbine_conditional_wind_map(c(cur_y, cur_x), init_wind_map)
# Wakes only ever reduce wind speed: take the elementwise minimum
cur_wind_map <<- pmin(tw, cur_wind_map)
# Re-read the (possibly reduced) wind speed at every turbine, old and new
waked.ws <- round(cur_wind_map[cbind(c(turbine_array$y, cur_y),
c(turbine_array$x, cur_x))], 1)
turbine_array <<- data.frame(x = c(turbine_array$x, cur_x),
y = c(turbine_array$y, cur_y),
orig.ws = c(turbine_array$orig.ws, ws),
waked.ws = waked.ws)
output$table <- DT::renderDataTable(DT::datatable(turbine_array,
options = list(searching = FALSE, paging = FALSE)))
}
print (turbine_array)
output$plot1 <- shiny::renderPlot({
plot_wind_map(cur_wind_map, main=sprintf("Arrange %d Turbines for Wind Map %d. ", max_turbines, seed))
})
if(nrow(turbine_array) == max_turbines){
if(!game_over) beepr::beep(1)
# `<<-` reaches the game_over defined in the enclosing server scope
game_over <<- TRUE
}
output$score <- shiny::renderText({
# Score = total waked wind speed across all placed turbines
score <- sum(turbine_array$waked.ws)
if(!game_over)
sprintf("\nCurrent Score: %5.1f", score)
else
sprintf("GAME OVER \nFinal Score: %5.1f", score)
})
})
output$hover_info <- shiny::renderPrint({
# NOTE(review): input$plot_hover is NULL until the first hover; this
# handler appears to rely on that case rendering harmlessly -- confirm.
cur_x <- round(input$plot_hover$x * domain_dims[1])
cur_y <- round(input$plot_hover$y * domain_dims[2])
cur_x <- max(min(cur_x, domain_dims[1]), 1)
cur_y <- max(min(cur_y, domain_dims[2]), 1)
cat("Location and wind speed\n")
cat(paste0("x = ", cur_x, "\ny = ", cur_y,
"\nWind speed = ", round(cur_wind_map[cur_y, cur_x], 1)))
})
output$dblclick_info <- shiny::renderPrint({
cat("input$plot_dblclick:\n")
str(input$plot_dblclick)
})
}
|
4f0b5e7f3e1cfc12f6b3994a42d7d8985a6d41ae
|
ff96ca0135a10973c4b58b0dc8ae68f0428886c2
|
/runShiny.R
|
77da68454159ac4ef90eaee8b2e17f473e2497f2
|
[] |
no_license
|
rkrtiwari/rAdvanced
|
58930e383423f68b2fe2c75ff192fc1ac6bc241d
|
e3474fe759849612ed29b658a670c7e8e4835c21
|
refs/heads/master
| 2020-04-12T03:54:28.173536
| 2016-09-14T15:40:04
| 2016-09-14T15:40:04
| 63,211,309
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 63
|
r
|
runShiny.R
|
##install.packages("shiny")
# Launch the Shiny application in the shinyApp/ directory
# (expects ui.R/server.R or app.R inside that folder).
library(shiny)
runApp("shinyApp/")
|
d64cc58e6a47542facaf62b05c7a9b22915e802a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/baseballDBR/examples/get_bbdb.Rd.R
|
661849c71b3f64efcc91c17c67e26a0ab5ead957
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 349
|
r
|
get_bbdb.Rd.R
|
# Auto-extracted example for baseballDBR::get_bbdb — downloads Baseball
# Databank tables into the global environment (network access required).
library(baseballDBR)
### Name: get_bbdb
### Title: Get an up to date copy of the Baseball Databank.
### Aliases: get_bbdb
### Keywords: data database, frame
### ** Examples
# Fetch a single table
get_bbdb(table = "Batting")
## Not run:
##D get_bbdb(table = c("Batting", "Pitching"))
## End(Not run)
## Not run:
##D get_bbdb(downloadZip = TRUE)
## End(Not run)
|
2469c62ee9c81a96aadf19f22720cf8f37bcc989
|
3cd0ff8022a94c234c8a9990719d2dcf8e54dd24
|
/Reg_II assign_7 logit Rcode.R
|
b326e1f6522b79c8599e5d30b13bed5f7ea02278
|
[] |
no_license
|
drewwint/ICPSR-Regression-II-Assignments
|
325d97077575f3c06ac867e374991d867b52612c
|
4f4b0d8f30f0cb88bee82153bd8a7054ea93d708
|
refs/heads/master
| 2022-11-13T00:26:21.603696
| 2020-07-13T17:03:01
| 2020-07-13T17:03:01
| 279,361,250
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,123
|
r
|
Reg_II assign_7 logit Rcode.R
|
###########################################################
### ICPSR 2020 Regressin II OPTIONAL logit assignment 7 ###
### Instructor: Tim McDaniels #############################
### Code by: Drew E Winters, PhD. #########################
###########################################################
library(haven)
library(psych)
library(car)
library(pscl) # for hitmiss command
reg2assign7<- read_sav("C:\\YOUR_FILE_PATH\\Crows.sav")
head(reg2assign7)
str(reg2assign7)
#A: describe varaibles usign spss
psych::describe(reg2assign7[2:6])
#B: run LOGIT models
## weight predicting sex
logit.1 <- glm(Sex ~ Weight,
data=reg2assign7, family=binomial(link="logit"))
summary(logit.1)
hitmiss(logit.1)
## bill_height predicting
logit.2 <- glm(Sex ~ Bill_Height,
data=reg2assign7, family=binomial(link="logit"))
summary(logit.2)
hitmiss(logit.2)
# head length predicting
logit.3 <- glm(Sex ~ Head_Length,
data=reg2assign7, family=binomial(link="logit"))
summary(logit.3)
hitmiss(logit.3)
# weight and bill_lengtth
logit.4 <- glm(Sex ~ Weight + Bill_Height,
data=reg2assign7, family=binomial(link="logit"))
summary(logit.4)
hitmiss(logit.4)
# weight adn head length
logit.5 <- glm(Sex ~ Weight + Head_Length,
data=reg2assign7, family=binomial(link="logit"))
summary(logit.5)
hitmiss(logit.5)
# bill deight and head length
logit.6 <- glm(Sex ~ Bill_Height + Head_Length,
data=reg2assign7, family=binomial(link="logit"))
summary(logit.6)
hitmiss(logit.6)
#weight, bill height, head length
logit.7 <- glm(Sex ~ Weight + Bill_Height + Head_Length,
data=reg2assign7, family=binomial(link="logit"))
summary(logit.7)
hitmiss(logit.7)
#C: *without considering statistical sig* substantively interpret the sign fo eahc slope coefficient in model #7
## weight
### coeffficient is positive
#### after holdign constant (filtering out) the immpact of bill height and length of head, the heavier toe crow is
##### the more likely it is that cros is a male
## Bill_Height
### the coefficient is negative
#### after holding constant (fiiltering out) the impact of weiight and Head_Length, the larger the bill height
##### the less likely it is the crow is a male (i.e. the more likely a female)
## head_length
### the coefficient is positive
#### after holding constant (fiiltering out) the impact of weight and bill_height, the larger the head_length
##### the more likely it is a male crow (i.e. less likley it is a female)
#D: Report the percentage of observatiosn predicted correctly for each model. based soley on thsi (rather flimsy) evidence
## which model seems best? why?
hitmiss(logit.1) #83.3
hitmiss(logit.2) #66.7
hitmiss(logit.3) #84.0
hitmiss(logit.4) #85.2
hitmiss(logit.5) #84.0
hitmiss(logit.6) #84.0
hitmiss(logit.7) #88.0
## based on this information only - model 7 seems to fit the best since ti has the highest percentage correctly predicted.
### NOTE - this uses p<.05 to predict this for correctly predicted.
#E: based soley on the reported sig of slope coeffficients --nothing else-- which model seems best to you?
# models 1, 2, & 3 have sig coefficients - not bad
# model 5 has 2 x's that both are stat sig - *even better*
# model 4 & 6 - have 2 x's but not all stat sig
# model 7 has 3 x's but one is insignificiant
# MODEL 5 looks the best!
#F *ignoring stat sig* for each model predict the probability that a crow that weighs 450g, has a bill height of 19.5mm, and
## and a head length of 90mm is a male. you will estimate a total of 7 probabilities here
#### just place all relevant numbers to multiply the appropriate coefficients
#### weighs 450g
#### bill height of 19.5mm
#### head length of 90mm
#### a male
###p(male)= P(y=1)=e^y_hat/1+e^y_hat
#### "e" = exponent
summary(logit.1) # reference for model 1
# Model 1: Y_hat= -17.544 + 0.036(450)= -1.344
as.numeric(logit.1$coefficients[1]+logit.1$coefficients[2]*450)
# P(male)= e^-1.344/(1+e^-1.344)= 0.261/1.261= 0.207 (aka the predicted probability FOR MALES)
exp(-1.344)/(1+exp(-1.344)) # 0.206853
# Model 2: Y_hat= -9.458 + 0.565(19.5) = 1.5595
summary(logit.2) # reference for model2
as.numeric(logit.2$coefficients[1]+logit.2$coefficients[2]*19.5) # 1.568292
exp(1.568292)/(1+exp(1.568292)) #0.82754 (aka the predicted probability FOR MALES)
# Model 3: Y_hat= -61.4221 + .6650(90) = -1.572
summary(logit.3)
as.numeric(logit.3$coefficients[1]+logit.3$coefficients[2]*90) # -1.569036
exp(-1.569036)/(1+exp(-1.569036)) #0.1723539 (aka the predicted probability FOR MALES)
# Model 4:
summary(logit.4)
as.numeric(logit.4$coefficients[1]+(logit.4$coefficients[2]*450)+(logit.4$coefficients[3]*19.5)) #-1.142298
exp(-1.142298 )/(1+exp(-1.142298 )) # 0.2418987 (aka the predicted probability FOR MALES)
# Model 5:
summary(logit.5)
as.numeric(logit.5$coefficients[1]+(logit.5$coefficients[2]*450)+(logit.5$coefficients[3]*90)) #-2.258161
exp(-2.258161 )/(1+exp(-2.258161 )) # 0.09464783 (aka the predicted probability FOR MALES)
# Model 6:
summary(logit.6)
as.numeric(logit.6$coefficients[1]+(logit.6$coefficients[2]*19.5)+(logit.6$coefficients[3]*90)) #-1.703104
exp(-1.703104 )/(1+exp(-1.703104 )) #0.1540603 (aka the predicted probability FOR MALES)
# Model 7: Y= -67.888 + 0.0302(450) + -0.5559(19.5) + 0.6775(90) = -4.211 (aka the predicted probability)
summary(logit.7)
as.numeric(logit.7$coefficients[1]+(logit.7$coefficients[2]*450)+(logit.7$coefficients[3]*19.5)+(logit.7$coefficients[4]*90)) #-4.161632
exp(-4.161632 )/(1+exp(-4.161632 )) #0.01540603 (aka the predicted probability FOR MALES)
#G: *ignoring stat sig* for each model predict the probabiliyt that a crow that weighs 535 grams, bill height of 16mm, and
## a head length f 95mm is a female. note you will estimate a totoal of 7 probabilities here.
#### just place all relevant numbers to multiply the appropriate coefficients
#### weighs 535g
#### bill height of 16mm
#### head length of 95mm
#### a female
#### FORMULA FOR FEMALE: P(male) = P(Y=1)= e^y_hat/a+e^y_hat SO P(female) = P(Y=0)= 1-P(male)
# Model 1: Y_hat = -17.544 + 0.036(535)= 1.716
summary(logit.1) # reference for model 1
as.numeric(logit.1$coefficients[1]+(logit.1$coefficients[2]*535))# 1.690108
1 - exp(1.690108)/(1+exp(1.690108)) # = 0.152 (predicted probabilty *FOR FEMALES*)
# Model 2:
summary(logit.2) # reference for model2
as.numeric(logit.2$coefficients[1]+logit.2$coefficients[2]*16) #-0.4107711
1 - exp(-0.4107711)/(1+exp(-0.4107711)) # = 0.6012728 (aka the predicted probability *FOR FEMALES*)
# Model 3:
summary(logit.3)
as.numeric(logit.3$coefficients[1]+logit.3$coefficients[2]*95) # 1.756133
1 - exp(1.756133)/(1+exp(1.756133)) #0.1472753 (aka the predicted probability *FOR F#MALES*)
# Model 4:
summary(logit.4)
as.numeric(logit.4$coefficients[1]+(logit.4$coefficients[2]*535)+(logit.4$coefficients[3]*16)) # 1.53102
1- exp(1.53102)/(1+exp(1.53102)) # 0.1778445 (aka the predicted probability *FOR FEMALES*)
# Model 5:
summary(logit.5)
as.numeric(logit.5$coefficients[1]+(logit.5$coefficients[2]*535)+(logit.5$coefficients[3]*95)) # 2.497854
1- exp( 2.497854 )/(1+exp( 2.497854 )) # 0.07600876 (aka the predicted probability *FOR FEMALES*)
# Model 6:
summary(logit.6)
as.numeric(logit.6$coefficients[1]+(logit.6$coefficients[2]*16)+(logit.6$coefficients[3]*95)) # 1.833577
1 - exp(1.833577 )/(1+exp(1.833577 )) #0.1378127 (aka the predicted probability *FOR FEMALES*)
# Model 7:
summary(logit.7)
as.numeric(logit.7$coefficients[1]+(logit.7$coefficients[2]*535)+(logit.7$coefficients[3]*16)+(logit.7$coefficients[4]*95)) #-3.738934
1 - exp( 3.738934 )/(1+exp( 3.738934 )) #0.01540603 (aka the predicted probability *FOR FEMALES*)
|
920910b319504e0333b7e83e93b4bcfdfc5345c4
|
87b999676e7470897a283a52f0fb119cca653cd9
|
/man/iapsr-package.Rd
|
d3aef05f3084b8d6f88a163dc684f6a61da35e3c
|
[
"MIT"
] |
permissive
|
jdtrat/iapsr
|
52fb245824cd21c823e6bae736b6bfec1609d460
|
a1159e28b24a5402820294221d8b7bca3e29da79
|
refs/heads/master
| 2023-08-12T01:35:28.388733
| 2021-09-27T16:20:12
| 2021-09-27T16:20:12
| 280,185,892
| 0
| 2
|
NOASSERTION
| 2020-07-27T21:28:37
| 2020-07-16T15:11:43
|
HTML
|
UTF-8
|
R
| false
| true
| 334
|
rd
|
iapsr-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iapsr-package.R
\docType{package}
\name{iapsr-package}
\alias{iapsr}
\alias{iapsr-package}
\title{iapsr: IAPS Analysis Functions}
\description{
Package useful for analyzing images as part of the International Affective Picture System.
}
\keyword{internal}
|
dc1f72369613f9f2ab78cef9c679e055d4ad82d4
|
27ebfdade6e3ac5df274eb71a97430ba024d9db1
|
/inst/scripts/extractSnpsFromVCFs/extractByDiagnosis.R
|
dea0dc8b7316d104c4c1e5e12465e5caa945db97
|
[
"Apache-2.0"
] |
permissive
|
PriceLab/TrenaProjectGBM
|
f00ba88798063746159592f9ccec8772840ba90b
|
462071a16bdd6f44734c73b0700c55a0d6248143
|
refs/heads/master
| 2020-04-09T09:59:39.320154
| 2018-12-11T01:45:18
| 2018-12-11T01:45:18
| 160,253,735
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,955
|
r
|
extractByDiagnosis.R
|
library(TrenaProjectIGAP)
library(RUnit)
library(GenomicRanges)
library(VariantAnnotation)
#----------------------------------------------------------------------------------------------------
# Script-level setup: project handle, genes of interest, enhancer regions,
# covariates table, and the location/template of the per-chromosome Mayo VCFs.
igap <- TrenaProjectIGAP()
goi <- getSupportedGenes(igap)
load(system.file(package="TrenaProjectIGAP", "extdata", "misc", "tbl.enhancerRegions.RData"))
dim(tbl.enhancerRegions)
length(goi)
tbl.covar <- getCovariatesTable(igap)
#ad.samples <- sub("_TCX", "", subset(tbl.covar, Diagnosis=="AD")$ID)
#ctl.samples <- sub("_TCX", "", subset(tbl.covar, Diagnosis=="Control")$ID)
# NOTE(review): the load/dim pair below duplicates the one above and looks removable
load(system.file(package="TrenaProjectIGAP", "extdata", "misc", "tbl.enhancerRegions.RData"))
dim(tbl.enhancerRegions)
vcf.dir <- "~/s/data/sage/ad-mayo"
# %s is filled with the chromosome number (without the "chr" prefix)
vcf.file.template <- "NIA_JG_1898_samples_GRM_WGS_b37_JointAnalysis01_2017-12-08_%s.recalibrated_variants.Mayo.vcf.gz"
#----------------------------------------------------------------------------------------------------
getSnpTable <- function(gene, diagnosis)
{
   # Extract a flattened snp/genotype table for the enhancer region of <gene>,
   # restricted to samples with the given diagnosis.
   #
   # gene:      gene symbol; must be a rowname of tbl.enhancerRegions
   # diagnosis: "AD" or "Control"; selects sample IDs from tbl.covar
   #
   # returns (invisibly): the data.frame built by vcfToTable, with a "gene"
   # column appended identifying the source gene.

   stopifnot(gene %in% rownames(tbl.enhancerRegions))
   stopifnot(diagnosis %in% c("AD", "Control"))

   chromosome <- tbl.enhancerRegions[gene, "hg19.chrom"]
   chromosome.stripped <- sub("chr", "", chromosome)   # VCF files use "2", not "chr2"
   loc.start <- tbl.enhancerRegions[gene, "hg19.start"]
   loc.end   <- tbl.enhancerRegions[gene, "hg19.end"]
   gr <- GRanges(chromosome.stripped, IRanges(loc.start, loc.end))

   sampleIDs <- switch(diagnosis,
                       AD      = sub("_TCX", "", subset(tbl.covar, Diagnosis=="AD")$ID),
                       Control = sub("_TCX", "", subset(tbl.covar, Diagnosis=="Control")$ID))

   filename <- sprintf(vcf.file.template, chromosome.stripped)
   full.path <- file.path(vcf.dir, filename)
   stopifnot(file.exists(full.path))

   params <- ScanVcfParam(which=gr, samples=sampleIDs)
   suppressWarnings(        # samples absent from the file are reported as warnings
      vcf <- expand(readVcf(full.path, "hg19", params))
      )

   tbl <- vcfToTable(vcf)
   tbl$gene <- gene

   # bug fix: the original body ended with the assignment above, whose value is
   # the RHS, so the function returned the gene *name* rather than the table
   invisible(tbl)

} # getSnpTable
#----------------------------------------------------------------------------------------------------
test_getSnpTable <- function()
{
   # smoke test: extracting the AD-sample snp table for INPP5D should run cleanly
   printf("--- test_getSnpTable")
   tbl.snps <- getSnpTable("INPP5D", "AD")

} # test_getSnpTable
#----------------------------------------------------------------------------------------------------
vcfToTable <- function(vcf)
{
   # Flatten an expanded VCF object into one data.frame combining variant
   # locations, INFO fields, and a 0/1/2-coded genotype matrix (one column per
   # sample). Rows whose AF (allele frequency) INFO value is NA are dropped.
   #
   # vcf: an expanded VCF (VariantAnnotation), e.g. expand(readVcf(...))
   # returns (invisibly): data.frame whose first column is renamed "chr" and
   # prefixed with "chr", followed by the remaining location columns, the
   # INFO columns, and one genotype column per sample.

   # only want the locations with non-NA AF (allele frequency), AC (allele count)
   # browser()
   deleters <- which(is.na(unlist(info(vcf)[["AF"]])))
   keepers <- which(!is.na(unlist(info(vcf)[["AF"]])))
   # the bare length/range/dim/head expressions below do not auto-print inside
   # a function; they are leftovers from interactive development
   length(deleters); range(deleters)
   length(keepers); range(keepers)
   tbl.info <- as.data.frame(info(vcf))[keepers,]
   dim(tbl.info)
   mtx.geno <- geno(vcf)$GT[keepers,]
   dim(mtx.geno)
   # recode genotype strings to alt-allele counts:
   # "0/1" (het) -> 1, "1/1" (hom alt) -> 2, everything else -> 0
   mtx.012 <- matrix(0, nrow=nrow(mtx.geno), ncol=ncol(mtx.geno), dimnames=list(rownames(mtx.geno), colnames(mtx.geno)))
   mtx.012[which(mtx.geno=="0/1")] <- 1
   mtx.012[which(mtx.geno=="1/1")] <- 2
   mtx.geno <- mtx.012
   tbl.geno <- as.data.frame(mtx.geno)
   dim(tbl.geno)
   head(lapply(tbl.geno, class))
   tbl.loc <- as.data.frame(rowRanges(vcf[keepers]))
   dim(tbl.loc)
   # assemble: location columns, then INFO columns, then per-sample genotypes
   tbl <- cbind(cbind(tbl.loc, tbl.info), tbl.geno)
   dim(tbl)    # 18405 383   (349 + 24 + 10)
   colnames(tbl)[1] <- "chr"
   tbl$chr <- paste("chr", tbl$chr, sep="")
   invisible(tbl)

} # vcfToTable
#----------------------------------------------------------------------------------------------------
test_vcfToTable <- function()
{
   # load the cached demo VCF only once per session
   if(!exists("vcf.demo")){
      load("inpp5d.AD.vcf.RData")
      }
   tbl.result <- vcfToTable(expand(vcf.demo))
   checkEquals(dim(tbl.result), c(8215, 113))

} # test_vcfToTable
#----------------------------------------------------------------------------------------------------
getAllTables <- function()
{
   # Build snp tables for every supported gene, once for AD samples and once
   # for Controls.
   #
   # returns: a named list with elements "ad" and "ctl", each a list of
   # per-gene tables (one entry per element of the global goi).
   #
   # bug fix: the original body's last expression was the x.ctl assignment, so
   # only the Control list was returned (invisibly) and x.ad was discarded.

   x.ad  <- lapply(goi, function(gene) getSnpTable(gene, "AD"))
   x.ctl <- lapply(goi, function(gene) getSnpTable(gene, "Control"))

   list(ad=x.ad, ctl=x.ctl)

} # getAllTables
#----------------------------------------------------------------------------------------------------
|
dfe8fdb1c4b896137adbf30eda0f93073244eb2e
|
e3025a6182fa8739447e505bb7e723fb26a10712
|
/man/download_CGLS_data.Rd
|
5050574ce094aacd0e2369389267ba796d2b6e8e
|
[] |
no_license
|
xiaojiujiu999/RCGLS
|
5b1861cf0a542d90ab54adf64a0c859307fc2b11
|
1d28bb35a00976676d3ab96da16a503d0242c9de
|
refs/heads/master
| 2022-12-14T05:52:39.659229
| 2020-09-16T08:10:17
| 2020-09-16T08:10:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,457
|
rd
|
download_CGLS_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RCGLS-functions.R
\name{download_CGLS_data}
\alias{download_CGLS_data}
\title{Download CGLS data}
\usage{
download_CGLS_data(username, password, timeframe, product, resolution, version)
}
\arguments{
\item{username}{Register at https://land.copernicus.eu/global/}
\item{password}{Register at https://land.copernicus.eu/global/}
\item{timeframe}{Time frame of interest, for example June 2019}
\item{product}{Product name: fapar, fcover, lai, ndvi, ss, swi, lst, ...}
\item{resolution}{1km, 300m or 100m}
\item{version}{Version number: v1, v2, v3,...}
}
\value{
CGLS data Data saved locally in chosen folder.
}
\description{
Downloads manifest files of the Copernicus Global Land Service. Registration at https://land.copernicus.eu/global/ is required.
}
\details{
Check https://land.copernicus.eu/global/products/ for a product overview and product details. Check https://land.copernicus.vgt.vito.be/manifest/ for an overview of data availability in the manifest.
}
\examples{
\dontrun{
#library(RCurl)
UN <- "Willemijn"
PW <- "Testthis"
TF <- seq(as.Date("2019-06-01"), as.Date("2019-06-15"), by="days")
PROD <- "fapar" #Product name: fapar, fcover, lai, ndvi, ss, swi, lst, ...
RES <- "1km" #1km, 300m or 100m
V <- "v1" #Version number: v1, v2, v3, ...
download_CGLS_data(username=UN, password=PW, timeframe=TF, product=PROD, resolution=RES, version=V)
}
}
|
92ce650f05980928ff81a94bff73055a30af58a3
|
e60ba3cec348b78509e1475f92e26dcec78ed362
|
/man/get_stations.Rd
|
186ac0792e3cf0edb3690ebe67fd2393d2c856bb
|
[] |
no_license
|
benaldous/weathr
|
4262c744c8f668310005522fa5f10e2f78c7233c
|
631b4bfda35ff608bba5ab39a8fa9fe1d6e2b089
|
refs/heads/master
| 2022-12-17T13:43:15.564233
| 2020-09-14T16:08:48
| 2020-09-14T16:08:48
| 209,882,465
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 283
|
rd
|
get_stations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Get-Stations.R
\name{get_stations}
\alias{get_stations}
\title{Get List of Stations}
\usage{
get_stations(wmo = TRUE)
}
\arguments{
\item{wmo}{Limit to WMO stations}
}
\description{
Get List of Stations
}
|
da1ee2fcb8d10112686dd21ad2f51e3582268984
|
e0e538679b6e29837839fdbc3d68b4550e256bb9
|
/docs/autumn/example/reg-dummy.r
|
738a3d51f8a90bca6fb1aef2d9d0859354782dba
|
[] |
no_license
|
noboru-murata/sda
|
69e3076da2f6c24faf754071702a5edfe317ced4
|
4f535c3749f6e60f641d6600e99a0e269d1fa4ea
|
refs/heads/master
| 2020-09-24T20:23:36.224958
| 2020-09-22T07:17:54
| 2020-09-22T07:17:54
| 225,833,335
| 0
| 0
| null | 2019-12-05T08:20:51
| 2019-12-04T09:51:18
| null |
UTF-8
|
R
| false
| false
| 1,607
|
r
|
reg-dummy.r
|
### Handling categorical (qualitative) variables
### Example using datasets::ToothGrowth
### Tooth growth in guinea pigs given vitamin C vs. orange juice
example("ToothGrowth") # plot the data using the example attached to the help page
## Does the delivery method (vitamin C vs. orange juice) change growth?
est1 <- lm(len ~ supp, data=ToothGrowth)
summary(est1)
## The coefficient of suppVC (dummy variable: vitamin C = 1) is negative,
## so orange juice is expected to be more effective;
## however, the difference (coefficient) is not significant at the 5% level
## Add the dose as an additional explanatory variable
est2 <- lm(len ~ supp + dose, data=ToothGrowth)
summary(est2)
## The vitamin C / orange juice difference becomes significant at the 1% level
## That is, comparing under the condition "equal dose", the data statistically
## support orange juice being the more effective delivery method
### Example using Tokyo climate data
## read the data (Japanese columns: 降水量 = precipitation, 気温 = temperature, 月 = month)
mydata <- read.csv("data/tokyo-weather.csv",fileEncoding="utf8")
## analyze the relation between rain and temperature
mydat1 <- transform(mydata, 降水=as.factor(降水量 > 0))
est1 <- lm(気温 ~ 降水, data=mydat1) # fit the model
summary(est1) # supports higher temperature on rainy days
## Tokyo gets more rain in summer than in winter, so control for seasonality
## by turning the month into a factor (dummy variables)
mydat2 <- transform(mydat1, 月=as.factor(月))
est2 <- lm(気温 ~ 降水 + 月, data=mydat2)
summary(est2) # now supports LOWER temperature on rainy days
|
b378de6be0c2adc851bf8ad047c0c3d9321b95a1
|
46eab7350f674de87ae4e84e7b3e8b0860463443
|
/tests/testthat/test_cluster_regions.R
|
766c87bd34422ea30cc48499c07b5be5e99b5e2c
|
[] |
no_license
|
yangxhcaf/NINA
|
f2c028958973fc3aabd262fbab429dff3cfa3280
|
f80b41590f83730f7fbb0bbc9592e9f8a8075067
|
refs/heads/main
| 2023-03-06T15:17:01.559313
| 2021-02-15T15:03:41
| 2021-02-15T15:03:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 304
|
r
|
test_cluster_regions.R
|
context("Testing")
# Checks that cluster_regions returns a data.frame with one row per non-NA
# raster cell and exactly the requested number of clusters.
test_that("Success", {
  library(NINA)
  # use TRUE, not the reassignable shorthand T; <- for assignment
  env_clus <- cluster_regions(env_data, n.clus = 5, plot = TRUE)
  expect_equal(class(env_clus), "data.frame")
  expect_equal(nrow(env_clus), nrow(na.exclude(raster::getValues(env_data))))
  expect_equal(length(unique(env_clus$cluster)), 5)
})
|
ef8e1a735dc7f62b700b1ab76f82c69b3a32230e
|
c836b2dd62c2c87a013301e33fe349c89562e764
|
/MLExpResso/man/islands_locations.Rd
|
9f5f429188779052e075ac322144936de5a377cc
|
[] |
no_license
|
xulijunji/MLGenSig
|
95f9acd4f2dabfd57cdd99ecb97ea48d72205261
|
4dfd28aaa85cbeb808c2367b486bbea80b093e29
|
refs/heads/master
| 2021-07-08T14:43:39.877920
| 2017-10-03T21:08:47
| 2017-10-03T21:08:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 388
|
rd
|
islands_locations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/islands_locations.R
\name{islands_locations}
\alias{islands_locations}
\title{Locations of CpG islands}
\usage{
islands_locations(data_A, data_B, condition)
}
\arguments{
\item{data_A}{d}
\item{data_B}{d}
\item{condition}{vector with conditions}
}
\value{
data frame
}
\description{
ddd
}
\keyword{internal}
|
732c2fa91473304509bcc390db42072fc7a2a0ea
|
afd66b36eb652894c79ab964dd602e4970de6458
|
/figures/paso_1_multiplicacion_normales_image.R
|
334c4a90e3eb3a46b99f4f916a34e2bf17d5c598
|
[] |
no_license
|
glandfried/liaa2018
|
54202dcb2fb4aa1c2a37ead77fc6e6440abf92d8
|
b926e9dd0ca4a49591ed903bdefc0c4dd247f1bf
|
refs/heads/master
| 2020-12-26T21:25:29.740240
| 2020-06-08T22:08:33
| 2020-06-08T22:08:33
| 237,649,069
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,818
|
r
|
paso_1_multiplicacion_normales_image.R
|
###########################################
# Header: remember graphics/working-directory state, then open a PDF device
# named after this script file
oldpar <- par(no.readonly = TRUE)
oldwd <- getwd()
this.dir <- dirname(parent.frame(2)$ofile)
nombre.R <- sys.frame(1)$ofile
require(tools)
nombre <- print(file_path_sans_ext(nombre.R))
pdf(paste0(nombre,".pdf"))
setwd(this.dir)
#setwd("~/gaming/materias/inferencia_bayesiana/trabajoFinal/imagenes")
#####################################
par(mar=c(3.75,3.75,.75,.75))
a<--10;b<-60 # lower and upper integration limits along the skill axis
mu<-25
# NOTE(review): the global `sigma` below is 25/3 while the function defaults
# use sqrt(25/3); confirm which parameterization (sd vs variance) is intended
denominador_sigma2 <- 3; numerador_sigma2 <- mu
sigma <- numerador_sigma2/denominador_sigma2
denominador_beta2 <- 2; numerador_beta2 <- sigma
beta <- numerador_beta2/denominador_beta2
# prior density of skill: p(s_i) = N(s_i;mu_i,sigma_i^2)
p.s <- function(s_i,mu=25,sigma=sqrt(25/3)){
return(dnorm(s_i,mean=mu,sd=sigma ))
}
# Riemann-sum check that the prior integrates to ~1:
# int_{s_i} N(s_i;mu_i,sigma_i^2) ds_i
# int_{s_i} p(s_i) ds_i
ds=0.25;s_grilla <- seq(a,b,by=ds)
int_s = sum(p.s(s_grilla))*ds; int_s
# marginal density of performance:
# p(p_i) = int_{s_i} N(p_i;s_i,beta^2) N(si;mu_i,sigma_i^2) ds_i
p.p <- function(p,mu=25,sigma=sqrt(25/3),beta=sqrt((25/3)/2),ds=0.1,a=0,b=50){
s <- seq(a,b,by=ds)
res <- sum(dnorm(p,s,beta)*dnorm(s,mu,sigma)*ds)
return(res)
}
# Integrating over p_i must give 1:
# int_{p_i} p(p_i) dp_i = int_{p_i} int_{s_i} N(p_i;s_i,beta^2) N(si;mu_i,sigma_i^2) dp_i ds_i
dp <- 0.1; p_grilla <- seq(a,b,by=dp)
int_p = sum(unlist(Map(p.p,p_grilla)))*dp; int_p
# Expanding this to 2 dimensions gives the joint density over (skill, performance)
normal_product_dep <- function(x1,x2,mu1=25,sigma1=25/3,sigma2=25/6){
return(dnorm(x2,x1,sigma2)*dnorm(x1,mu1,sigma1))
}
p <- 30; ds <- 0.1;s_grilla <- seq(a,b,by=ds);p_grilla <- s_grilla
producto <- outer(s_grilla,p_grilla, normal_product_dep)
colnames(producto) <- s_grilla
rownames(producto) <- p_grilla
levels <- seq(min(producto),max(producto),length.out = 11)
# grayscale raster of the joint density plus contour lines; axis labels are
# Spanish runtime strings ("Rendimiento" = performance, "Habilidad" = skill)
image(s_grilla,p_grilla,producto,ylim=c(0,50),xlim=c(0,50),col=rev(gray.colors(10,start=0.2,end=0.95)),breaks = levels,useRaster=T,
ylab="",xlab="",axes=F)
contour(s_grilla,p_grilla,producto,ylim=c(0,50),xlim=c(0,50),drawlabels=F,levels = levels,add = T,col=rev(gray.colors(11,start=0,end=0.6)),lwd=1.1)
mtext("Rendimiento", side=2, line=2,cex = 2)
mtext("Habilidad", side=1, line=2,cex = 2)
axis(lwd=1,lwd.ticks=1,side=2, labels=NA,cex.axis=0.6,tck=0.02)
axis(lwd=1,side=2,at=25, labels= expression(mu[i]),cex.axis=1.5,line=-0.66)
axis(lwd=0,side=2,at=30, labels= expression(p[i]),cex.axis=1.5,line=-0.66)
axis(lwd=1,lwd.ticks=1,side=1, labels=NA,cex.axis=0.6,tck=0.02)
axis(lwd=1,side=1,at=25, labels= expression(mu[i]),cex.axis=1.5,line=-0.66)
points(mu,mu,pch=19)
lines(mu+(p_grilla-mu)*(4/5) ,s_grilla,cex=0.1)
abline(h=p,lty=3,col=rgb(0,0,0,1))
#######################################
# end: close the PDF device and restore working directory and par settings
dev.off()
setwd(oldwd)
par(oldpar, new=F)
#########################################
|
f39b033b52d2de0847a00982117ebcfd83b688f8
|
22dc322d68a8bfaecf3c57be5ec99a433f0a95a8
|
/man/IPDNa.Rd
|
18ade01191c5bb4d27395c8852b206e8fb0a153b
|
[] |
no_license
|
cran/micemd
|
19a1acfeb69da9e62d1639265a518ecebeb1f3a5
|
e5adbe076babd9f6c9aa3926eaabdd73d76dd69f
|
refs/heads/master
| 2023-06-09T21:04:43.211056
| 2023-06-01T11:00:04
| 2023-06-01T11:00:04
| 91,136,501
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,868
|
rd
|
IPDNa.Rd
|
\name{IPDNa}
\alias{IPDNa}
\docType{data}
\title{
A simulated Individual Patient Data (IPD) meta-analysis with missing values.
}
\description{
This dataset is a simulated version of an IPD meta-analysis consisting of 28 studies focusing on risk factors in acute heart failure (GREAT, 2013). Each study includes a list of patient characteristics and potential risk factors. Each of them is incomplete, leading to sporadically missing values (Resche-Rigon, et al 2013). In addition, some variables have been collected on some studies only, leading to systematically missing values. More details on the original dataset are provided in Audigier et al. (2018). To mimic the real data, a general location model has been fitted on each study (Schafer, 1997). Then, each study has been generated according to the estimated parameters. Finally, missing values have been allocated similarly to the original dataset.
}
\usage{data("IPDNa")}
\format{
A data frame with 11685 observations on the following 10 variables.
\describe{
\item{\code{centre}}{a numeric indexing the center where the study is conducted}
\item{\code{gender}}{a factor with levels \code{0} \code{1}}
\item{\code{bmi}}{a numeric vector indicating the body mass index}
\item{\code{age}}{a numeric vector indicating the age}
\item{\code{sbp}}{a numeric vector indicating the systolic blood pressure}
\item{\code{dbp}}{a numeric vector indicating the diastolic blood pressure}
\item{\code{hr}}{a numeric vector indicating the heart rate}
\item{\code{lvef}}{a numeric vector indicating the ventricular ejection fraction}
\item{\code{bnp}}{a numeric vector indicating the level of the brain natriuretic peptide biomarker}
\item{\code{afib}}{a factor with levels \code{0} \code{1} indicating the atrial fibrillation}
}
}
\details{
For more details, see Audigier et al. (2018)
}
\source{
GREAT Network (2013). Managing acute heart failure in the ed - case studies
from the acute heart failure academy. http://www.greatnetwork.org
}
\references{
Audigier, V., White, I. , Jolani ,S. Debray, T., Quartagno, M., Carpenter, J., van Buuren, S. and Resche-Rigon, M.
Multiple imputation for multilevel data with continuous and binary variables (2018). Statistical Science. <doi:10.1214/18-STS646>.
Resche-Rigon, M., White, I. R., Bartlett, J., Peters, S., Thompson, S., and on behalf of the
PROG-IMT Study Group (2013). Multiple imputation for handling systematically missing
confounders in meta-analysis of individual participant data. Statistics in Medicine,
32(28):4890-4905. <doi:10.1002/sim.5894>
Schafer, J. L. (1997) Analysis of Incomplete Multivariate Data. Chapman & Hall, Chapter 9.
}
\examples{
data(IPDNa)
#summary
summary(IPDNa)
#summary per study
by(IPDNa,IPDNa$centre,summary)
}
\keyword{datasets}
|
04d518649d0656b9889f974195ee31942ef85246
|
b0831da803f0de5197a9a8584feae1009e03ec8b
|
/knn.R
|
571afc3860fe8d438bea93e60c467400922499d0
|
[] |
no_license
|
bdewilde/kaggle_digit_recognizer
|
86f1a74c7a93f517808e39d98b6bf6a7b2bd15e1
|
b99b76688a5cf74aa3cc18bbcb4f10e3556e3c70
|
refs/heads/master
| 2016-09-05T23:31:26.821727
| 2012-10-27T17:29:41
| 2012-10-27T17:29:41
| 6,131,556
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,479
|
r
|
knn.R
|
# Kaggle: Digit Recognizer
# https://www.kaggle.com/c/digit-recognizer/data
# produce submission file with optimal knn model
# Two independent approaches follow: weighted kNN (kknn) and fast kNN (FNN);
# each writes its own single-column submission file.
# load training and test datasets
train <- read.csv("train.csv", header=TRUE)
test <- read.csv("test.csv", header=TRUE)
##############################
# Weighted k-Nearest Neighbors
library(kknn)
# remove pixels with near zero variance -- not good predictors
library(caret)
badCols <- nearZeroVar(train[,-1])
print(paste("Fraction of nearZeroVar columns:", round(length(badCols)/length(train),4)))
# train keeps its label in column 1, so shift the pixel indices by one
train <- train[, -(badCols+1)]
test <- test[, -badCols]
# train the kknn model
# NOTE(review): k=9 with a triangular kernel appears to be the tuned optimum;
# confirm against the cross-validation runs that selected it
model <- kknn(as.factor(label) ~ ., train, test, k=9, kernel="triangular")
results <- model$fitted.values
# save the output as column vector
# fitted.values is a factor; index its levels to recover the numeric digits
write(as.numeric(levels(results))[results], file="kknn_submission.csv", ncolumns=1)
##########################################################
# Fast Nearest Neighbor Search Algorithms and Applications
library(FNN)
# drop label columns for use in KNN
trainCl <- train[, 1]
train <- train[, -1]
# remove pixels with near zero variance -- not good predictors
library(caret)
badCols <- nearZeroVar(train)
print(paste("Fraction of nearZeroVar columns:", round(length(badCols)/length(train),4)))
train <- train[, -badCols]
test <- test[, -badCols]
# train the knn model
# knn() returns a factor indexing into 0:9; k=5 with cover_tree search
results <- (0:9)[knn(train, test, trainCl, k=5, algorithm="cover_tree")]
# save the output as column vector
write(results, file="knn_submission.csv", ncolumns=1)
|
23d5f831d5eacfc999ebf4e9856de81c8c149583
|
c9134ea0d1873a0a7a6157b30da2699ec4df9478
|
/R/adfgConvert_StatAreas-LatLons.R
|
14a4ecd40b51bc2714af1f8f757ed513dc2d6fd6
|
[
"MIT"
] |
permissive
|
wStockhausen/tcsamFisheryDataADFG
|
2031a5302e4590dbf0eee10f065ea0610bf56010
|
d1c9c72d0a54d32ed62cee1763376fa65f412ae2
|
refs/heads/master
| 2023-04-13T09:34:49.059459
| 2023-04-01T00:28:47
| 2023-04-01T00:28:47
| 180,407,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,095
|
r
|
adfgConvert_StatAreas-LatLons.R
|
#'
#' @title Convert ADFG stat areas to lat-lon coordinates
#'
#' @description Function to convert ADFG stat areas to lat-lon coordinates.
#'
#' @param stat_areas - character vector of ADFG stat areas
#'
#' @return a tibble with columns "lat", "lon" representing the center of each stat area.
#'
#' @details ADFG stat areas are coded AABBCC, where\cr
#' lon = 100+AA W longitude \cr
#' and \cr
#' lat = BB+CC/60 N latitude \cr
#' in decimal degrees.
#'
#' AABBCC represents the lower right corner
#' of the stat area.
#'
#' @importFrom tibble tibble
#'
#' @export
#'
adfgConvert_StatAreasToLatLons<-function(stat_areas){
# Decode the corner encoded as AABBCC: lon = -(100+AA) deg E, lat = BB + CC/60 deg N.
# Per the roxygen block above, AABBCC is the lower-RIGHT corner of the stat
# area (the original inline comment said "lower left"); the shifts below move
# from that corner to the cell center, which implies a 1-degree-wide by
# 30-minute-tall stat area -- TODO confirm against the ADFG grid definition.
lond=-(100+as.numeric(substr(stat_areas,1,2)));#longitude (deg E)
latd=as.numeric(substr(stat_areas,3,4)); #latitude, whole deg N
latm=as.numeric(substr(stat_areas,5,6)); #latitude, minutes N
tbl = tibble(lat=latd+(latm+15)/60,lon=lond-0.5);#shift from corner to center
return(tbl);
}
#'
#' @title Convert lat-lon coordinates to ADFG stat areas
#'
#' @description Function to convert lat-lon coordinates to ADFG stat areas.
#'
#' @param dfr - dataframe with lat, lon coordinates
#' @param latCol - name of column with latitudes
#' @param lonCol - name of column with longitudes
#'
#' @return a character vector with the ADFG stat area corresponding to each set
#' of lat-lon coordinates.
#'
#' @details ADFG stat areas are coded AABBCC, where\cr
#' lon = 100+AA W longitude \cr
#' and \cr
#' lat = BB+CC/60 N latitude \cr
#' in decimal degrees.
#'
#' AABBCC represents the lower right corner
#' of the stat area.
#'
#' @importFrom wtsUtilities formatZeros
#'
#' @export
#'
adfgConvert_LatLonsToStatAreas<-function(dfr,latCol="lat",lonCol="lon"){
  lat_vals <- dfr[[latCol]]
  # BB: whole degrees of latitude; CC: minutes rounded down to 00 or 30
  bb_part <- as.character(floor(lat_vals))
  cc_part <- wtsUtilities::formatZeros(30*floor(2*(lat_vals %% 1)))
  # AA: degrees W longitude minus 100; inputs may be deg E or deg W, so
  # fold anything beyond 180 back into the western hemisphere first
  lon_vals <- abs(dfr[[lonCol]])
  aa_deg <- floor(ifelse(lon_vals > 180, 360 - lon_vals, lon_vals) - 100)
  # a stored longitude of 0 (an NA placeholder upstream) maps to -100 here;
  # reset those to 0 before formatting
  aa_deg[aa_deg == -100] <- 0
  aa_part <- wtsUtilities::formatZeros(aa_deg)
  paste0(aa_part, bb_part, cc_part)
}
|
eb6825a42b03b412c5194cfca41fd2241b86380e
|
23fde5f01c1f5c21a985da4b72b48319f848bab1
|
/plot1.r
|
657ed241678963467e569c3903bfd2acffeb4d4b
|
[] |
no_license
|
andenzen/Exploratory-Data-Analysis-Week1-CourseProject
|
45c3dffe87ac18f8af5bbb6d991d3154973c2d56
|
42ec6446337fa93ec7ac3a800c9a6b040b3ebdce
|
refs/heads/master
| 2020-05-25T14:54:21.399711
| 2019-05-21T14:44:02
| 2019-05-21T14:44:02
| 187,856,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 922
|
r
|
plot1.r
|
# Exploratory Data Analysis Week 1 Course Project
# Author: Antti Merisalo
# Date 2019-5-20
## NOTE: "household_power_consumption.txt" must already be present in the
## working directory for this script to run.

# Libraries used for data manipulation.
library(lubridate)
library(dplyr)

# Rows 66638..69517 of the file hold the observations for the two target
# dates (2007-02-01 and 2007-02-02); "?" marks missing values.
power <- read.table("household_power_consumption.txt",
                    skip = 66637, nrows = 2880, sep = ";", na.strings = "?")
header_row <- read.table("household_power_consumption.txt", nrows = 1, sep = ";")
colnames(power) <- unlist(header_row)

# Plot 1: histogram of global active power, written to a 480x480 PNG.
png("plot1.png", width = 480, height = 480)
hist(power$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
f0a73476448618e5628cd8528375935f350627eb
|
833bd5182389d0162730091d9da528c39b178ca7
|
/R/gam.R
|
46e1b635f1376edb5175988ccf63a3cfe618a0d3
|
[] |
no_license
|
alramadona/denfor
|
e26241b888ca0c29383d1e4f4cf7f39a8f08aebb
|
a69cb099d217b5bfee84a477746f8506333afd58
|
refs/heads/master
| 2021-04-03T04:46:37.724458
| 2016-07-19T07:12:12
| 2016-07-19T07:12:12
| 63,670,869
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,046
|
r
|
gam.R
|
# Monthly dengue forecast.
# Fits a quasi-Poisson GAM to a hard-coded 120-month training series
# (dengue counts with lagged temperature/rainfall covariates) and predicts
# dengue counts for the new covariate values supplied by the caller.
#   templ3 - temperature lagged 3 months
#   rainl2 - rainfall lagged 2 months
#   rainl3 - rainfall lagged 3 months
#   month  - calendar month (1-12); coerced to factor for prediction
# Returns the predicted monthly dengue counts on the response scale.
denforM <- function(templ3,rainl2,rainl3,month)
{
# NOTE(review): library(mgcv) is preferred over require() for a hard dependency.
require(mgcv)
# Embedded training data: 10 years of monthly observations (NAs at the start
# of each lagged series correspond to the lag length).
gam_train <- data.frame(dengue=(c(48,47,47,63,142,66,67,50,35,24,15,3,61,43,32,59,52,32,21,20,24,16,15,4,56,43,37,86,51,51,50,20,37,33,17,22,151,160,261,36,13,4,12,10,20,11,9,18,15,31,39,30,23,14,7,17,13,19,34,101,172,230,247,63,31,37,31,31,15,14,11,12,80,99,129,99,136,74,60,35,15,15,17,8,96,95,81,139,132,61,52,18,10,16,19,49,85,67,54,96,54,64,91,56,18,23,24,56,70,95,162,152,162,208,160,136,71,90,92,119)),templ3=(c(NA,NA,NA,25.4,26.0,26.1,26.9,27.2,26.3,25.6,25.6,27.4,27.1,27.0,26.3,26.5,26.2,27.1,27.1,27.2,26.2,25.9,25.1,26.2,27.5,27.8,27.2,26.6,26.4,27.0,27.8,26.9,26.3,24.8,25.3,26.7,27.1,NA,26.5,27.0,26.5,26.6,28.0,27.2,25.9,26.2,25.4,26.8,27.5,27.6,26.7,26.7,26.7,27.3,27.3,27.7,27.5,26.2,25.8,27.0,27.3,27.4,25.9,26.4,26.8,26.6,26.5,26.7,25.6,24.9,25.1,25.7,27.5,29.0,27.5,27.0,27.3,26.6,27.0,27.5,26.4,25.7,25.5,26.0,27.5,26.8,26.4,26.3,25.8,25.7,26.8,26.3,25.6,24.4,25.7,27.0,27.5,26.3,26.0,26.2,25.7,26.7,27.3,26.8,26.6,25.2,25.4,26.8,27.8,27.9,27.5,26.8,27.2,27.6,27.5,27.6,27.3,26.9,27.2,27.2)),rainl2=(c(NA,NA,493,304,466,335,46,83,30,0,2,212,201,161,530,456,166,120,97,0,1,0,0,2,262,236,174,465,233,44,123,14,0,0,0,29,NA,301,334,159,268,19,23,6,13,7,1,13,329,424,346,484,152,254,1,106,123,12,5,128,87,481,391,309,337,232,195,0,0,0,0,1,42,329,100,335,284,326,40,39,3,0,0,53,185,709,174,346,242,264,106,9,0,0,2,191,306,232,317,312,249,191,122,42,0,0,0,70,121,185,254,312,414,131,319,113,35,155,401,157)),rainl3=(c(NA,NA,NA,493,304,466,335,46,83,30,0,2,212,201,161,530,456,166,120,97,0,1,0,0,2,262,236,174,465,233,44,123,14,0,0,0,29,NA,301,334,159,268,19,23,6,13,7,1,13,329,424,346,484,152,254,1,106,123,12,5,128,87,481,391,309,337,232,195,0,0,0,0,1,42,329,100,335,284,326,40,39,3,0,0,53,185,709,174,346,242,264,106,9,0,0,2,191,306,232,317,312,249,191,122,42,0,0,0,70,121,185,254,312,414,131,319,113,35,155,401)),month=(c(1,2,3,4,5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8,9,10,11,12
,1,2,3,4,5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8,9,10,11,12,1,2,3,4,5,6,7,8,9,10,11,12)))
# Explicit type coercions so gam() sees the intended column classes.
gam_train$dengue <- as.integer(gam_train$dengue)
gam_train$templ3 <- as.numeric(gam_train$templ3)
gam_train$rainl2 <- as.integer(gam_train$rainl2)
gam_train$rainl3 <- as.integer(gam_train$rainl3)
gam_train$month <- as.factor(gam_train$month)
# Smooth terms (k=4 basis dimension) for each lagged climate covariate,
# plus a fixed month effect; quasi-Poisson handles over-dispersed counts.
gam.mod <- gam(dengue ~ s(templ3,k=4) + s(rainl2,k=4) + s(rainl3,k=4) + month, family=quasipoisson, na.action=na.exclude, data=gam_train)
# Predict for the caller-supplied covariates on the response (count) scale.
gam_test <- data.frame(templ3,rainl2,rainl3,month)
gam.prediction.m <- predict(gam.mod, type="response", newdata=as.data.frame(gam_test))
}
# Weekly dengue outbreak-indicator forecast.
# Fits a binomial (logistic) GAM to a hard-coded weekly training series of
# an outbreak indicator (OutMean, 0/1) with rainfall lagged 5, 7, 8 and 9
# weeks, then predicts the indicator for the supplied covariates.
#   rainl5 - rainfall lagged 5 weeks
#   rainl7 - rainfall lagged 7 weeks
#   rainl8 - rainfall lagged 8 weeks
#   rainl9 - rainfall lagged 9 weeks
# Returns 0/1 predictions (probabilities rounded to the nearest integer).
denforW <- function(rainl5,rainl7,rainl8,rainl9)
{
# NOTE(review): library(mgcv) is preferred over require() for a hard dependency.
require(mgcv)
# Embedded weekly training data; leading NAs in each rain series match the
# lag length.
gam_trainW <- data.frame(OutMean=(c(0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,1,0,0,0,1,0,0,0,1,1,1,1,1,0,1,0,1,1,1,0,1,1,1,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0)),
rainl5=(c(NA,NA,NA,NA,NA,70.2,12.2,1.1,44.8,48.5,127.3,79.0,64.2,197.8,101.3,42.2,56.4,28.2,17.1,23.4,0.0,2.2,82.5,41.4,0.0,0.0,0.0,0.9,0.0,12.5,0.6,0.4,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.4,1.0,0.0,5.0,NA,NA,NA,NA,NA,109.4,36.7,62.8,84.0,22.9,21.0,50.8,42.9,204.3,43.5,11.7,87.6,15.6,95.1,40.5,106.2,25.2,0.6,14.8,0.0,4.6,2.6,0.0,6.6,8.5,5.4,0.0,2.5,0.0,3.6,10.0,0.0,2.7,0.0,0.0,0.0,0.0,0.0,7.0,0.0,0.8,0.0,0.6,0.0,0.0,0.0,11.6,1.0,4.6,14.9,3.8,219.1,240.2,7.0,27.4,142.1,93.6,29.0,31.8,227.2,45.6,15.7,145.2,167.5,152.3,48.3,31.3,2.1,15.8,144.2,71.9,89.9,14.0,4.0,1.0,0.0,0.0,0.0,0.0,0.0,38.3,62.3,5.3,120.4,2.6,0.0,0.0,0.0,0.0,12.2,0.0,0.0,0.9,3.4,0.8,0.0,0.0,9.9,114.6,3.9,53.5,0.0,4.8,24.4,56.8,97.4,154.8,82.1,104.3,18.9,50.7,73.3,212.7,91.5,73.3,67.2,75.1,70.3,6.2,194.9,61.1,71.7,49.2,102.6,38.2,0.4,68.5,17.9,23.9,58.3,26.1,0.0,0.0,0.0,0.0,0.0,0.3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.4,0.0,0.0,0.2,16.1,0.0,12.6,12.6,23.9,93.0,70.8,141.0,3.5,10.1,31.9,52.0,85.9,45.0,108.0,93.2,70.5,26.1,71.5,88.5,62.6,62.4,122.0,89.1,22.1,0.2,1.0,35.4,0.0,3.5,8.9,0.0,11.7,18.6,0.4,2.4,0.5,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,7.7,100.8,87.5,24.7,0.0,20.5,62.4,293.4,36.4,277.4,65.2,24.2,29.7,54.3,62.9,128.8,60.0,71.1,72.3,57.2,66.8,86.8,18.4,113.7,127.1,22.6,0.2,15.1,32.6,0.0,58.0,0.0,0.0,9.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.2,0.0,0.0,0.0,27.8,27.7,77.1,75.4,115.0,59.2,61.8,52.2,37.0,78.7,37.6,55.4,93.9,30.9,83.5,29.9,119.8,110.8,99.9,25.9,76.4,29.5,4.5,112.6,46.5,105.8,3.0,8.7,59.9,50.6,19.1,1.8,36.4,65.0,0.4,41.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.3,69.0,0.0,0.0,24.1,79.9,16.9,6.5,55.0,0.0,103.1,20.8,52.1,83.0,77.2,41.5,67.0,92.6,145.0,7.0,9.2,145.5,158.1,91.1,9.7,39.4,3.8,78.4,24.2,29.4,126.7,54.3,94.3,7.4,73.2,22.5,0.3,29.5,7.1,3.2,0.0,4.4,0.0,58.8,10.8,71.6,20.7,169.7,61.4,162.7,3.0,14.1,45.6,59.5,35.0,52.4,103.0,0.0)),
rainl7=(c(NA,NA,NA,NA,NA,NA,NA,70.2,12.2,1.1,44.8,48.5,127.3,79.0,64.2,197.8,101.3,42.2,56.4,28.2,17.1,23.4,0.0,2.2,82.5,41.4,0.0,0.0,0.0,0.9,0.0,12.5,0.6,0.4,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.4,1.0,0.0,5.0,NA,NA,NA,NA,NA,109.4,36.7,62.8,84.0,22.9,21.0,50.8,42.9,204.3,43.5,11.7,87.6,15.6,95.1,40.5,106.2,25.2,0.6,14.8,0.0,4.6,2.6,0.0,6.6,8.5,5.4,0.0,2.5,0.0,3.6,10.0,0.0,2.7,0.0,0.0,0.0,0.0,0.0,7.0,0.0,0.8,0.0,0.6,0.0,0.0,0.0,11.6,1.0,4.6,14.9,3.8,219.1,240.2,7.0,27.4,142.1,93.6,29.0,31.8,227.2,45.6,15.7,145.2,167.5,152.3,48.3,31.3,2.1,15.8,144.2,71.9,89.9,14.0,4.0,1.0,0.0,0.0,0.0,0.0,0.0,38.3,62.3,5.3,120.4,2.6,0.0,0.0,0.0,0.0,12.2,0.0,0.0,0.9,3.4,0.8,0.0,0.0,9.9,114.6,3.9,53.5,0.0,4.8,24.4,56.8,97.4,154.8,82.1,104.3,18.9,50.7,73.3,212.7,91.5,73.3,67.2,75.1,70.3,6.2,194.9,61.1,71.7,49.2,102.6,38.2,0.4,68.5,17.9,23.9,58.3,26.1,0.0,0.0,0.0,0.0,0.0,0.3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.4,0.0,0.0,0.2,16.1,0.0,12.6,12.6,23.9,93.0,70.8,141.0,3.5,10.1,31.9,52.0,85.9,45.0,108.0,93.2,70.5,26.1,71.5,88.5,62.6,62.4,122.0,89.1,22.1,0.2,1.0,35.4,0.0,3.5,8.9,0.0,11.7,18.6,0.4,2.4,0.5,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,7.7,100.8,87.5,24.7,0.0,20.5,62.4,293.4,36.4,277.4,65.2,24.2,29.7,54.3,62.9,128.8,60.0,71.1,72.3,57.2,66.8,86.8,18.4,113.7,127.1,22.6,0.2,15.1,32.6,0.0,58.0,0.0,0.0,9.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.2,0.0,0.0,0.0,27.8,27.7,77.1,75.4,115.0,59.2,61.8,52.2,37.0,78.7,37.6,55.4,93.9,30.9,83.5,29.9,119.8,110.8,99.9,25.9,76.4,29.5,4.5,112.6,46.5,105.8,3.0,8.7,59.9,50.6,19.1,1.8,36.4,65.0,0.4,41.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.3,69.0,0.0,0.0,24.1,79.9,16.9,6.5,55.0,0.0,103.1,20.8,52.1,83.0,77.2,41.5,67.0,92.6,145.0,7.0,9.2,145.5,158.1,91.1,9.7,39.4,3.8,78.4,24.2,29.4,126.7,54.3,94.3,7.4,73.2,22.5,0.3,29.5,7.1,3.2,0.0,4.4,0.0,58.8,10.8,71.6,20.7,169.7,61.4,162.7,3.0,14.1,45.6,59.5,35.0,52.4)),
rainl8=(c(NA,NA,NA,NA,NA,NA,NA,NA,70.2,12.2,1.1,44.8,48.5,127.3,79.0,64.2,197.8,101.3,42.2,56.4,28.2,17.1,23.4,0.0,2.2,82.5,41.4,0.0,0.0,0.0,0.9,0.0,12.5,0.6,0.4,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.4,1.0,0.0,5.0,NA,NA,NA,NA,NA,109.4,36.7,62.8,84.0,22.9,21.0,50.8,42.9,204.3,43.5,11.7,87.6,15.6,95.1,40.5,106.2,25.2,0.6,14.8,0.0,4.6,2.6,0.0,6.6,8.5,5.4,0.0,2.5,0.0,3.6,10.0,0.0,2.7,0.0,0.0,0.0,0.0,0.0,7.0,0.0,0.8,0.0,0.6,0.0,0.0,0.0,11.6,1.0,4.6,14.9,3.8,219.1,240.2,7.0,27.4,142.1,93.6,29.0,31.8,227.2,45.6,15.7,145.2,167.5,152.3,48.3,31.3,2.1,15.8,144.2,71.9,89.9,14.0,4.0,1.0,0.0,0.0,0.0,0.0,0.0,38.3,62.3,5.3,120.4,2.6,0.0,0.0,0.0,0.0,12.2,0.0,0.0,0.9,3.4,0.8,0.0,0.0,9.9,114.6,3.9,53.5,0.0,4.8,24.4,56.8,97.4,154.8,82.1,104.3,18.9,50.7,73.3,212.7,91.5,73.3,67.2,75.1,70.3,6.2,194.9,61.1,71.7,49.2,102.6,38.2,0.4,68.5,17.9,23.9,58.3,26.1,0.0,0.0,0.0,0.0,0.0,0.3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.4,0.0,0.0,0.2,16.1,0.0,12.6,12.6,23.9,93.0,70.8,141.0,3.5,10.1,31.9,52.0,85.9,45.0,108.0,93.2,70.5,26.1,71.5,88.5,62.6,62.4,122.0,89.1,22.1,0.2,1.0,35.4,0.0,3.5,8.9,0.0,11.7,18.6,0.4,2.4,0.5,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,7.7,100.8,87.5,24.7,0.0,20.5,62.4,293.4,36.4,277.4,65.2,24.2,29.7,54.3,62.9,128.8,60.0,71.1,72.3,57.2,66.8,86.8,18.4,113.7,127.1,22.6,0.2,15.1,32.6,0.0,58.0,0.0,0.0,9.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.2,0.0,0.0,0.0,27.8,27.7,77.1,75.4,115.0,59.2,61.8,52.2,37.0,78.7,37.6,55.4,93.9,30.9,83.5,29.9,119.8,110.8,99.9,25.9,76.4,29.5,4.5,112.6,46.5,105.8,3.0,8.7,59.9,50.6,19.1,1.8,36.4,65.0,0.4,41.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.3,69.0,0.0,0.0,24.1,79.9,16.9,6.5,55.0,0.0,103.1,20.8,52.1,83.0,77.2,41.5,67.0,92.6,145.0,7.0,9.2,145.5,158.1,91.1,9.7,39.4,3.8,78.4,24.2,29.4,126.7,54.3,94.3,7.4,73.2,22.5,0.3,29.5,7.1,3.2,0.0,4.4,0.0,58.8,10.8,71.6,20.7,169.7,61.4,162.7,3.0,14.1,45.6,59.5,35.0)),
rainl9=(c(NA,NA,NA,NA,NA,NA,NA,NA,NA,70.2,12.2,1.1,44.8,48.5,127.3,79.0,64.2,197.8,101.3,42.2,56.4,28.2,17.1,23.4,0.0,2.2,82.5,41.4,0.0,0.0,0.0,0.9,0.0,12.5,0.6,0.4,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.4,1.0,0.0,5.0,NA,NA,NA,NA,NA,109.4,36.7,62.8,84.0,22.9,21.0,50.8,42.9,204.3,43.5,11.7,87.6,15.6,95.1,40.5,106.2,25.2,0.6,14.8,0.0,4.6,2.6,0.0,6.6,8.5,5.4,0.0,2.5,0.0,3.6,10.0,0.0,2.7,0.0,0.0,0.0,0.0,0.0,7.0,0.0,0.8,0.0,0.6,0.0,0.0,0.0,11.6,1.0,4.6,14.9,3.8,219.1,240.2,7.0,27.4,142.1,93.6,29.0,31.8,227.2,45.6,15.7,145.2,167.5,152.3,48.3,31.3,2.1,15.8,144.2,71.9,89.9,14.0,4.0,1.0,0.0,0.0,0.0,0.0,0.0,38.3,62.3,5.3,120.4,2.6,0.0,0.0,0.0,0.0,12.2,0.0,0.0,0.9,3.4,0.8,0.0,0.0,9.9,114.6,3.9,53.5,0.0,4.8,24.4,56.8,97.4,154.8,82.1,104.3,18.9,50.7,73.3,212.7,91.5,73.3,67.2,75.1,70.3,6.2,194.9,61.1,71.7,49.2,102.6,38.2,0.4,68.5,17.9,23.9,58.3,26.1,0.0,0.0,0.0,0.0,0.0,0.3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.4,0.0,0.0,0.2,16.1,0.0,12.6,12.6,23.9,93.0,70.8,141.0,3.5,10.1,31.9,52.0,85.9,45.0,108.0,93.2,70.5,26.1,71.5,88.5,62.6,62.4,122.0,89.1,22.1,0.2,1.0,35.4,0.0,3.5,8.9,0.0,11.7,18.6,0.4,2.4,0.5,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,7.7,100.8,87.5,24.7,0.0,20.5,62.4,293.4,36.4,277.4,65.2,24.2,29.7,54.3,62.9,128.8,60.0,71.1,72.3,57.2,66.8,86.8,18.4,113.7,127.1,22.6,0.2,15.1,32.6,0.0,58.0,0.0,0.0,9.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.2,0.0,0.0,0.0,27.8,27.7,77.1,75.4,115.0,59.2,61.8,52.2,37.0,78.7,37.6,55.4,93.9,30.9,83.5,29.9,119.8,110.8,99.9,25.9,76.4,29.5,4.5,112.6,46.5,105.8,3.0,8.7,59.9,50.6,19.1,1.8,36.4,65.0,0.4,41.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.3,69.0,0.0,0.0,24.1,79.9,16.9,6.5,55.0,0.0,103.1,20.8,52.1,83.0,77.2,41.5,67.0,92.6,145.0,7.0,9.2,145.5,158.1,91.1,9.7,39.4,3.8,78.4,24.2,29.4,126.7,54.3,94.3,7.4,73.2,22.5,0.3,29.5,7.1,3.2,0.0,4.4,0.0,58.8,10.8,71.6,20.7,169.7,61.4,162.7,3.0,14.1,45.6,59.5)))
# Explicit type coercions so gam() sees the intended column classes.
gam_trainW$OutMean <- as.integer(gam_trainW$OutMean)
gam_trainW$rainl5 <- as.numeric(gam_trainW$rainl5)
gam_trainW$rainl7 <- as.numeric(gam_trainW$rainl7)
gam_trainW$rainl8 <- as.numeric(gam_trainW$rainl8)
gam_trainW$rainl9 <- as.numeric(gam_trainW$rainl9)
# Logistic GAM: smooth terms (k=4) for each lagged rainfall covariate.
gamW <- gam(OutMean ~ s(rainl5,k=4) + s(rainl7,k=4) + s(rainl8,k=4) + s(rainl9,k=4),
family=binomial, na.action=na.exclude, data=gam_trainW)
# Predict outbreak probabilities and round to a 0/1 indicator.
gamW_test <- data.frame(rainl5,rainl7,rainl8,rainl9)
gam.prediction.w <- round(predict(gamW, type="response", newdata=as.data.frame(gamW_test)),0)
}
|
d3ffeb32444d69865de8c904d6c7918dcb4158c3
|
2b554623ba47451026229abdf4269d2c49803c43
|
/regression.R
|
5fe2b6ad82441cb54fff38ec36075f849b83d3c3
|
[] |
no_license
|
sathishdhevarapalli/Airbnb-Occupancy-Rate-Academic-Project
|
e920cad41dbeee386da2220278d299de8090b235
|
888e2ce59a084a15de9524a6aab5d03d147564ea
|
refs/heads/master
| 2022-07-20T06:34:16.181051
| 2020-05-22T19:53:11
| 2020-05-22T19:53:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
r
|
regression.R
|
# Airbnb occupancy-rate regression.
# Loads the listings dataset (machine-specific path), fits a linear model
# for the log occupancy rate, and prints diagnostics plus a stargazer table.
data_set <- read.csv("C:\\Users\\Sathish Reddy\\Desktop\\Final QMB project\\Data_with_out-dummies.csv")
str(data_set)

# Occupancy rate is right-skewed; the log transform makes it closer to normal.
hist(data_set$Occupancy_Rate)
hist(log(data_set$Occupancy_Rate))
names(data_set)
dim(data_set)

# Linear model for log occupancy rate on listing attributes, including a
# Price x Minimum_nights x Room_type interaction. Columns are referenced
# through `data =` rather than data_set$ inside the formula.
lm2 <- lm(log(Occupancy_Rate) ~ Accommodates + Guests_included + Bathrooms +
            Bedrooms + Beds + Price + Extra_people + Minimum_nights +
            Availability_365 + Tv + Wifi + Air_conditioning +
            Free_parking_on_premises + Heating + Room_type +
            Host_is_superhost + Price*Minimum_nights*Room_type,
          data = data_set)

# Residual diagnostics. (The original referenced an undefined `model_1`;
# `lm2` is the only model fitted in this script.)
hist(lm2$residuals)
plot(lm2$residuals)

# install.packages("stargazer")  # install once interactively, not on every run
library("stargazer")
# The original passed four undefined models (model_1..model_4); report lm2.
stargazer(lm2, type = "text")
|
a022db613c80da086263c7ca06f89ff5c0077ef4
|
9a79c6d33fc2776d08c72e71c8ad91aa73df2e10
|
/man/as.dataset-methods.Rd
|
c06761182f35bf5be1dda351fd0a063d002d1d26
|
[] |
no_license
|
giupo/rdataset
|
8e6d1d645e1806bed616f2c9d34fdeac3af65129
|
7786febfadb60bf37343976a0d0d2a0286cca259
|
refs/heads/master
| 2021-06-07T19:17:06.802211
| 2021-05-14T15:29:23
| 2021-05-14T15:29:23
| 67,530,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 644
|
rd
|
as.dataset-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as_dataset.r
\docType{methods}
\name{as.dataset}
\alias{as.dataset}
\alias{as.dataset,list-method}
\alias{as.dataset,Dataset-method}
\alias{as.dataset,data.frame-method}
\title{Casts to a Dataset}
\usage{
as.dataset(x, ...)
\S4method{as.dataset}{list}(x)
\S4method{as.dataset}{Dataset}(x)
\S4method{as.dataset}{data.frame}(x)
}
\arguments{
\item{x}{a generic object with a "[[" method defined and names}
\item{...}{forces the creation of a new Dataset, even if x is already a `Dataset`}
}
\value{
a Dataset with data defined in x
}
\description{
Casts to a Dataset
}
|
4e29d73fb6bbd9ead23f7c311a0c24dc6a4ceb82
|
ce72162d2546955b5f65edb587436928481692e5
|
/forecast.r
|
0f07715cd9738f07adfe414da269cb18b1db5fd3
|
[] |
no_license
|
zackery65/ml-learn-r
|
0f493b0e56586a638341c9e35692b8bb42c32262
|
a6cf7045e3af0118945c320b6163dff83e437621
|
refs/heads/master
| 2021-01-23T02:54:56.202411
| 2017-05-08T06:16:04
| 2017-05-08T06:16:04
| 86,035,562
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,283
|
r
|
forecast.r
|
library(xts)
library(forecast)
# Day-of-week flag column (1-7) prepended to the forecast output table.
flag <- data.frame(flag = c(1, 2, 3, 4, 5, 6, 7))

# Forecast function: fit a (seasonal) ARIMA to the daily series in column 2
# of `ts`, forecast `d` steps ahead, and return the point forecast together
# with the 65% prediction interval as a data frame.
#   ts - data frame; column 2 holds the observed daily values
#   d  - forecast horizon (days)
#   od - non-seasonal order passed to arima(), e.g. c(7, 1, 0)
#   ss - seasonal specification, e.g. list(order = c(1, 1, 0), period = 7)
yuce <- function(ts, d, od, ss)
{
  series <- ts[2]
  # Index the observations as consecutive days starting 2016-09-01.
  # (The original used the partially-matched `len=`; spelled out here.)
  daily <- xts(series, seq(as.POSIXct("2016-09-01"), length.out = nrow(series), by = "day"))
  fit <- arima(daily, order = od, seasonal = ss)
  # forecast() dispatches to the Arima method; forecast.Arima() is deprecated.
  # Also avoids shadowing the package's `forecast` generic with a local name.
  fc <- forecast(fit, h = d, level = c(65))
  as.data.frame(fc)
}
# (A stray ":" line in the original was a syntax error and has been removed.)
# Load the daily metric series (value in column 2 of each file).
# The original used `header=T` and the partially-matched `na.string=`;
# both are spelled out here.
total      <- read.csv('F:/data/total.csv',header=TRUE,sep=',',encoding='UTF-8',na.strings = "NULL")
total_ct   <- read.csv('F:/data/total_ct.csv',header=TRUE,sep=',',encoding='UTF-8',na.strings = "NULL")
driver     <- read.csv('F:/data/driver.csv',header=TRUE,sep=',',encoding='UTF-8',na.strings = "NULL")
driver_avg <- read.csv('F:/data/driver_avg.csv',header=TRUE,sep=',',encoding='UTF-8',na.strings = "NULL")
user       <- read.csv('F:/data/user.csv',header=TRUE,sep=',',encoding='UTF-8',na.strings = "NULL")
user_avg   <- read.csv('F:/data/user_avg.csv',header=TRUE,sep=',',encoding='UTF-8',na.strings = "NULL")
price      <- read.csv('F:/data/price.csv',header=TRUE,sep=',',encoding='UTF-8',na.strings = "NULL")
gmv        <- read.csv('F:/data/gmv.csv',header=TRUE,sep=',',encoding='UTF-8',na.strings = "NULL")

# Shared SARIMA specification (weekly seasonality on daily data), hoisted
# out of the repeated calls below.
ord  <- c(7, 1, 0)
seas <- list(order = c(1, 1, 0), period = 7)

# 7-day-ahead forecast for each metric, with named output columns.
dt_total <- yuce(total, 7, ord, seas)
names(dt_total) <- c('forecast_total','low_total','high_total')
dt_total_ct <- yuce(total_ct, 7, ord, seas)
names(dt_total_ct) <- c('forecast_total_ct','low_total_ct','high_total_ct')
dt_driver <- yuce(driver, 7, ord, seas)
names(dt_driver) <- c('forecast_driver','low_driver','high_driver')
dt_driver_avg <- yuce(driver_avg, 7, ord, seas)
names(dt_driver_avg) <- c('forecast_driver_avg','low_driver_avg','high_driver_avg')
dt_user <- yuce(user, 7, ord, seas)
names(dt_user) <- c('forecast_user','low_user','high_user')
dt_user_avg <- yuce(user_avg, 7, ord, seas)
names(dt_user_avg) <- c('forecast_user_avg','low_user_avg','high_user_avg')
dt_price <- yuce(price, 7, ord, seas)
names(dt_price) <- c('forecast_price','low_price','high_price')
dt_gmv <- yuce(gmv, 7, ord, seas)
names(dt_gmv) <- c('forecast_gmv','low_gmv','high_gmv')

# Assemble the point forecasts (column 1 of each result) behind the
# day-of-week flag and write the combined table out.
dt <- cbind(flag, dt_total[1], dt_total_ct[1], dt_driver[1], dt_driver_avg[1],
            dt_user[1], dt_user_avg[1], dt_price[1], dt_gmv[1])
write.csv(dt, 'F:/result.csv', row.names = FALSE)
|
26c1993da08195a2b4071ada29799be63bd8ff9b
|
8ad3594325900e5a4715ca4405cd765bc9958158
|
/getting-started/exercises/exercises_05/exercise_05_06.R
|
ad95c8689b1a82cc99989176dda77d63be00ca4c
|
[
"Apache-2.0"
] |
permissive
|
garciparedes/r-examples
|
22806859c7c147a6d503f1b1223a5168b6fa9d76
|
0e0e18439ad859f97eafb27c5e7f77d33da28bc6
|
refs/heads/master
| 2021-01-25T16:59:16.020983
| 2019-05-21T10:26:27
| 2019-05-21T10:26:27
| 102,385,669
| 1
| 0
|
Apache-2.0
| 2018-05-24T07:38:21
| 2017-09-04T17:27:27
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 619
|
r
|
exercise_05_06.R
|
#
# Title:
#   Ejercicios Computación Estadística - Hoja 5 - Ejercicio 6
#
# Author:
#   Sergio García Prado - garciparedes.me
#
# Date:
#   April 2018
#
# Clear the workspace (kept from the original exercise script).
rm(list = ls())
#
# Part a) Standard normal CDF table for z = 0.00, 0.01, ..., 4.49,
# arranged 10 values per row after transposing.
#
tabla.normal <- pnorm(t(matrix(seq(0.0, 4.49, by = 0.01), nrow = 10)))
print(tabla.normal)
#
# Part b) Upper-tail Student-t critical values: one row per degrees of
# freedom (1-100), one column per tail probability.
#
alpha <- c(0.4, 0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.0025, 0.001, 0.0005)
v <- 1:100
tabla.t <- t(vapply(v, qt, numeric(length(alpha)), p = alpha, lower.tail = FALSE))
print(tabla.t)
#
# Part c) Upper-tail chi-squared critical values, same layout.
#
alpha <- c(0.995, 0.990, 0.975, 0.95, 0.9, 0.5, 0.1, 0.05, 0.025, 0.01, 0.005)
v <- 1:100
tabla.chi <- t(vapply(v, qchisq, numeric(length(alpha)), p = alpha, lower.tail = FALSE))
print(tabla.chi)
|
539a45f3aee46f6fe2b41171ea4913c63d6046b9
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/durmod/R/datagen.R
|
c5af5bbd5e6d4a756dca07f73468366b1e252c29
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,969
|
r
|
datagen.R
|
#' Generate example data
#'
#' @description
#' Generate a data table with example data
#' @details
#' The dataset simulates a labour market programme. People entering the dataset are without a job.
#'
#' They experience two hazards, i.e. probabilities per time period. They can either get a job and exit from
#' the dataset, or they can enter a labour market programme, e.g. a subsidised job or similar, and remain
#' in the dataset and possibly get a job later.
#' In the terms of this package, there are two transitions, \code{"job"} and \code{"program"}.
#'
#' The two hazards are influenced by covariates observed by the researcher, called \code{"x1"} and
#' \code{"x2"}. In addition there are unobserved characteristics influencing the hazards. Being
#' on a programme also influences the hazard to get a job. In the generated dataset, being on
#' a programme is the indicator variable \code{alpha}. While on a programme, the only transition that can
#' be made is \code{"job"}.
#'
#' The dataset is organized as a series of rows for each individual. Each row is a time period
#' with constant covariates.
#'
#' The length of the time period is in the covariate \code{duration}.
#'
#' The transition being made at the end of the period is coded in the covariate \code{d}. This
#' is an integer which is 0 if no transition occurs (e.g. if a covariate changes), it is 1 for
#' the first transition, 2 for the second transition. It can also be a factor, in which case the
#' level marking no transition must be called \code{"none"}.
#'
#' The covariate \code{alpha} is zero when unemployed, and 1 if on a programme. It is used
#' for two purposes. It is used as an explanatory variable for transition to job, this yields
#' a coefficient which can be interpreted as the effect of being on the programme. It is also
#' used as a "state variable", as an index into a "risk set". I.e. when estimating, the
#' \code{\link{mphcrm}} function must be told which risks/hazards are present.
#' When on a programme the \code{"toprogram"} transition can not be made. This is implemented
#' by specifying a list of risksets and using \code{alpha+1} as an index into this set.
#'
#' The two hazards are modeled as \eqn{exp(X \beta + \mu)}, where \eqn{X} is a matrix of covariates
#' \eqn{\beta} is a vector of coefficients to be estimated, and \eqn{\mu} is an intercept. All of
#' these quantities are transition specific. This yields an individual likelihood which we call
#' \eqn{M_i(\mu)}. The idea behind the mixed proportional hazard model is to model the
#' individual heterogeneity as a probability distribution of intercepts. We obtain the individual
#' likelihood \eqn{L_i = \sum_j p_j M_i(\mu_j)}, and, thus, the likelihood \eqn{L = \sum_j L_j}.
#'
#' The likelihood is to be maximized over the parameter vectors \eqn{\beta} (one for each transition),
#' the masspoints \eqn{\mu_j}, and probabilites \eqn{p_j}.
#'
#' The probability distribution is built up in steps. We start with a single masspoint, with
#' probability 1. Then we search for another point with a small probability, and maximize the
#' likelihood from there. We continue with adding masspoints until we no longer can improve
#' the likelihood.
#'
#' @param N integer.
#' The number of individuals in the dataset.
#' @param censor numeric. The total observation period. Individuals are removed
#' from the dataset if they do not exit to \code{"job"} before this time.
#' @note
#' The example illustrates how \code{data(durdata)} was generated.
#' @examples
#' data.table::setDTthreads(1) # avoid screams from cran-testing
#' dataset <- datagen(5000,80)
#' print(dataset)
#' risksets <- list(unemp=c("job","program"), onprogram="job")
#' # just two iterations to save time
#' Fit <- mphcrm(d ~ x1+x2 + ID(id) + D(duration) + S(alpha+1) + C(job,alpha),
#' data=dataset, risksets=risksets,
#' control=mphcrm.control(threads=1,iters=2))
#' best <- Fit[[1]]
#' print(best)
#' summary(best)
#' @export
datagen <- function(N,censor=80) {
  # Observed covariates and individual identifiers.
  x1 <- rnorm(N)
  x2 <- rnorm(N)
  id <- seq_len(N)
  # two transitions, to exit(1) and to program(2)
  # exit is absorbing, program is not, but only exit is allowed afterwards
  # if on program one can only exit
  # Mean intercepts and 2x2 covariance for the unobserved heterogeneity.
  means <- c(-2.5,-3)
  cov2 <- matrix(c(1,0.5,0.5,1),2)
  persons <- data.table(id,x1,x2)
  # draw correlated unobserved characteristics
  # (the NULL bindings below only silence R CMD check NOTEs about
  # data.table's non-standard evaluation symbols)
  `:=` <- .N <- ve <- vp <- NULL #avoid check NOTEs
  persons[, c('ve','vp') := {
    vv <- mvtnorm::rmvnorm(.N, mean=means, sigma=cov2)
    list(vv[,1],vv[,2])
  }]
  # Sample mean and covariance of the exponentiated heterogeneity terms,
  # attached to the result as attributes for later inspection.
  means <- persons[,colMeans(cbind(job=exp(ve),program=exp(vp)))]
  cv <- persons[,cov(cbind(job=exp(ve),program=exp(vp)))]
  # create spells
  # genspell() is defined elsewhere in the package; presumably it simulates
  # one individual's spell history up to `censor` -- confirm in its source.
  spells <- persons[,{
    genspell(x1,x2,ve,vp,censor)
  }, by=id]
  # Recode the transition (d) and state columns as labelled factors
  # matching the risksets used by mphcrm().
  spells$d <- factor(spells$d,levels=0:2,labels=c('none','job','program'))
  spells$state <- factor(spells$alpha+1,levels=1:2,labels=c('unemp','onprogram'))
  setattr(spells,'means',means)
  setattr(spells,'cov',cv)
  spells
}
|
ae1e68f3db0be54ea0c636aec0fcb19d85e6c6bb
|
c7ee9553b8b2e67bffeea0874db00dd1e7fdb541
|
/R/pdSpecEst.R
|
9c33ff7c8bf8d0f73aa9a8958dfa01e663caf80f
|
[] |
no_license
|
cran/pdSpecEst
|
2c80e79f2f5e6a64b2d017ccad56192519b3c3b8
|
e8bc2a87aed91542fc804910f1556c7c24419fc1
|
refs/heads/master
| 2021-01-11T20:27:28.345499
| 2020-01-08T08:10:07
| 2020-01-08T08:10:07
| 79,120,097
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,945
|
r
|
pdSpecEst.R
|
#' pdSpecEst: An Analysis Toolbox for Hermitian Positive Definite Matrices
#'
#' The \code{pdSpecEst} (\strong{p}ositive \strong{d}efinite \strong{Spec}tral \strong{Est}imation)
#' package provides data analysis tools for samples of symmetric or Hermitian positive definite matrices,
#' such as collections of positive definite covariance matrices or spectral density matrices.
#'
#' The tools in this package can be used to perform:
#' \itemize{
#' \item \emph{Intrinsic wavelet transforms} for curves (1D) and surfaces (2D) of Hermitian positive
#' definite matrices, with applications to for instance: dimension reduction, denoising and clustering for curves or
#' surfaces of Hermitian positive definite matrices, such as (time-varying) Fourier spectral density matrices.
#' These implementations are based in part on the paper \insertCite{CvS17}{pdSpecEst} and Chapters 3
#' and 5 of \insertCite{C18}{pdSpecEst}.
#' \item Exploratory data analysis and inference for samples of Hermitian positive definite matrices by
#' means of \emph{intrinsic data depth} and \emph{depth rank-based hypothesis tests}. These implementations are based
#' on the paper \insertCite{COvS17}{pdSpecEst} and Chapter 4 of \insertCite{C18}{pdSpecEst}.
#' }
#' For more details and examples on how to use the package see the accompanying vignettes in the vignettes folder.
#' An R-Shiny app to demonstrate and test the implemented functionality in the package is available
#' \href{https://jchau.shinyapps.io/pdSpecEst/}{here}.
#'
#' Author and maintainer: \strong{Joris Chau} (\email{j.chau@@uclouvain.be}).
#'
#' Install the current development version via \code{devtools::install_github("JorisChau/pdSpecEst")}.
#'
#' @references
#' \insertAllCited{}
#'
#' @docType package
#' @useDynLib pdSpecEst, .registration = TRUE
#' @importFrom Rcpp evalCpp
#' @importFrom Rdpack reprompt
#' @import utils
#' @import stats
#' @name pdSpecEst
NULL
|
aa4b9f2080ee3a0ce197999096c4db303ab9083c
|
9d24829289bb8a48bf6ce4954e04bb7a4e2abc42
|
/benchmarking_data_set/hypoxia/hallmark_pathway_hypoxia.R
|
f57ce872033c0dcfe78a98f99d4e935a6144229c
|
[] |
no_license
|
michellemeier27/Semesterproject
|
231a51168f104c5bce4e13ca1a4d4782a29301e8
|
fe923ad7974e06a1b45271f466785e0465912c86
|
refs/heads/master
| 2022-12-09T13:13:02.536402
| 2020-08-31T15:57:58
| 2020-08-31T15:57:58
| 262,081,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,006
|
r
|
hallmark_pathway_hypoxia.R
|
## HALLMARK PATHWAY SCORING
# Score the HALLMARK_HYPOXIA pathway on the hypoxia benchmark series with
# five single-sample methods and compare the score distributions.
library(singscore)
library(qusage)
library(ggplot2)
library(GSVA)

# Scoring helper functions.
source("/Users/michellemeier/Semesterproject/ARCHS4/singscore_function.R")
source("/Users/michellemeier/Semesterproject/ARCHS4/GSVA_function.R")
# Full hypoxia data preparation; defines `hypoxia_res_4` and
# `expression_hypoxia_cutoff` used below.
source("/Users/michellemeier/Semesterproject/ARCHS4/hypoxia/runscript_hypoxia.R")

# Hallmark gene sets in the two formats the methods expect:
# read.gmt() output for the GSVA-family methods, getGmt() for singscore.
gene_sets <- read.gmt("/Users/michellemeier/Semesterproject/ARCHS4/raw_data/gene_sets/hallmark_gene_sets.gmt")
geneSet <- getGmt("/Users/michellemeier/Semesterproject/ARCHS4/raw_data/gene_sets/hallmark_gene_sets.gmt")

# Keep only series with both case and control samples, then restrict to
# the "case" direction.
hypoxia_res_5 <- hypoxia_res_4[hypoxia_res_4$case == TRUE, ]
hypoxia_res_5 <- hypoxia_res_5[hypoxia_res_5$direction == "case", ]

# Pre-allocate a result frame sized by the number of hallmark pathways.
# (The original re-read the gmt file here; `geneSet` above already holds
# the same collection, so the duplicate getGmt() call was removed.)
# NOTE(review): result_df is never filled or used later in this script.
l_gs <- length(geneSet)
result_df <- data.frame(pathway = integer(l_gs), delta = integer(l_gs))

# Rank the hypoxia pathway with each scoring method.
GSVA_rank <- findRankPathway(meta_frame = hypoxia_res_5, expression_frame = expression_hypoxia_cutoff, geneset = gene_sets, pathway_wanted = "HALLMARK_HYPOXIA",
                             method_wanted = "gsva")
ssGSEA_rank <- findRankPathway(meta_frame = hypoxia_res_5, expression_frame = expression_hypoxia_cutoff, geneset = gene_sets, pathway_wanted = "HALLMARK_HYPOXIA",
                              method_wanted = "ssgsea")
zscore_rank <- findRankPathway(meta_frame = hypoxia_res_5, expression_frame = expression_hypoxia_cutoff, geneset = gene_sets, pathway_wanted = "HALLMARK_HYPOXIA",
                              method_wanted = "zscore")
plage_rank <- findRankPathway(meta_frame = hypoxia_res_5, expression_frame = expression_hypoxia_cutoff, geneset = gene_sets, pathway_wanted = "HALLMARK_HYPOXIA",
                             method_wanted = "plage")
singscore_rank <- findRankPathwaySingscore(meta_frame = hypoxia_res_5, expression_frame = expression_hypoxia_cutoff, geneset = geneSet, pathway_wanted = "HALLMARK_HYPOXIA")

# Collect all method scores side by side.
all_res <- data.frame(GSVA = as.vector(GSVA_rank),
                      ssGSEA = as.vector(ssGSEA_rank),
                      zscore = as.vector(zscore_rank),
                      plage = as.vector(plage_rank),
                      singscore = as.vector(singscore_rank),
                      stringsAsFactors = FALSE)

## Boxplot of the score distributions per method, saved as a PNG.
bp_1 <- ggplot(data = stack(all_res), aes(x = ind, y = values)) + geom_boxplot()
bp_1 <- bp_1 + labs(title = "Hypoxia pathway score (case)",
                    subtitle = "hallmark gene sets")
show(bp_1)
ggsave(plot = bp_1, filename = "case_hallmarks_hypoxia.png", path = "/Users/michellemeier/Semesterproject/ARCHS4/results/pathway_scoring/hallmarks/case")
|
325b1b3dfe3d93981ae01f3c851a917f3606e0ad
|
fbcc0aee35bcd0ac8c866720ac95727ac831778a
|
/man/get_ch_data.Rd
|
63cd6f9a608d70d220098aea379ff61027da4aaa
|
[] |
no_license
|
laboshinl/cloudhistorianr
|
18ec33f3da48f1dff3b95fe6561b93ca5b4e53df
|
9727476339be921fb074a8788f083b3da93a41b7
|
refs/heads/master
| 2020-12-13T07:56:59.424178
| 2019-12-03T03:41:07
| 2019-12-03T03:41:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,134
|
rd
|
get_ch_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_ch_data.R
\name{get_ch_data}
\alias{get_ch_data}
\title{get_ch_data Function:}
\usage{
get_ch_data(uri, token, start_time, end_time, tag_names, down_sample,
sys_guid)
}
\arguments{
\item{uri}{Cloud Historian Server Address}
\item{token}{Oauth2 security token. Use getCHToken to acquire a token}
\item{start_time}{The start time to be used for data retrieval}
\item{end_time}{The end time to be used for data retrieval}
\item{tag_names}{A vector of tag names to be used for data retrieval}
\item{down_sample}{The the downsampling function to be used in the data retrieval
Valid responses:
Downsample to 1 min average: "1m-avg",
Downsample to 60 second average: "60s-avg",
Downsample to 1 hour average: "1h-avg",
Downsample to avg of all results: "0all-avg",
Downsample to max of all results: "0all-max",
return raw data: ""}
}
\value{
An object with the request information and the formatted data in the $data attribute.
}
\description{
A function to connect to the Honeywell Sentience Cloud Historian Server and return the requested timeseries data
}
|
474ded296c59ba33bd7fd59942dfb9037e69a43f
|
0d815aceefae3946543348f35e29527f3e231ee5
|
/run_analysis.R
|
8fe019f51d2bb0771daeca64b3ce832c4d09623f
|
[] |
no_license
|
jmp2173/Samsung
|
35bb9f93735e41b2c741623eb69270951d81758b
|
3c22b2a592bdf5e53ba5715a13f15cadff8ba70a
|
refs/heads/master
| 2020-06-04T09:18:50.648329
| 2014-05-25T19:28:12
| 2014-05-25T19:28:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,791
|
r
|
run_analysis.R
|
## Course project: build a tidy dataset from the UCI HAR (Samsung) data.
## Reads the test/train subject, measurement (X) and activity (y) files,
## merges them, keeps only the mean/std measurements, cleans the variable
## names, and writes per-subject/per-activity averages to finaldata.txt.

## Read in the data files.
a <- read.table("subject_test.txt", sep = "")
b <- read.table("X_test.txt", sep = "")
c <- read.table("y_test.txt", sep = "")
d <- read.table("subject_train.txt", sep = "")
e <- read.table("X_train.txt", sep = "")
f <- read.table("y_train.txt", sep = "")
## Renamed from `names`/`head` to avoid shadowing base::names()/utils::head().
activity_labels <- read.table("activity_labels.txt", sep = "")
colnames(activity_labels) <- c("activity", "activityname")
features <- read.table("features.txt", sep = "")
## Second column of features.txt holds the measurement names.
headvec <- features[["V2"]]
## Combine x files, y files and subject files; add column headers.
g <- rbind(b, e)
h <- rbind(c, f)
i <- rbind(a, d)
names(i) <- "Subject"
names(g) <- headvec
## Combine these files to create the big dataset.
final <- cbind(g, h, i)
names(final)[names(final) == "V1"] <- "Number"
## Create a smaller dataset with just the mean and standard deviation
## variables (meanFreq columns are dropped further below).
final2 <- final[, grep("mean", names(final))]
final3 <- final[, grep("std", names(final))]
final4 <- cbind(final2, final3)
final6 <- cbind(final4, final$Number, final$Subject)
names(final6)[names(final6) %in% c("final$Number", "final$Subject")] <- c("Activity", "Subject")
## Clean up names: remove parentheses and dashes, lowercase everything and
## expand the abbreviations to full words.
final7 <- final6[, !grepl("meanFreq", names(final6))]
names(final7) <- gsub("()", "", names(final7), fixed = TRUE)
names(final7) <- gsub("-", "", names(final7))
names(final7) <- tolower(names(final7))
names(final7) <- gsub("fbody", "frequencybody", names(final7))
names(final7) <- gsub("tbody", "timebody", names(final7))
names(final7) <- gsub("meanx", "meanxdirection", names(final7))
names(final7) <- gsub("meany", "meanydirection", names(final7))
names(final7) <- gsub("meanz", "meanzdirection", names(final7))
names(final7) <- gsub("stdx", "stdxdirection", names(final7))
names(final7) <- gsub("stdy", "stdydirection", names(final7))
names(final7) <- gsub("stdz", "stdzdirection", names(final7))
names(final7) <- gsub("std", "standarddeviation", names(final7))
names(final7) <- gsub("acc", "accelerometer", names(final7))
names(final7) <- gsub("gyro", "gyroscope", names(final7))
names(final7) <- gsub("tgrav", "timegravity", names(final7))
names(final7) <- gsub("mag", "magnitude", names(final7))
## Create the final tidy dataset: the average of every variable for each
## participant-activity combination.
## Only install reshape2 if it is not already available.
if (!requireNamespace("reshape2", quietly = TRUE)) {
  install.packages("reshape2")
}
library(reshape2)
## Fix: the original called as.numeric() without assigning the result, so the
## intended conversion was silently discarded.
final7$subject <- as.numeric(final7$subject)
final7$activity <- as.numeric(final7$activity)
fmelt <- melt(final7, id.vars = c("activity", "subject"))
final9 <- dcast(fmelt, subject + activity ~ variable, fun.aggregate = mean)
## Add the activity labels corresponding to each activity number.
final10 <- merge(activity_labels, final9, by = "activity")
## Save as a text file (CSV-formatted, as in the original).
write.csv(final10, file = 'finaldata.txt')
|
15f14c7dfce8a66e4b3716c751d6148a9570b4d8
|
1e48c563b2b9c723ed2a234df90f1dcd9338e6c3
|
/tests/testthat/testDb.R
|
ef0f00269cdc9e627f8d47a9aaa420c089bd51ee
|
[] |
no_license
|
bobjansen/mattR
|
e87f254d9bc54022a83ea4dc7eb1561528e3c956
|
9dfba5bd5436e0954b5dab77ba283f1adc94c13d
|
refs/heads/master
| 2021-01-20T06:04:58.701879
| 2018-04-03T19:47:09
| 2018-04-03T19:49:20
| 101,481,868
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,866
|
r
|
testDb.R
|
context("Database")

# Tests for the SQLite-backed user store: connection setup, schema creation,
# user creation and credential checking. They exercise the package functions
# createConnection(), setupDatabase(), createUser(), checkUserCredentials()
# and existsUser().

test_that("Connecting to the database works", {
  con <- createConnection()
  on.exit(DBI::dbDisconnect(con))
  expect_true(is(con, "SQLiteConnection"))
})

test_that("Setting up the database works", {
  con <- setupDatabase()
  on.exit(DBI::dbDisconnect(con))
  dbTables <- DBI::dbListTables(con)
  dbTables <- dbTables[order(dbTables)]
  expect_equal(dbTables, c("SESSION", "USERS"))
})

test_that("Creating a user works", {
  con <- setupDatabase()
  on.exit(DBI::dbDisconnect(con))
  createUser(con, "user", "42")
  res <- DBI::dbGetQuery(con, "SELECT count(1) FROM USERS;")
  expect_equal(res[[1]], 1)
})

test_that("Check valid user credentials work", {
  con <- setupDatabase()
  on.exit(DBI::dbDisconnect(con))
  createUser(con, "user", "42")
  expect_equal(checkUserCredentials(con, "user", "42"), 1)
})

test_that("Check invalid user credentials don't work", {
  con <- setupDatabase()
  on.exit(DBI::dbDisconnect(con))
  createUser(con, "user", "42")
  expect_null(checkUserCredentials(con, "user2", "42"))
})

test_that("Check user exists checks work", {
  con <- setupDatabase()
  on.exit(DBI::dbDisconnect(con))
  createUser(con, "user", "42")
  expect_true(existsUser(con, "user"))
  expect_false(existsUser(con, "user2"))
})

test_that("Check wrong password doesn't work", {
  con <- setupDatabase()
  on.exit(DBI::dbDisconnect(con))
  createUser(con, "user", "42")
  expect_null(checkUserCredentials(con, "user", "56"))
})

# Fix: description said "empty username" but the test passes NULL.
test_that("Check NULL username doesn't work", {
  con <- setupDatabase()
  on.exit(DBI::dbDisconnect(con))
  createUser(con, "user", "42")
  expect_null(checkUserCredentials(con, NULL, "42"))
})

# Fix: this test duplicated the description of the invalid-credentials test
# above; it actually checks a NULL password.
test_that("Check NULL password doesn't work", {
  con <- setupDatabase()
  on.exit(DBI::dbDisconnect(con))
  createUser(con, "user", "42")
  expect_null(checkUserCredentials(con, "user", NULL))
})
|
83bfdace3562aea7f3df3e24b65e2ba255e517cf
|
c750c1991c8d0ed18b174dc72f3014fd35e5bd8c
|
/pkgs/bayesm/man/rivDP.Rd
|
351f9157a598add810c300e31ccbcef7bcb9347f
|
[] |
no_license
|
vaguiar/EDAV_Project_2017
|
4b190e66fe7a6b4078cfe1b875bccd9b5a594b25
|
288ffaeec1cfdd873fe7439c0fa0c46a90a16a4f
|
refs/heads/base
| 2021-01-23T02:39:36.272851
| 2017-05-01T23:21:03
| 2017-05-01T23:21:03
| 86,010,131
| 1
| 0
| null | 2017-05-01T23:43:04
| 2017-03-24T00:21:20
|
HTML
|
UTF-8
|
R
| false
| false
| 6,553
|
rd
|
rivDP.Rd
|
\name{rivDP}
\alias{rivDP}
\concept{Instrumental Variables}
\concept{Gibbs Sampler}
\concept{Dirichlet Process}
\concept{bayes}
\concept{endogeneity}
\concept{simultaneity}
\concept{MCMC}
\title{ Linear "IV" Model with DP Process Prior for Errors}
\description{
\code{rivDP} is a Gibbs Sampler for a linear structural equation with an arbitrary number of instruments.
\code{rivDP} uses a mixture of normals for the structural and reduced form equation implemented with a
Dirichlet Process Prior.
}
\usage{
rivDP(Data, Prior, Mcmc)
}
\arguments{
\item{Data}{ list(z,w,x,y) }
\item{Prior}{ list(md,Ad,mbg,Abg,lambda,Prioralpha,lambda_hyper) (optional) }
\item{Mcmc}{ list(R,keep,nprint,maxuniq,SCALE,gridsize) (R required) }
}
\details{
Model:\cr
\eqn{x=z'\delta + e1}. \cr
\eqn{y=\beta*x + w'\gamma + e2}. \cr
\eqn{e1,e2} \eqn{\sim}{~} \eqn{N(\theta_{i})}. \eqn{\theta_{i}} represents \eqn{\mu_{i},\Sigma_{i}}
Note: Error terms have non-zero means. DO NOT include intercepts in the z or w matrices. This is different
from \code{rivGibbs} which requires intercepts to be included explicitly.
Priors:\cr
\eqn{\delta} \eqn{\sim}{~} \eqn{N(md,Ad^{-1})}. \eqn{vec(\beta,\gamma)} \eqn{\sim}{~} \eqn{N(mbg,Abg^{-1})} \cr
\eqn{\theta_{i}} \eqn{\sim}{~} \eqn{G} \cr
\eqn{G} \eqn{\sim}{~} \eqn{DP(alpha,G_{0})} \cr
\eqn{G_{0}} is the natural conjugate prior for \eqn{(\mu,\Sigma)}: \cr
\eqn{\Sigma} \eqn{\sim}{~} \eqn{IW(nu,vI)} and \eqn{\mu|\Sigma} \eqn{\sim}{~} \eqn{N(0,\Sigma (x) a^{-1})} \cr
These parameters are collected together in the list \eqn{\lambda}. It is highly
recommended that you use the default settings for these hyper-parameters.\cr
\eqn{\lambda(a,nu,v):}\cr
\eqn{a} \eqn{\sim}{~} uniform[alim[1],alimb[2]]\cr
\eqn{nu} \eqn{\sim}{~} dim(data)-1 + exp(z) \cr
\eqn{z} \eqn{\sim}{~} uniform[dim(data)-1+nulim[1],nulim[2]]\cr
\eqn{v} \eqn{\sim}{~} uniform[vlim[1],vlim[2]]
\eqn{alpha} \eqn{\sim}{~} \eqn{(1-(alpha-alpha_{min})/(alpha_{max}-alpha{min}))^{power}} \cr
where \eqn{alpha_{min}} and \eqn{alpha_{max}} are set using the arguments in the reference
below. It is highly recommended that you use the default values for the hyperparameters
of the prior on alpha
List arguments contain:
Data:\cr
\itemize{
\item{\code{z}}{ matrix of obs on instruments}
\item{\code{y}}{ vector of obs on lhs var in structural equation}
\item{\code{x}}{ "endogenous" var in structural eqn}
\item{\code{w}}{ matrix of obs on "exogenous" vars in the structural eqn}}
Prior:\cr
\itemize{
\item{\code{md}}{ prior mean of delta (def: 0)}
\item{\code{Ad}}{ pds prior prec for prior on delta (def: .01I)}
\item{\code{mbg}}{ prior mean vector for prior on beta,gamma (def: 0)}
\item{\code{Abg}}{ pds prior prec for prior on beta,gamma (def: .01I)}}
Prioralpha:\cr
\itemize{
\item{\code{Istarmin}}{ expected number of components at lower bound of support of alpha (def: 1)}
\item{\code{Istarmax}}{ expected number of components at upper bound of support of alpha}
\item{\code{power}}{ power parameter for alpha prior (def: .8)}
}
lambda_hyper:\cr
\itemize{
\item{\code{alim}}{ defines support of a distribution,def:c(.01,10) }
\item{\code{nulim}}{ defines support of nu distribution, def:c(.01,3)}
\item{\code{vlim}}{ defines support of v distribution, def:c(.1,4)}
}
MCMC:\cr
\itemize{
\item{\code{R}}{ number of MCMC draws}
\item{\code{keep}}{ MCMC thinning parm: keep every keepth draw (def: 1)}
\item{\code{nprint}}{ print the estimated time remaining for every nprint'th draw (def: 100)}
\item{\code{maxuniq}}{ storage constraint on the number of unique components (def: 200)}
\item{\code{SCALE}}{ scale data (def: TRUE)}
\item{\code{gridsize}}{ gridsize parm for alpha draws (def: 20)}
}
output includes object \code{nmix} of class "bayesm.nmix" which contains draws of predictive distribution of
errors (a Bayesian analogue of a density estimate for the error terms).\cr
nmix:\cr
\itemize{
\item{\code{probdraw}}{ not used}
\item{\code{zdraw}}{ not used}
\item{\code{compdraw}}{ list R/keep of draws from bivariate predictive for the errors}
}
note: in compdraw list, there is only one component per draw
}
\value{
a list containing:
\item{deltadraw}{R/keep x dim(delta) array of delta draws}
\item{betadraw}{R/keep x 1 vector of beta draws}
\item{gammadraw}{R/keep x dim(gamma) array of gamma draws }
\item{Istardraw}{R/keep x 1 array of draws of the number of unique normal components}
\item{alphadraw}{R/keep x 1 array of draws of Dirichlet Process tightness parameter}
\item{nmix}{R/keep x list of draws for predictive distribution of errors}
}
\references{ For further discussion, see "A Semi-Parametric Bayesian Approach to the Instrumental
Variable Problem," by Conley, Hansen, McCulloch and Rossi, Journal of Econometrics (2008).\cr
}
\seealso{\code{rivGibbs}}
\author{ Peter Rossi, Anderson School, UCLA,
\email{perossichi@gmail.com}.
}
\examples{
##
if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10}
##
## simulate scaled log-normal errors and run
##
set.seed(66)
k=10
delta=1.5
Sigma=matrix(c(1,.6,.6,1),ncol=2)
N=1000
tbeta=4
set.seed(66)
scalefactor=.6
root=chol(scalefactor*Sigma)
mu=c(1,1)
##
## compute interquartile ranges
##
ninterq=qnorm(.75)-qnorm(.25)
error=matrix(rnorm(100000*2),ncol=2)%*%root
error=t(t(error)+mu)
Err=t(t(exp(error))-exp(mu+.5*scalefactor*diag(Sigma)))
lnNinterq=quantile(Err[,1],prob=.75)-quantile(Err[,1],prob=.25)
##
## simulate data
##
error=matrix(rnorm(N*2),ncol=2)\%*\%root
error=t(t(error)+mu)
Err=t(t(exp(error))-exp(mu+.5*scalefactor*diag(Sigma)))
#
# scale appropriately
Err[,1]=Err[,1]*ninterq/lnNinterq
Err[,2]=Err[,2]*ninterq/lnNinterq
z=matrix(runif(k*N),ncol=k)
x=z\%*\%(delta*c(rep(1,k)))+Err[,1]
y=x*tbeta+Err[,2]
# set initial values for MCMC
Data = list(); Mcmc=list()
Data$z = z; Data$x=x; Data$y=y
# start MCMC and keep results
Mcmc$maxuniq=100
Mcmc$R=R
end=Mcmc$R
begin=100
out=rivDP(Data=Data,Mcmc=Mcmc)
cat("Summary of Beta draws",fill=TRUE)
summary(out$betadraw,tvalues=tbeta)
if(0){
## plotting examples
plot(out$betadraw,tvalues=tbeta)
plot(out$nmix) ## plot "fitted" density of the errors
##
}
}
\keyword{ models }
|
9ee442f5d19c003174ee9b12c0674d826e55d7fc
|
daeee1f6fa2191038550e6dde443d6554bce2c61
|
/man/quantile.geometric.Rd
|
a67a10ff66ad2a581a7149bcc9b75aa13a89f617
|
[
"MIT"
] |
permissive
|
nfultz/distributions3
|
a58f88146c81a70ab09e43c3f2762e8dcd42c52a
|
945dcecd6488329127bc5585b6042cf9ec4dba81
|
refs/heads/master
| 2020-08-24T20:17:53.012731
| 2020-06-26T03:09:45
| 2020-06-26T03:09:45
| 216,898,661
| 0
| 0
|
NOASSERTION
| 2019-10-22T19:57:28
| 2019-10-22T19:57:27
| null |
UTF-8
|
R
| false
| true
| 931
|
rd
|
quantile.geometric.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Geometric.R
\name{quantile.Geometric}
\alias{quantile.Geometric}
\title{Determine quantiles of a Geometric distribution}
\usage{
\method{quantile}{Geometric}(d, p, ...)
}
\arguments{
\item{d}{A \code{Geometric} object created by a call to \code{\link[=Geometric]{Geometric()}}.}
\item{p}{A vector of probabilities.}
\item{...}{Unused. Unevaluated arguments will generate a warning to
catch misspellings or other possible errors.}
}
\value{
A vector of quantiles, one for each element of \code{p}.
}
\description{
Determine quantiles of a Geometric distribution
}
\examples{
set.seed(27)
X <- Geometric(0.3)
X
random(X, 10)
pdf(X, 2)
log_pdf(X, 2)
cdf(X, 4)
quantile(X, 0.7)
}
\seealso{
Other Geometric distribution:
\code{\link{cdf.Geometric}()},
\code{\link{pdf.Geometric}()},
\code{\link{random.Geometric}()}
}
\concept{Geometric distribution}
|
df9f218be33d3ae504a23ceca014bbc8118a18b6
|
35706505050bf8d1d91929e5a3cfe4617684ecfd
|
/R/plotCCDF.R
|
a0b61aa8e04358a35b43e317e5a066bdd484a0b7
|
[] |
no_license
|
dgarcia-eu/DGarciaTools
|
f9564ce41e55fcf8d0b9eab5ec9b80c3c0a00054
|
4719f988119839f78affafb98b15901659ac4b2f
|
refs/heads/master
| 2020-12-24T05:19:33.812712
| 2016-08-01T17:06:01
| 2016-08-01T17:06:01
| 64,681,373
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,426
|
r
|
plotCCDF.R
|
#' Plot a Cumulative Complementary Density Function (CCDF)
#'
#' This function calculates and plots a CCDF with logarithmic axes by default
#' @param x numeric vector to calculate CCDF of
#' @param include.min whether to include a first point z=min(x)-1, which will have P(x>z)=1 (default FALSE)
#' @param counts whether to plot absolute counts instead of probability (default FALSE)
#' @param log character string determining the axes to display logarithmically (default "xy", both axes)
#' @param type of plot (default line "l")
#' @param xlab label of x axis (default "x")
#' @param ylab label of y axis (default "P(X>x)")
#' @param cex.lab size of the axis label (default 1.3)
#' @param cex.axis size of the axis values (default 1.3)
#' @param lwd width of the plot line (default 2)
#' @param ... additional parameters for plot function
#' @keywords density, power-law
#' @export
#' @examples
#' xs <- floor(runif(min=0,max=10,n=100))
#' plotCCDF(xs)
#'
plotCCDF <- function(x, include.min=FALSE, counts=FALSE, log="xy", type="l", xlab="x", ylab="P(X>x)", cex.lab=1.3, cex.axis=1.3, lwd=2, ...) {
  # Compute the (complementary) cumulative distribution of x via the
  # package-level CCDF() helper; rows are "v" (values), "P" (probability)
  # and "N" (counts).
  ccdf <- CCDF(x, include.min)
  xs <- ccdf["v", ]
  # Plot absolute counts instead of probabilities when requested.
  ys <- if (counts) ccdf["N", ] else ccdf["P", ]
  # On logarithmic axes, zero or negative coordinates cannot be drawn,
  # so they are filtered out; with linear axes every point is kept.
  keep <- if (log == "") rep(TRUE, length(xs)) else ys > 0 & xs > 0
  par(mar = c(4.5, 4.5, 0.5, 0.5))
  options(scipen = 3)
  plot(x = xs[keep], y = ys[keep], log = log, xlab = xlab, ylab = ylab,
       type = type, cex.lab = cex.lab, cex.axis = cex.axis, lwd = lwd, ...)
}
|
330fc9e6bc9d1bf125ddce9c7383f3a5a5415626
|
800486925da984567848f68fdcefa80118899164
|
/R Files/NHL_Spread_Current.R
|
81d297e6fd9d734514df9e622e251458aea865b9
|
[] |
no_license
|
kloudKlown/DFS-Website
|
e599e39fe45b07df18b1c3294afaf1c9ffadecc8
|
0a159d7404e48f0a76a5a0990b4cd337ffcadd56
|
refs/heads/master
| 2022-12-26T09:04:56.873353
| 2020-03-02T05:27:31
| 2020-03-02T05:27:31
| 213,518,931
| 0
| 0
| null | 2022-12-08T09:08:05
| 2019-10-08T01:12:53
|
C#
|
UTF-8
|
R
| false
| false
| 31,880
|
r
|
NHL_Spread_Current.R
|
# Setup for the NHL spread model: attach packages, pull today's games from
# the local SQL Server and prepare the skater table plus the cached
# offensive/defensive history CSVs.
library(Hmisc)
library(corrplot)
library(brnn)
library(h2o)
library(randomForest)
library(Matrix)
# library(xgboost)
library(stringdist)
library(varhandle)
library(tidyr)
# Fix: use library() instead of require() for a hard dependency so a missing
# package fails loudly instead of silently returning FALSE.
library(devtools)
# library(mxnet)
setwd("D:/DFS Website/DFS/R Files")
# NHL_GetDBData.R is expected to populate NHLTableData (and presumably
# NHLGoalieData / GoalieStatsNHL used further below) -- TODO confirm.
source('NHL_GetDBData.R')
library(odbc)
con <- dbConnect(odbc(),
Driver = "SQL Server",
Server = "localhost",
Database = "NHL",
Trusted_Connection = "True",
Port = 1433)
# NOTE(review): paste() inserts spaces around the date (sep = " "); SQL Server
# tolerates the padded date literal, but paste0() or a parameterized query
# would be cleaner.
NHLSavantPlayer = dbSendQuery(con, paste("Select * From NHL_Games where GameDate = '", Sys.Date() , "'"))
TodaysGamesNHL = dbFetch(NHLSavantPlayer)
dbClearResult(NHLSavantPlayer)
rm(NHLSavantPlayer)
# Fix: the connection was removed without being closed, leaking the DB handle.
dbDisconnect(con)
rm(con)
# Keep only the columns the model uses.
NHLTableData = NHLTableData[,c("PlayerName","PlayerPosition","GID","GameDate","G","Age",
"Team","HW","Opp","WinLoss","Goals","Assists",
"Points","PlusMinus","Penalties","EGoals","PPGoals","SHGoals",
"GWGoals","EVAssits","PPAssits","SHAssits" ,"ShotsOnGoal","ShootingPer",
"Shits","MP","Hits","Blocks","FaceOffWins","FaceOffLoss", "FaceOffPer",
"Line", "VegasT")]
NHLTableData$TotalGoals = NHLTableData$Goals
# Fantasy points per skater-game (presumably DraftKings scoring weights --
# TODO confirm against the site's scoring rules).
NHLTableData$DKP = NHLTableData$EGoals * 8.5 + NHLTableData$EVAssits * 5 + NHLTableData$ShotsOnGoal * 1.5 +
NHLTableData$Blocks * 1.5 + NHLTableData$SHGoals * 2 + NHLTableData$SHAssits * 2
# Cached per-player/per-team history computed by earlier runs of this script;
# drop the row-number column read.csv adds as "X".
OffensiveStatsNHL = read.csv('OffensiveStatsNHL_All.csv')
DefensiveStatsNHL = read.csv('DefensiveStatsNHL_All.csv')
OffensiveStatsNHL = subset(OffensiveStatsNHL, select = -X)
DefensiveStatsNHL = subset(DefensiveStatsNHL, select = -X)
######################################################
############### Team Defensive stats #################
######################################################
# For every (team, game date, position) combination not yet present in the
# cached DefensiveStatsNHL table, compute (a) how the team's players at that
# position performed before that date, and (b) how many points opposing
# players at that position produced -- then append a row to
# DefensiveStatsNewNHL.
DefensiveStatsNewNHL = DefensiveStatsNHL[0,]
PositionsAll = unique( NHLTableData$PlayerPosition )
Teams = unique(NHLTableData$Team)
### Get Defensive stats for each team
for (eachTeam in Teams) {
# Skip teams with no games in the source data.
subsetTeamData = subset(NHLTableData, NHLTableData$Team == eachTeam)
if (nrow(subsetTeamData) == 0)
{
next;
}
# All game dates for this team, ascending; only dates newer than what is
# already cached need to be (re)computed.
DateLevels = as.factor(unique(subsetTeamData[order(subsetTeamData$GameDate , decreasing = FALSE ),]$GameDate))
DefensiveStatsMaxDate = max(as.Date(subset(DefensiveStatsNHL, DefensiveStatsNHL$Team == eachTeam)$GameDate ), na.rm = TRUE)
DateLevels = DateLevels[as.Date(DateLevels) > DefensiveStatsMaxDate]
if (length(DateLevels) == 0){
next;
}
#############################
### For each new date, loop over every position ######
for (date in 1:length(DateLevels)){
print(paste("Team = ", eachTeam, " level " ,length(DateLevels)/date))
for (pos in as.factor(PositionsAll) ){
# temp is a one-row template carrying the cached table's column layout.
temp = DefensiveStatsNHL[1,]
## History window: strictly before this date, within the last 300 days.
subsetTeamData = subset(NHLTableData, NHLTableData$Team == eachTeam
& as.Date(NHLTableData$GameDate) < as.Date(DateLevels[date]) &
as.Date(NHLTableData$GameDate) > (as.Date(DateLevels[date]) - 300)&
as.character(NHLTableData$PlayerPosition) == pos)
subsetTeamData = subsetTeamData[order(subsetTeamData$GameDate , decreasing = TRUE ),]
currentGame = subset(NHLTableData, NHLTableData$Team == eachTeam
& as.Date(NHLTableData$GameDate) == as.Date(DateLevels[date])&
as.character(NHLTableData$PlayerPosition) == pos)
# Restrict history to the players who actually dressed in this game.
subsetTeamData = subsetTeamData[subsetTeamData$PlayerName %in% currentGame$PlayerName,]
# Cap at the 50 most recent rows; the 0 index is ignored by R, so 0:50
# selects rows 1-50.
if(nrow(subsetTeamData) > 50){
subsetTeamData = subsetTeamData[0:50,]
}
if(nrow(subsetTeamData) == 0){
next;
}
temp$GameDate = DateLevels[date]
temp$PlayerPosition = pos
temp$Team = eachTeam
#### How the team performs at this position historically (columns 4-24
#### of the cached layout are the team-side stats).
for (column in 4:24){
print(colnames(temp)[column])
temp[, colnames(temp)[column]] = mean(subsetTeamData[, colnames(temp)[column]])
}
### Get the opposition players who appear in this game at this position.
OppPositionPlayers = unique(subset(NHLTableData, NHLTableData$Team == currentGame$Opp[1]
& as.Date(NHLTableData$GameDate) == as.Date(DateLevels[date])&
as.character(NHLTableData$PlayerPosition) == pos)$PlayerName)
### Their earlier opponents and game dates, to find all games these
### players participated in.
OppositionTeams = unique(subset(NHLTableData, NHLTableData$PlayerName %in% OppPositionPlayers
& as.Date(NHLTableData$GameDate) < as.Date(DateLevels[date]))$Opp)
OppositionDates = unique(subset(NHLTableData, NHLTableData$PlayerName %in% OppPositionPlayers
& as.Date(NHLTableData$GameDate) < as.Date(DateLevels[date]))$GameDate)
## Everything strictly before this date in those matchups.
subsetOppData = subset(NHLTableData, NHLTableData$Team %in% unique(OppositionTeams)
& as.Date(NHLTableData$GameDate) < as.Date(DateLevels[date])
& as.Date(NHLTableData$GameDate) %in% (as.Date(OppositionDates)) &
as.character(NHLTableData$PlayerPosition) == pos)
subsetOppData = subsetOppData[order(subsetOppData$GameDate , decreasing = TRUE ),]
if(nrow(subsetOppData) > 50){
subsetOppData = subsetOppData[0:50,]
}
#### Points allowed: fill the "...Opp" columns by averaging the matching
#### base column in the opposition history.
# NOTE(review): this loop starts at 24 while the analogous "Today" loop
# below starts at 25, so column 24 (filled by the 4:24 loop above) is
# overwritten here -- likely an off-by-one; confirm against the cached
# table's column layout.
for (column in 24:length(colnames(temp)) ){
#print(colnames(temp)[column])
col = gsub('Opp', '', colnames(temp)[column], perl = TRUE)
temp[, colnames(temp)[column]] = mean(subsetOppData[, col])
}
DefensiveStatsNewNHL = rbind(temp, DefensiveStatsNewNHL)
}
}
}
#################################################################################
######################## Offensive Stats ########################################
# For every (player, game date) pair not yet cached in OffensiveStatsNHL,
# summarise the player's own form over the previous 30 days plus his
# opponents' recent output, and append one row to OffensiveStatsNewNHL.
OffensiveStatsNewNHL = OffensiveStatsNHL[0,]
PositionsAll = unique( NHLTableData$PlayerPosition )
allPlayers = unique(NHLTableData$PlayerName)
for (player in allPlayers) {
## Get this player's games; skip players with none.
subsetPlayerData = subset(NHLTableData, NHLTableData$PlayerName == player)
if (nrow(subsetPlayerData) == 0)
{
next;
}
# Game dates ascending; only dates newer than what is already cached.
DateLevels = as.factor(unique(subsetPlayerData[order(subsetPlayerData$GameDate , decreasing = FALSE ),]$GameDate))
DefensiveStatsMaxDate = max(as.Date(subset(OffensiveStatsNHL, OffensiveStatsNHL$PlayerName == player)$GameDate ), na.rm = TRUE)
DateLevels = DateLevels[as.Date(DateLevels) > DefensiveStatsMaxDate]
if (length(DateLevels) == 0){
next;
}
# Add current Date
# DateLevels = factor(c(levels(DateLevels),substring(Sys.time(),0,10)))
print(player)
## Iterate over dates, skipping the first (no prior history for it).
# NOTE(review): when length(DateLevels) == 1, 2:length(...) is c(2, 1) and
# the loop still runs (index 2 yields NA) -- guard only checks for 0.
for (date in 2:length(DateLevels)){
# temp is a one-row template carrying the cached table's column layout.
temp = OffensiveStatsNHL[1,]
# Player's games strictly before this date, within the last 30 days,
# most recent first, capped at 10 rows (0:10 selects rows 1-10).
subsetPlayerData = subset(NHLTableData, NHLTableData$PlayerName == player
& as.Date(NHLTableData$GameDate) < as.Date(DateLevels[date])
& as.Date(NHLTableData$GameDate) > (as.Date(DateLevels[date]) - 30)
)
subsetPlayerData = subsetPlayerData[order(subsetPlayerData$GameDate , decreasing = TRUE ),]
if(nrow(subsetPlayerData) > 10){
subsetPlayerData = subsetPlayerData[0:10,]
}
currentGame = subset(NHLTableData, NHLTableData$PlayerName == player
& as.Date(NHLTableData$GameDate) == as.Date(DateLevels[date]))
if (nrow(currentGame) == 0 ){
next
}
temp$GameDate = DateLevels[date]
temp$PlayerName = player
temp$PlayerPosition = as.character(subsetPlayerData$PlayerPosition[1])
temp$Team = as.character(subsetPlayerData$Team[1])
temp$Opp = as.character(currentGame$Opp[1])
# Average ice time, converted from seconds to minutes.
temp$MP = mean(as.numeric(subsetPlayerData$MP))/60
temp$Line = currentGame$Line[1]
# HW encodes venue: '@' / 'N' mean away or neutral (0), anything else home (1).
if (currentGame$HW == '@' | currentGame$HW == 'N'){
temp$HW = 0
}
else{
temp$HW = 1
}
#### How good the player has been over the last 30 days: average each
#### stat column of the cached layout.
for (column in 8:(length(colnames(temp)) - 6) ){
print(colnames(temp)[column])
temp[, colnames(temp)[column]] = mean(subsetPlayerData[, colnames(temp)[column]])
}
### Opposing team stats: players dressing for the opponent in this game,
### their output over the prior 15 days, capped at 100 rows.
currentOppPlayers = unique(subset(NHLTableData, NHLTableData$Team == currentGame$Opp
& as.Date(NHLTableData$GameDate) == as.Date(DateLevels[date]))$PlayerName)
currentOppPlayers = subset(NHLTableData, NHLTableData$PlayerName %in% currentOppPlayers
& as.Date(NHLTableData$GameDate) < as.Date(DateLevels[date])
& as.Date(NHLTableData$GameDate) > (as.Date(DateLevels[date]) - 15))
currentOppPlayers = currentOppPlayers[order(currentOppPlayers$GameDate , decreasing = TRUE ),]
if(nrow(currentOppPlayers) > 100){
currentOppPlayers = currentOppPlayers[0:100,]
}
temp$GoalsOpp = mean(currentOppPlayers$Goals)
temp$EVAssitsOpp = mean(currentOppPlayers$EVAssits)
temp$ShotsOnGoalOpp = mean(currentOppPlayers$ShotsOnGoal)
temp$BlocksOpp = mean(currentOppPlayers$Blocks)
temp$HitsOpp = mean(currentOppPlayers$Hits)
temp$FaceOffPerOpp = mean(currentOppPlayers$FaceOffPer)
# Targets for the model: the player's fantasy points and goals in this game.
temp$DKP = currentGame$DKP[1]
temp$TotalGoals = currentGame$TotalGoals[1]
OffensiveStatsNewNHL = rbind(temp, OffensiveStatsNewNHL)
}
}
OffensiveStatsNewNHL[is.na(OffensiveStatsNewNHL)] = 0
OffensiveStatsNewNHL[is.null(OffensiveStatsNewNHL)] = 0
#########################################################################
######################### Goalie ########################################
# For every (goalie, game date) pair not yet cached in GoalieStatsNHL,
# summarise the goalie's recent form, his own team's recent per-game totals
# and the opposing team's recent per-game totals, then append one row to
# GoalieStatsNewNHL.
GoalieStatsNewNHL = GoalieStatsNHL[0,]
# Goalie fantasy points (presumably DraftKings weights -- TODO confirm).
NHLGoalieData$DKP = NHLGoalieData$Saves* 0.7 - NHLGoalieData$GoalsAgainst * 3.5
allPlayers = unique(NHLGoalieData$PlayerName)
for (player in allPlayers) {
## Get this goalie's games; skip goalies with none.
subsetPlayerData = subset(NHLGoalieData, NHLGoalieData$PlayerName == player)
if (nrow(subsetPlayerData) == 0)
{
next;
}
# Game dates ascending; only dates newer than what is already cached.
DateLevels = as.factor(unique(subsetPlayerData[order(subsetPlayerData$GameDate , decreasing = FALSE ),]$GameDate))
DefensiveStatsMaxDate = max(as.Date(subset(GoalieStatsNHL, GoalieStatsNHL$PlayerName == player)$GameDate ), na.rm = TRUE)
DateLevels = DateLevels[as.Date(DateLevels) > DefensiveStatsMaxDate]
if (length(DateLevels) == 0){
next;
}
# Add current Date
# DateLevels = factor(c(levels(DateLevels),substring(Sys.time(),0,10)))
print(player)
## Iterate over dates, skipping the first (no prior history for it).
for (date in 2:length(DateLevels)){
# temp is a one-row template carrying the cached table's column layout.
temp = GoalieStatsNHL[1,]
# Goalie's games strictly before this date, within the last 30 days.
subsetPlayerData = subset(NHLGoalieData, NHLGoalieData$PlayerName == player
& as.Date(NHLGoalieData$GameDate) < as.Date(DateLevels[date])
& as.Date(NHLGoalieData$GameDate) > (as.Date(DateLevels[date]) - 30)
)
subsetPlayerData = subsetPlayerData[order(subsetPlayerData$GameDate , decreasing = TRUE ),]
currentGame = subset(NHLGoalieData, NHLGoalieData$PlayerName == player
& as.Date(NHLGoalieData$GameDate) == as.Date(DateLevels[date]))
if (nrow(currentGame) == 0 ){
next
}
### Current team stats: skaters dressing for the goalie's team in this
### game, over the prior 30 days, capped at 100 rows.
currentPlayers = unique(subset(NHLTableData, NHLTableData$Team == currentGame$Team
& as.Date(NHLTableData$GameDate) == as.Date(DateLevels[date]))$PlayerName)
currentPlayers = subset(NHLTableData, NHLTableData$PlayerName %in% currentPlayers
& as.Date(NHLTableData$GameDate) < as.Date(DateLevels[date])
& as.Date(NHLTableData$GameDate) > (as.Date(DateLevels[date]) - 30))
currentPlayers = currentPlayers[order(currentPlayers$GameDate , decreasing = TRUE ),]
# Fix: the original truncated the stale currentOppPlayers variable from the
# previous section here (copy-paste bug); the just-built currentPlayers is
# the frame that should be capped.
if(nrow(currentPlayers) > 100){
currentPlayers = currentPlayers[0:100,]
}
if (nrow(currentPlayers) == 0 ){
next
}
# Collapse to per-game team totals (stat columns 11:30), one row per date.
currentPlayers = aggregate(currentPlayers[,11:30], by = list(currentPlayers$GameDate), FUN = sum, na.rm=TRUE)
temp$GameDate = DateLevels[date]
temp$PlayerName = player
temp$PlayerPosition = as.character(subsetPlayerData$PlayerPosition[1])
temp$Team = as.character(subsetPlayerData$Team[1])
temp$Opp = as.character(currentGame$Opp[1])
# Average ice time, converted from seconds to minutes.
temp$MP = mean(as.numeric(subsetPlayerData$MP))/60
# HW encodes venue: '@' / 'N' mean away or neutral (0), anything else home (1).
if (currentGame$HW == '@' | currentGame$HW == 'N'){
temp$HW = 0
}
else{
temp$HW = 1
}
#### How good the goalie has been over the last 30 days.
for (column in 9:(length(colnames(temp)) - 5) ){
print(colnames(temp)[column])
temp[, colnames(temp)[column]] = mean(subsetPlayerData[, colnames(temp)[column]])
}
# Average per-game team totals in front of this goalie.
temp$Hits = mean(currentPlayers$Hits)
temp$Blocks = mean(currentPlayers$Blocks)
# Target for the model: the goalie's fantasy points in this game.
temp$DKP = currentGame$DKP[1]
### Opposing team stats: skaters dressing for the opponent in this game,
### over the prior 30 days, capped at 100 rows.
currentOppPlayers = unique(subset(NHLTableData, NHLTableData$Team == currentGame$Opp
& as.Date(NHLTableData$GameDate) == as.Date(DateLevels[date]))$PlayerName)
currentOppPlayers = subset(NHLTableData, NHLTableData$PlayerName %in% currentOppPlayers
& as.Date(NHLTableData$GameDate) < as.Date(DateLevels[date])
& as.Date(NHLTableData$GameDate) > (as.Date(DateLevels[date]) - 30))
currentOppPlayers = currentOppPlayers[order(currentOppPlayers$GameDate , decreasing = TRUE ),]
if(nrow(currentOppPlayers) > 100){
currentOppPlayers = currentOppPlayers[0:100,]
}
if (nrow(currentOppPlayers) == 0 ){
next
}
# Collapse opposing skaters to per-game team totals as well.
currentOppPlayers = aggregate(currentOppPlayers[,11:30], by = list(currentOppPlayers$GameDate), FUN = sum, na.rm=TRUE)
temp$ShotsOpp = mean(currentOppPlayers$Shots)
temp$HitsOpp = mean(currentOppPlayers$Hits)
temp$GoalsOpp = mean(currentOppPlayers$Goals)
GoalieStatsNewNHL = rbind(temp, GoalieStatsNewNHL)
}
}
GoalieStatsNewNHL[is.na(GoalieStatsNewNHL)] = 0
# (Removed GoalieStatsNewNHL[is.null(...)] = 0: is.null() on a data frame is
# always FALSE, so the line was a no-op.)
# Append the newly computed rows to the cumulative history tables, zero out
# missing values and persist them for the next run.
DefensiveStatsNHL = rbind(DefensiveStatsNewNHL, DefensiveStatsNHL)
OffensiveStatsNHL = rbind(OffensiveStatsNewNHL, OffensiveStatsNHL)
GoalieStatsNHL = rbind(GoalieStatsNewNHL, GoalieStatsNHL)
GoalieStatsNHL[is.na(GoalieStatsNHL)] = 0
DefensiveStatsNHL[is.na(DefensiveStatsNHL)] = 0
OffensiveStatsNHL[is.na(OffensiveStatsNHL)] = 0
# (Removed the df[is.null(df)] = 0 lines: is.null() on a data frame is always
# FALSE, so those assignments never did anything.)
write.csv(DefensiveStatsNHL, file = "DefensiveStatsNHL_All.csv")
write.csv(OffensiveStatsNHL, file = "OffensiveStatsNHL_All.csv")
write.csv(GoalieStatsNHL, file = "GoalieStatsNHL_All.csv")
###### Today's games ####################
###### Today's games ########################## Today's games ####################
###### Today's games ####################
###### Today's games ####################
TodaysDate = Sys.Date()
DefensiveStatsNHLToday = DefensiveStatsNHL[0,]
PositionsAll = unique(NHLTableData$PlayerPosition)
Teams = c(unique(TodaysGamesNHL$Team), unique(TodaysGamesNHL$Opp))
### Get Defensive stats for each team
for (eachTeam in Teams) {
# Iterate over each team
subsetTeamData = subset(NHLTableData, NHLTableData$Team == eachTeam)
if (nrow(subsetTeamData) == 0)
{
next;
}
#############################
### Position######
## Iterate over date
for (pos in as.factor(PositionsAll) ){
# Iterate over each date
temp = DefensiveStatsNHL[1,]
## Make sure do not include this date but eveytyhing before.
subsetTeamData = subset(NHLTableData, NHLTableData$Team == eachTeam
& as.Date(NHLTableData$GameDate) < TodaysDate &
as.Date(NHLTableData$GameDate) > (TodaysDate - 300) &
as.character(NHLTableData$PlayerPosition) == pos )
subsetTeamData = subsetTeamData[order(subsetTeamData$GameDate , decreasing = TRUE ),]
# currentGame = subset(TodaysPlayers, TodaysPlayers$Team == eachTeam & TodaysPlayers$Position == pos)
# subsetTeamData = subsetTeamData[subsetTeamData$PlayerName %in% currentGame$PlayerName,]
if(nrow(subsetTeamData) > 50){
subsetTeamData = subsetTeamData[0:50,]
}
if(nrow(subsetTeamData) == 0){
next;
}
temp$GameDate = TodaysDate
temp$Team = eachTeam
temp$PlayerPosition = pos
#### How does team perform in this position historically
for (column in 4:24){
print(colnames(temp)[column])
temp[, colnames(temp)[column]] = mean(subsetTeamData[, colnames(temp)[column]])
}
currentGame = TodaysGamesNHL[TodaysGamesNHL$Team == eachTeam,]
if(nrow(currentGame) == 0){
currentGame = TodaysGamesNHL[TodaysGamesNHL$Opp == eachTeam,]
}
### Get Opposition Players in the game
OppPositionPlayers = unique(subset(NHLTableData, NHLTableData$Team == currentGame$Opp[1] &
as.Date(NHLTableData$GameDate) < TodaysDate &
as.Date(NHLTableData$GameDate) > (TodaysDate - 30) &
as.character(NHLTableData$PlayerPosition) == pos)$PlayerName)
### Get Opposition Players and their dates to find all games these players particiated in
OppositionTeams = unique(subset(NHLTableData, NHLTableData$PlayerName %in% OppPositionPlayers)$Opp)
OppositionDates = unique(subset(NHLTableData, NHLTableData$PlayerName %in% OppPositionPlayers)$GameDate)
## Make sure do not include this date but eveytyhing before.
subsetOppData = subset(NHLTableData, NHLTableData$Team == currentGame$Opp[1]
& as.Date(NHLTableData$GameDate) < TodaysDate
& NHLTableData$PlayerName %in% OppPositionPlayers
& as.character(NHLTableData$PlayerPosition) == pos)
subsetOppData = subsetOppData[order(subsetOppData$GameDate , decreasing = TRUE ),]
if(nrow(subsetOppData) > 100){
subsetOppData = subsetOppData[0:50,]
}
#### How many points have been allowed
for (column in 25:length(colnames(temp)) ){
#print(colnames(temp)[column])
col = gsub('Opp', '', colnames(temp)[column], perl = TRUE)
temp[, colnames(temp)[column]] = mean(subsetOppData[, col])
}
DefensiveStatsNHLToday = rbind(temp, DefensiveStatsNHLToday)
}
}
# Zero-fill the NA cells produced when a team/position had no history.
DefensiveStatsNHLToday[is.na(DefensiveStatsNHLToday)] = 0
# NOTE(review): is.null() on a data.frame returns a single FALSE, so this
# line selects nothing and is a no-op.
DefensiveStatsNHLToday[is.null(DefensiveStatsNHLToday)] = 0
######### Offensive Stats
# Candidate skater pool for today's slate: anyone with a logged game since
# 2019-10-25 (presumably the season start -- confirm) for a team listed on
# either side of today's matchups (Team column first, then Opp column).
TodaysPlayers = subset(NHLTableData, NHLTableData$Team %in% TodaysGamesNHL$Team
                       & as.Date(NHLTableData$GameDate) > as.Date("2019-10-25") )
TodaysPlayers = rbind(TodaysPlayers, subset(NHLTableData, NHLTableData$Team %in% TodaysGamesNHL$Opp
                                            & as.Date(NHLTableData$GameDate) > as.Date("2019-10-25") ))
allPlayers = unique(TodaysPlayers$PlayerName)
# Seed today's offensive table with the first historical row so the rbind()s
# in the loop below inherit the full column layout.
# NOTE(review): this seed row is never removed afterwards, so one stale copy
# of OffensiveStatsNHL[1, ] remains at the bottom of OffensiveStatsNHLToday.
OffensiveStatsNHLToday = OffensiveStatsNHL[1, ]
# Build one synthetic "today" offensive feature row per candidate player by
# averaging the player's most recent games, then prepend it to
# OffensiveStatsNHLToday.
for (player in allPlayers) {
## Get Playerdata
# Full-history pull, used only to skip players with no rows at all.
# NOTE(review): redundant work -- it is recomputed with date filters below.
subsetPlayerData = subset(NHLTableData, NHLTableData$PlayerName == player)
if (nrow(subsetPlayerData) == 0)
{
next;
}
# DateLevels = factor(c(levels(DateLevels),substring(Sys.time(),0,10)))
print(player)
## Iterate over date
# Iterate over each date
# Template row: copy of the historical table's first row; every relevant
# column is overwritten further down.
temp = OffensiveStatsNHL[1,]
# The player's games from the 30 days before today...
subsetPlayerData = subset(NHLTableData, NHLTableData$PlayerName == player
                          & as.Date(NHLTableData$GameDate) < as.Date(TodaysDate)
                          & as.Date(NHLTableData$GameDate) > (as.Date(TodaysDate) - 30)
)
# ...newest first, capped at the 10 most recent (the 0 in 0:10 selects
# nothing in R, so this behaves like 1:10).
subsetPlayerData = subsetPlayerData[order(subsetPlayerData$GameDate , decreasing = TRUE ),]
if(nrow(subsetPlayerData) > 10){
subsetPlayerData = subsetPlayerData[0:10,]
}
# Find today's opponent: the player's team appears in either the Team or
# the Opp column of the schedule.
currentGame = TodaysGamesNHL[TodaysGamesNHL$Team == subsetPlayerData$Team[1],]
if(nrow(currentGame) == 0){
Opp = TodaysGamesNHL[TodaysGamesNHL$Opp == subsetPlayerData$Team[1],]$Team
}
else{
Opp = currentGame$Opp
}
temp$GameDate = TodaysDate
temp$PlayerName = player
temp$PlayerPosition = as.character(subsetPlayerData$PlayerPosition[1])
temp$Team = as.character(subsetPlayerData$Team[1])
temp$Opp = as.character(Opp)
# MP looks like it is stored in seconds and /60 converts to minutes of ice
# time -- TODO confirm against the scraper.
temp$MP = mean(as.numeric(subsetPlayerData$MP))/60
temp$HW = 0
#### How good the player is last 30 days
# Columns 8 .. (ncol - 6) are assumed to be the per-game skater stat
# columns; this range is brittle if the table layout changes -- TODO confirm.
for (column in 8:(length(colnames(temp)) - 6) ){
print(colnames(temp)[column])
temp[, colnames(temp)[column]] = mean(subsetPlayerData[, colnames(temp)[column]])
}
### Opposing Team stats
# Everyone who has ever appeared for the opponent...
currentOppPlayers = unique(NHLTableData[NHLTableData$Team == Opp, ]$PlayerName)
# (subset(NHLTableData, NHLTableData$Team == Opp)$PlayerName)
# ...restricted to their rows from the last 15 days, newest first, capped
# at 100 rows.
currentOppPlayers = subset(NHLTableData, NHLTableData$PlayerName %in% currentOppPlayers
                           & as.Date(NHLTableData$GameDate) < as.Date(TodaysDate)
                           & as.Date(NHLTableData$GameDate) > (as.Date(TodaysDate) - 15))
currentOppPlayers = currentOppPlayers[order(currentOppPlayers$GameDate , decreasing = TRUE ),]
if(nrow(currentOppPlayers) > 100){
currentOppPlayers = currentOppPlayers[0:100,]
}
# Opponent-strength features: plain per-row averages of the opponent's
# recent box-score lines.
temp$GoalsOpp = mean(currentOppPlayers$Goals)
temp$EVAssitsOpp = mean(currentOppPlayers$EVAssits)
temp$ShotsOnGoalOpp = mean(currentOppPlayers$ShotsOnGoal)
temp$BlocksOpp = mean(currentOppPlayers$Blocks)
temp$HitsOpp = mean(currentOppPlayers$Hits)
temp$FaceOffPerOpp = mean(currentOppPlayers$FaceOffPer)
# DKP (DraftKings points) and TotalGoals are the prediction targets for
# today and therefore unknown; zeroed here.
temp$DKP = 0
temp$TotalGoals = 0
OffensiveStatsNHLToday = rbind(temp, OffensiveStatsNHLToday)
}
## ---------------------------------------------------------------------------
## Combine today's offensive, defensive and goalie aggregates and predict
## DraftKings points (DKP) and total goals per player with per-player
## random forests.
## Inputs (globals built earlier in this script): OffensiveStatsNHL(Today),
## DefensiveStatsNHL(Today), GoalieStatsNHL, NHLTableData.
## Output: `Results`, one row per scoreable player on today's slate.
## (A long commented-out NBA "points allowed against position" exploration
## previously lived inside the loop; removed -- recover from VCS if needed.)
## ---------------------------------------------------------------------------

# Fold today's synthetic rows back into the historical tables; GameDate is
# round-tripped through character so rbind() does not fight over types,
# then restored to Date for the date arithmetic below.
OffensiveStatsNHLToday$GameDate = as.character(OffensiveStatsNHLToday$GameDate)
OffensiveStatsNHLToday[is.na(OffensiveStatsNHLToday)] = 0
OffensiveStatsNHL = rbind(OffensiveStatsNHLToday, OffensiveStatsNHL)
DefensiveStatsNHL = rbind(DefensiveStatsNHLToday, DefensiveStatsNHL)
OffensiveStatsNHL$GameDate = as.Date(OffensiveStatsNHL$GameDate)
DefensiveStatsNHL$GameDate = as.Date(DefensiveStatsNHL$GameDate)

#############################################################################
#################################### Prediction #############################
# Join each player-offense row (.x suffix) to the opposing team's defensive
# profile for the same date and position (.y suffix), then to the opposing
# goalie's profile for the same date.
CombinedStatsNHL = merge(x = OffensiveStatsNHL, y = DefensiveStatsNHL,
                         by.x = c("GameDate", "PlayerPosition", "Opp"),
                         by.y = c("GameDate", "PlayerPosition", "Team"))
CombinedStatsNHL[is.na(CombinedStatsNHL)] = 0
CombinedStatsNHL = merge(x = CombinedStatsNHL, y = GoalieStatsNHL,
                         by.x = c("GameDate", "Opp"),
                         by.y = c("GameDate", "Team"))
CombinedStatsNHL[is.na(CombinedStatsNHL)] = 0
# write.csv(CombinedStats, file = "CombinedStatsNHL.csv")

DateCheck = Sys.Date()

# Feature set shared by every model fit below; defined once so the four
# train/predict calls cannot drift out of sync (previously this 29-name
# vector was copy-pasted four times).
featureCols = c("MP.x", "HW", "Goals.x", "Assists.x", "Points.x", "Penalties.x",
                "ShotsOnGoal.x", "Hits.x", "Blocks.x", "FaceOffPer.x",
                "GoalsOpp.x", "FaceOffPerOpp", "EVAssitsOpp.x",
                "ShotsOnGoalOpp.x", "HitsOpp.x", "BlocksOpp.x",
                "Goals.y", "Assists.y", "Points.y", "Penalties.y",
                "MP.y", "Hits.y", "Blocks.y", "FaceOffPer.y",
                "GoalsOpp.y", "AssistsOpp",
                "ShotsOnGoalOpp.y", "HitsOpp.y", "BlocksOpp.y")

# Start empty and let the first rbind() define the columns.
# NOTE(review): the previous explicit template declared `team` (lower case)
# and a `Goals` column the loop never produced, so the very first rbind()
# would abort with "names do not match previous names".
Results = data.frame()

##############################################################
################## NHL Results ###############################
# Players that actually have a combined feature row for today.
allPlayers = subset(CombinedStatsNHL, as.Date(CombinedStatsNHL$GameDate) == DateCheck)$PlayerName

for (player in allPlayers) {
  print(player)
  # Today's feature row (targets were zeroed upstream) ...
  Data_Cleaned_Test = subset(CombinedStatsNHL, as.Date(CombinedStatsNHL$GameDate) == as.Date(DateCheck)
                             & CombinedStatsNHL$PlayerName == as.character(player))
  # ... and this player's last 30 days of labelled rows for training.
  Data_Cleaned_Train = subset(CombinedStatsNHL, as.Date(CombinedStatsNHL$GameDate) < as.Date(DateCheck)
                              & as.Date(CombinedStatsNHL$GameDate) > (as.Date(DateCheck) - 30)
                              & CombinedStatsNHL$PlayerName == as.character(player))
  # Realised result for back-testing (first matching row only).
  Actual = subset(NHLTableData, as.Date(NHLTableData$GameDate) == as.Date(DateCheck)
                  & NHLTableData$PlayerName == as.character(player))
  Actual = Actual[1, ]
  Data_Cleaned_Train[is.na(Data_Cleaned_Train)] = 0
  Data_Cleaned_Test[is.na(Data_Cleaned_Test)] = 0
  if (nrow(Data_Cleaned_Train) == 0 | nrow(Data_Cleaned_Test) == 0) {
    next;
  }
  # One small forest per player and per target: DKP, then total goals.
  rf = randomForest(Data_Cleaned_Train[, featureCols],
                    y = Data_Cleaned_Train[, c("DKP")], ntree = 50, type = 'regression')
  RFPred = predict(rf, Data_Cleaned_Test[, featureCols], type = c("response"))
  rfGoals = randomForest(Data_Cleaned_Train[, featureCols],
                         y = Data_Cleaned_Train[, c("TotalGoals")], ntree = 50, type = 'regression')
  RFPredGoals = predict(rfGoals, Data_Cleaned_Test[, featureCols], type = c("response"))
  # Assemble the output row.
  Prediction2 = as.data.frame(RFPred)
  Prediction2["player"] = player
  Prediction2["position"] = Data_Cleaned_Test$PlayerPosition
  Prediction2["salary"] = Prediction2$RFPred * 1000 / 2  # heuristic: ~2 DKP per $1k salary -- TODO confirm
  Prediction2["MP"] = Data_Cleaned_Test$MP.x
  Prediction2["Team"] = Data_Cleaned_Test$Team
  Prediction2$Actual = Actual$DKP.x  # NOTE(review): confirm NHLTableData carries `DKP.x` (vs plain `DKP`)
  Prediction2["Opp"] = Data_Cleaned_Test$Opp
  Prediction2["date"] = as.Date(DateCheck)
  Prediction2["ShotsTaken"] = Data_Cleaned_Test$ShotsOnGoal.x
  Prediction2["Goals"] = RFPredGoals  # was computed but never stored before
  Results = rbind(Results, Prediction2)
}
# dbWriteTable(con, name = "NBA_DK_Prediction", value = Results, row.names = FALSE, append = TRUE)
|
4180628fd36faa7bab294d70ef56512833a27375
|
5be4d4f8d533ba3d3d7751b55d40ae2b58e58c95
|
/BSMsimData_signal.R
|
cf256573e0e840258b949a4760f8af2e202f3c31
|
[] |
no_license
|
surf3s/Machine_Learning_Commentary
|
50f9a8de083afb1bec53b112dafb7b3c9916f257
|
eed236a26a03ebde303b7137656fd0fd0131a996
|
refs/heads/main
| 2023-08-20T00:36:02.647339
| 2021-10-12T11:38:44
| 2021-10-12T11:38:44
| 274,940,968
| 3
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 20,624
|
r
|
BSMsimData_signal.R
|
# Code to simulate structured BSM data as multinomial RVs
# probability information from
#
# Domínguez-Rodrigo, Manuel, S. De Juana, Ana Belén Galan, and Mathieu Rodríguez.
# "A new protocol to differentiate trampling marks from butchery cut marks."
# Journal of Archaeological Science 36, no. 12 (2009): 2643-2654.
# Table 5.
# https://doi.org/10.1016/j.jas.2009.07.017
##############################################################################
## Simulated bone-surface-modification (BSM) marks
##
## Each categorical variable is simulated as n single-trial multinomial
## draws whose level probabilities are the observed proportions from
## Table 5 of Dominguez-Rodrigo et al. 2009 (full citation in the file
## header).  The original script repeated the same
## rmultinom -> transpose -> recode -> rowSums boilerplate for every
## variable of every class; the two helpers below factor that out while
## producing the same distributions, codes, and column layout.
##############################################################################

# Draw `n` observations from one categorical variable.
#
# prob  : level probabilities (rmultinom renormalises internally, so the
#         Table-5 rows that do not sum exactly to 1 behave as before).
# codes : numeric code recorded for each level, e.g. c(1, 0) for
#         presence/absence or c(1, 2, 3) for three-level traits.
#
# With size = 1 exactly one level is drawn per observation, so multiplying
# the 0/1 indicator matrix by `codes` picks out the drawn level's code --
# identical to the recode-then-rowSums idiom used previously.
draw_coded <- function(n, prob, codes) {
  draws <- t(rmultinom(n, size = 1, prob = prob))
  as.vector(draws %*% codes)
}

# Assemble one simulated mark class as a data frame with the same 18
# columns as before (Class plus v1..v17).
#
# label : value for the Class column ("trampling", "unretouched", ...).
# n     : number of simulated marks.
# specs : named list, one entry per coded variable, each a
#         list(prob = ..., codes = ...) consumed by draw_coded().
#
# v5, v15 and v17 were not reported per class in the source table and are
# filled with the literal string "NA", matching the original coding.
# v12-v14 describe internal microstriations, so they are forced to 0
# (not applicable) wherever v11 records that none are present.  (For the
# retouched class v11 is always 1, so this is a no-op there, exactly as in
# the original script.)
build_class <- function(label, n, specs) {
  Class <- as.data.frame(rep(label, n))  # same construction as before, so
  colnames(Class) <- "Class"             # character/factor behaviour tracks the R version
  vals <- lapply(specs, function(s) draw_coded(n, s$prob, s$codes))
  no_microstria <- which(vals$v11_internal_micro_stria == 0)
  vals$v12_micro_stria_traj[no_microstria] <- 0
  vals$v13_micro_stria_traj_shape[no_microstria] <- 0
  vals$v14_micro_stria_traj_location[no_microstria] <- 0
  cbind(Class,
        v1_groove_traj                = vals$v1_groove_traj,
        v2_barb                       = vals$v2_barb,
        v3_orientation                = vals$v3_orientation,
        v4_groove_shape               = vals$v4_groove_shape,
        v5_groove_no                  = rep("NA", n),
        v6_groove_symmetry            = vals$v6_groove_symmetry,
        v7_shoulder_effect            = vals$v7_shoulder_effect,
        v8_flaking                    = vals$v8_flaking,
        v9_extent_of_flaking          = vals$v9_extent_of_flaking,
        v10_stria_overlapping         = vals$v10_stria_overlapping,
        v11_internal_micro_stria      = vals$v11_internal_micro_stria,
        v12_micro_stria_traj          = vals$v12_micro_stria_traj,
        v13_micro_stria_traj_shape    = vals$v13_micro_stria_traj_shape,
        v14_micro_stria_traj_location = vals$v14_micro_stria_traj_location,
        v15_main_groove_length        = rep("NA", n),
        v16_assoc_shallow_stria       = vals$v16_assoc_shallow_stria,
        v17_assoc_tooth_pits          = rep("NA", n))
}

##############################
####### Trampling Marks ######
##############################
trampling <- build_class("trampling", 224, list(
  v1_groove_traj             = list(prob = c(.298, .167, .534),      codes = c(1, 2, 3)),
  v2_barb                    = list(prob = c(.024, .976),            codes = c(1, 0)),
  v3_orientation             = list(prob = c(.099, .08, .821),       codes = c(1, 2, 3)),
  v4_groove_shape            = list(prob = c(.04, .96),              codes = c(1, 2)),
  v6_groove_symmetry         = list(prob = c(0.9003984, 0.09960159), codes = c(1, 2)),
  v7_shoulder_effect         = list(prob = c(.059, .941),            codes = c(1, 0)),
  v8_flaking                 = list(prob = c(.027, .973),            codes = c(1, 0)),
  # NOTE(review): these probabilities sum to 1.232 in the original script;
  # rmultinom() renormalises, but check against Table 5 (perhaps .007/.019
  # were intended).
  v9_extent_of_flaking       = list(prob = c(.07, .19, .972),        codes = c(1, 2, 0)),
  v10_stria_overlapping      = list(prob = c(.803, .197),            codes = c(1, 0)),
  v11_internal_micro_stria   = list(prob = c(.75, .25),              codes = c(1, 0)),
  v12_micro_stria_traj       = list(prob = c(.673, .327),            codes = c(1, 2)),
  v13_micro_stria_traj_shape = list(prob = c(.828, .172),            codes = c(1, 2)),
  v14_micro_stria_traj_location = list(prob = c(.028, .872, .1),     codes = c(1, 2, 3)),
  v16_assoc_shallow_stria    = list(prob = c(.004, .996),            codes = c(0, 1))))

###################################
####### Unretouched tools CM ######
###################################
unretouched <- build_class("unretouched", 246, list(
  v1_groove_traj             = list(prob = c(.935, .065, .0),  codes = c(1, 2, 3)),
  v2_barb                    = list(prob = c(.102, .898),      codes = c(1, 0)),
  # NOTE(review): sums to 1.396; renormalised by rmultinom() -- verify.
  v3_orientation             = list(prob = c(.4, .39, .606),   codes = c(1, 2, 3)),
  v4_groove_shape            = list(prob = c(.967, .033),      codes = c(1, 2)),
  v6_groove_symmetry         = list(prob = c(.862, .138),      codes = c(1, 2)),
  v7_shoulder_effect         = list(prob = c(.329, .671),      codes = c(1, 0)),
  v8_flaking                 = list(prob = c(.146, .854),      codes = c(1, 0)),
  v9_extent_of_flaking       = list(prob = c(.00, .146, .854), codes = c(1, 2, 0)),
  # NOTE(review): sums to 1.08 -- verify (.049 may have been intended).
  v10_stria_overlapping      = list(prob = c(.129, .951),      codes = c(1, 0)),
  v11_internal_micro_stria   = list(prob = c(.772, .228),      codes = c(1, 0)),
  v12_micro_stria_traj       = list(prob = c(1, 0),            codes = c(1, 2)),
  v13_micro_stria_traj_shape = list(prob = c(1, 0),            codes = c(1, 2)),
  v14_micro_stria_traj_location = list(prob = c(.732, 0, .041), codes = c(1, 2, 3)),
  v16_assoc_shallow_stria    = list(prob = c(.976, .024),      codes = c(0, 1))))

#################################
####### Retouched tools CM ######
#################################
retouched <- build_class("retouched", 105, list(
  v1_groove_traj             = list(prob = c(.971, .0, .029),  codes = c(1, 2, 3)),
  v2_barb                    = list(prob = c(.057, .943),      codes = c(1, 0)),
  v3_orientation             = list(prob = c(.0, .029, .971),  codes = c(1, 2, 3)),
  v4_groove_shape            = list(prob = c(.057, .943),      codes = c(1, 2)),
  v6_groove_symmetry         = list(prob = c(.40, .60),        codes = c(1, 2)),
  v7_shoulder_effect         = list(prob = c(.743, .257),      codes = c(1, 0)),
  v8_flaking                 = list(prob = c(.514, .486),      codes = c(1, 0)),
  v9_extent_of_flaking       = list(prob = c(.114, .40, .486), codes = c(1, 2, 0)),
  v10_stria_overlapping      = list(prob = c(0, 1.0),          codes = c(1, 0)),
  v11_internal_micro_stria   = list(prob = c(1.0, .0),         codes = c(1, 0)),
  v12_micro_stria_traj       = list(prob = c(1.0, .0),         codes = c(1, 2)),
  v13_micro_stria_traj_shape = list(prob = c(1.0, .0),         codes = c(1, 2)),
  v14_micro_stria_traj_location = list(prob = c(.029, .886, .086), codes = c(1, 2, 3)),
  v16_assoc_shallow_stria    = list(prob = c(1.0, .0),         codes = c(0, 1))))

########################
#### Generated Data ####
########################
gen.data <- rbind(trampling, unretouched, retouched)
|
9132dc1388bbed397ee0472ba124171a3b560a44
|
3adf28de039a411cc6ea82bf3cb4e39b2f408d42
|
/R/idahoans.R
|
6b5a25f8d89c4897284708d82b77dfe2d19635f7
|
[
"MIT"
] |
permissive
|
JoeyStanley/joeysvowels
|
980d5b4f896f6323a8ad97ed02e54dffde0633dd
|
c560e52d58ddb789e329c6ed68b50723b2123a5d
|
refs/heads/master
| 2022-12-19T22:48:44.573636
| 2020-09-30T08:35:43
| 2020-09-30T08:35:43
| 284,346,307
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,035
|
r
|
idahoans.R
|
#' Vowel formants from 11 Idahoans
#'
#' A dataset containing information on 1,100 vowel tokens produced by 10 people
#' from the state of Idaho in the United States. For each speaker, there are
#' ten tokens per canonical monophthong, randomly selected from a larger
#' dataset. Vowels are not preceding sonorants and not following coronal
#' consonants. For each token, F1, F2, F3, and F4 were extracted at the midpoint
#' of each vowel using a Praat script. The individuals consented to their data
#' being used in this way.
#'
#' This dataset is useful for testing and demonstrating vowel normalization
#' functions.
#'
#' @format A dataframe with 1,100 rows and 7 variables.
#' \describe{
#' \item{speaker}{a unique identifier per speaker}
#' \item{sex}{biological sex of the speakers}
#' \item{vowel}{vowel, in ARPABET. This is a handy transcription system since
#' all General American English vowels are represented using two-letter codes.}
#' \item{F1, F2, F3, F4}{vowel formant measurements, in Hz}
#' }
"idahoans"
|
ee049698273af4a1073f6600099e72da78b53290
|
3c25f49d8592847a741b9324b482eb769721d985
|
/man/ACTmath.Rd
|
d7307de48c55728b18f54eb53be0f1712b882dc6
|
[] |
no_license
|
talbano/equate
|
5de3d041aab6817dfad9b2fef9a37ca87321aeef
|
3583de82faf337c4c9e0651db9293ed8b8a768c5
|
refs/heads/master
| 2022-12-01T17:23:14.460656
| 2022-12-01T16:12:50
| 2022-12-01T16:12:50
| 44,709,479
| 7
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 814
|
rd
|
ACTmath.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ACTmath.R
\docType{data}
\name{ACTmath}
\alias{ACTmath}
\title{ACT Mathematics Test Scores}
\format{
A 41 by 3 \code{data.frame} containing the score scale, frequencies
for form X, and frequencies for form Y.
}
\source{
Kolen, M. J., and Brennan, R. L. (2004). \emph{Test Equating,
Scaling, and Linking}. (2nd ed.), New York: Springer.
The dataset is also provided with the equating software RAGE, available at
the following link:
\url{https://education.uiowa.edu/casma/computer-programs}
}
\usage{
ACTmath
}
\description{
This dataset contains score distributions for two forms of the ACT
mathematics test, as presented in table 2.5 of \emph{Test Equating, Scaling,
and Linking} (Kolen and Brennan, 2004; p. 50).
}
\keyword{datasets}
|
44fc78772a7cbdffb65e33fb4ea5a66949d04162
|
a2d1b0a9274dc34982d7be7e74a45679131d4ccd
|
/R/plot_lagcurve.R
|
bb513a1b26ee8ae210c4942d3fad2678263ed4a1
|
[] |
no_license
|
alastairrushworth/badlm
|
d6b5671167a5a4b1f24947c9184bc1f8dd3307d7
|
99a078fafb62ef000f5d8c1f4db677368c394c0f
|
refs/heads/master
| 2023-03-07T12:57:00.726065
| 2023-03-01T13:52:08
| 2023-03-01T13:52:08
| 163,391,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 824
|
r
|
plot_lagcurve.R
|
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 geom_line
#' @importFrom ggplot2 xlab
#' @importFrom ggplot2 ylab
#' @importFrom ggplot2 ggtitle
#' @importFrom tibble tibble
#' @importFrom tidyr gather
#' @importFrom magrittr %>%
#' @export
plot_lagcurve <- function(dlm_object){
lagdf <- tibble(lag = dlm_object$coefs$lag,
`97.5%` = dlm_object$coefs$upper,
`2.5%` = dlm_object$coefs$lower,
median = dlm_object$coefs$beta)
plt <- lagdf %>%
gather(key = 'quantile', value = 'coef', -lag) %>%
ggplot(aes(x = lag, y = coef, group = quantile, col = quantile)) +
geom_line() +
xlab('Lag number') +
ylab('Lag coefficient') +
ggtitle('Posterior median and credible interval for lag function')
return(plt)
}
|
db83837ec44549283a48b645c700f957fda4e9e3
|
c2cada714ea0a494a72a1d1a60deb500549fee68
|
/src/generate_reads.R
|
3c2feffa653837f975c035445ad7b089aab072bf
|
[] |
no_license
|
hothman/Investigate_differentially_expressed_genes_in_COVID_patients_using_RNASeq_data
|
50370d8470dd6356cdb93fd11243a3bd08c8324d
|
0518402a2a484d64009ed642ac4ef34f508e110e
|
refs/heads/master
| 2022-12-17T00:52:57.636637
| 2020-09-07T13:57:31
| 2020-09-07T13:57:31
| 284,758,957
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,806
|
r
|
generate_reads.R
|
# Installing Bioconda if it's not available
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("polyester")
BiocManager::install("Biostrings")
library(polyester)
library(Biostrings)
setwd("./src")
fasta_file = system.file('extdata', './fasta_ref.fa', package='polyester')
# read the big compressed fasta file (it can also read plain text fasta)
fasta = readDNAStringSet("human.5.rna.fna.gz")
print(paste("Number of sequences in the FASTA file"))
print(length(fasta))
# We want to subset only a handful of sequences from the fasta file
n <- 100 # here we take only the first 100 sequences from the file if you want to inlude them all just use the 'fasta' object in the further steps
# subset the FASTA file to first 20 transcripts
small_fasta = fasta[1:n]
writeXStringSet(small_fasta, 'subset_genes.fa')
# generate random reads per transcript to get different values of FPKM
max_read <- 20
n_groups <- 2
n_samples <- 1 # how many samples you want to simulate
max_fold_change <- 5
coverage <- sample.int(max_read, n, replace = TRUE)
# ~20x coverage ----> reads per transcript = transcriptlength/readlength * 20
# here all transcripts will have ~equal FPKM
readspertx = round(coverage * width(small_fasta) / 100)
matrix(sample.int(max_fold_change, size = n, replace = TRUE), nrow = n, ncol = n_samples)
# Build a matrix of random fold change for each n_samples
fold_changes <- matrix(, nrow = n, ncol = 0)
for(i in 1:n_groups) {
fold_changes <- cbind(fold_changes, sample.int(max_fold_change, size = n, replace = TRUE))
print(fold_changes)
}
# simulation call:
simulate_experiment('subset_genes.fa', reads_per_transcript=readspertx,
num_reps=c(1,1), fold_changes=fold_changes, outdir='simulated_reads')
|
c41853c5f5ee1ec96615e05f700529e271087418
|
8cdb9b46931ac056c31a2dd0941ba714540edb8c
|
/2012-02-22-PREBS-rma/scripts/combine-stats.R
|
6d61ec73cf76702d37726031e3a101cbff76d933
|
[] |
no_license
|
uziela/helsinki-project
|
6ced7a27aefa103df8c73b85d50acbebaa317dc5
|
86335b6508c00b7f94298a963dfaa693faab9270
|
refs/heads/master
| 2021-03-12T22:50:38.445324
| 2015-02-24T10:51:21
| 2015-02-24T10:51:21
| 18,182,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 603
|
r
|
combine-stats.R
|
#! /usr/bin/Rscript
#library(xtable)
cargs <- commandArgs(trailingOnly = TRUE)
base_file <- cargs[1]
out_file <- cargs[2]
dir_names <- cargs[3:length(cargs)]
N <- 6
#thresh <- gsub(".*basic-([0-9]+)-([0-9]+).*", "\\1-\\2%", dir_names)
thresh <- gsub(".*-[0-9]+-([0-9]+).*", "\\1%", dir_names)
#print(thresh)
my_stats <- NULL
#for (i in 1:length(dir_names)) {
for (i in 1:6) {
stat_file <- paste(dir_names[i], "/", base_file, sep="")
my_tab <- read.csv(stat_file)
#print(my_tab)
my_stats <- cbind(my_stats, t(my_tab[1,]))
}
colnames(my_stats) <- thresh[1:6]
save(my_stats, file=out_file)
|
6d45696aa72ab9c51f5c4ae7624bb8bdd45a4146
|
32f8a61dcdb255df6f73c63c93e411db958e776e
|
/lecture3/Raphael_Nash_p2/server.R
|
39f99f86fbdf1d63399e1597efcda00ac35085ae
|
[] |
no_license
|
RaphaelNash/CUNY-DATA-608-VisualAnalytics
|
ac1cb36c3aeb040efbf0aec285d37c8a1aeb953a
|
6fbc62320b89a74be239a5e473298ee609246fd1
|
refs/heads/master
| 2021-06-06T03:50:28.808655
| 2017-12-17T17:21:03
| 2017-12-17T17:21:03
| 101,523,524
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,031
|
r
|
server.R
|
##
#Server logic for CUNY DATA 608 Lecure 3 p2
# Change in mortality Rates
#Author: Raphael Nash
library(shiny)
library(dplyr)
library(DT)
library("ggplot2")
library("maps")
library(DT)
library(tidyr)
shinyServer(function(input, output) {
mortality_df <- read.csv('https://raw.githubusercontent.com/RaphaelNash/CUNY-DATA-608-VisualAnalytics/master/lecture3/Raphael_Nash_p1/cleaned-cdc-mortality-1999-2010-2.csv')
mort_reactive <- reactive({input$var
mort_df <- filter(mortality_df, Year %in% c(2010, 1999) ) %>%
filter(ICD.Chapter == input$dx) %>%
select(State, Year, Crude.Rate, Deaths, Population ) %>%
unite ( Deaths_Population_Crude.Rate, Deaths, Population, Crude.Rate) %>%
spread( Year, Deaths_Population_Crude.Rate) %>%
separate( "1999", c( "Deaths_1999", "Population_1999","Crude.Rate_1999" )) %>%
separate( "2010", c("Deaths_2010", "Population_2010","Crude.Rate_2010" ))
mort_df <- mort_df[complete.cases(mort_df), ]
mort_df$Deaths_2010 <- as.numeric(mort_df$Deaths_2010)
mort_df$Population_2010 <- as.numeric(mort_df$Population_2010)
mort_df$Crude.Rate_2010 <- as.numeric(mort_df$Crude.Rate_2010)
mort_df$Deaths_1999 <- as.numeric(mort_df$Deaths_1999)
mort_df$Population_1999 <- as.numeric(mort_df$Population_1999)
mort_df$Crude.Rate_1999 <- as.numeric(mort_df$Crude.Rate_1999)
mort_df$percent_change <- (mort_df$Crude.Rate_2010 - mort_df$Crude.Rate_1999) / mort_df$Crude.Rate_1999 *100
mort_df$percent_change <- round(mort_df$percent_change, 2)
mort_df %>%
arrange((percent_change)) %>%
mutate(State = factor(State, State) ) %>%
mutate(rank = dense_rank((percent_change)))
})
us_average_rate_change <- reactive({
mm <- mort_reactive()
deaths_1999 <- sum( mm$Deaths_1999)
deaths_2010 <- sum( mm$Deaths_2010)
pop_1999 <- sum( mm$Population_1999)
pop_2010 <- sum( mm$Population_2010)
cr_1999 <- (deaths_1999 / pop_1999 )
cr_2010 <- (deaths_2010 / pop_2010 )
round(((cr_2010 - cr_1999 )/cr_1999)*100,2)
})
output$data_table <-DT::renderDataTable({
mm<- mort_reactive() %>%
select(State,percent_change, rank )
colnames(mm) <- c("State", "Mortality Percentage Change", "Rank")
DT::datatable(mm)
})
output$preg_error <- renderText({
err <- ""
if ( input$dx == "Pregnancy, childbirth and the puerperium") {
err <- "Deaths too low to computer percentages"
}
err
})
output$us_average_rate_change_txt <- renderText({
paste ( "Average US change in Crude Mortality Rate: ", us_average_rate_change(), "%" ,sep = "")
})
output$plot <- renderPlot({
ggplot(mort_reactive(), aes(x=State, y=percent_change) )+
geom_bar(stat = "Identity") +
geom_hline(yintercept = us_average_rate_change(), color="blue") +
coord_flip() +
geom_text(aes(label=percent_change) , colour="black" , size = 5 , hjust= -.5) +
ylab ( "Percent Change in Mortality")
}, height = 1000
)
})
|
77f3b25aa4573e6269689de139521391c3aa1f0d
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/jti/man/print.jt.Rd
|
476ab4810d5e409d8d9cd3ecd676ad090981a45e
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 382
|
rd
|
print.jt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api_jt.R
\name{print.jt}
\alias{print.jt}
\title{A print method for junction trees}
\usage{
\method{print}{jt}(x, ...)
}
\arguments{
\item{x}{A junction tree object, \code{jt}.}
\item{...}{For S3 compatability. Not used.}
}
\description{
A print method for junction trees
}
\seealso{
\code{\link{jt}}
}
|
7e16353ad0997cc9f286992c35cc2bb1365edead
|
f2ba53cf640a5bf141b824a4bd29d4020bd94da1
|
/man/usgsimR.Rd
|
a5a11b9e55af20ab1b1672d94f9619e3fdd065f9
|
[
"MIT"
] |
permissive
|
juliocesarsolano/rgslib
|
777bdf86799be43a5543ba068b97910d0b52e267
|
d6c4ac02794bb82ca490ed0b01c2d325ee1757ea
|
refs/heads/master
| 2021-09-29T05:08:56.518514
| 2018-11-23T22:12:08
| 2018-11-23T22:12:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,354
|
rd
|
usgsimR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/usgsimR.R
\name{usgsimR}
\alias{usgsimR}
\title{Simulation Using \code{usgsim} Fortran Program from CCG.}
\usage{
usgsimR(data, mvario, vars, corr, xyz = c("x", "y"), n_realz = 1,
seed = 60221409, grid_def = c(n_x = 50, n_y = 50, n_z = 1, min_x =
0.5, min_y = 0.5, min_z = 0.5, dim_x = 1, dim_y = 1, dim_z = 1),
simout = "sgsim", imputeout = "impute", debuglevel = 0,
n_prev = 12, srchdist = c(10, 10, 10), srchang = c(0, 0, 0),
sortmethod = 0, covarsort = c(1, 1, 1), clip = TRUE, assign = 1,
trans = TRUE, nquant = 200, krigmethod = 2, altflag = 0,
cosim = TRUE, domgrid = NULL)
}
\arguments{
\item{data}{Data frame containing coordinates and variables to condition the
simulations.}
\item{mvario}{Variogram model data frame or list of models as \code{gstat}
class "variogramModel" or "variogramModelList".}
\item{vars}{Character vector of column names to be simulated.}
\item{corr}{Correalation matrix (from \code{cor}) for \code{vars}.}
\item{xyz}{Character vector of coordinate column names.}
\item{n_realz}{Scalar integer number of simulation realizations.}
\item{seed}{Scalar integer seed for renadom number generation.}
\item{grid_def}{Named numeric vector with x, y, and z-axis grid definition:
n_x, n_y, n_z, min_x, min_y, min_z, dim_x, dim_y, dim_z, realz.}
\item{simout}{Name of GeoEase grid file to contain output simulations.}
\item{imputeout}{Name of file to contain imputed data in heterotopic case.}
\item{debuglevel}{Scalar integer either 0 (none) or 1, 2, or 3 for increasing
levels of reporting.}
\item{n_prev}{Scalar integer number of previously simulated nodes to use.}
\item{srchdist}{Numeric vector search readii: x, y, z.}
\item{srchang}{Numeric vector search angles: ang1, ang2, ang3.}
\item{sortmethod}{Scalar numeric: sort by distance (0) or covariance (1).}
\item{covarsort}{Numeric vector: if sorting by covariance, indicate variogram
rock type, head, tail to use.}
\item{clip}{Scalar boolean: clip data to grid.}
\item{assign}{Scalar integer: assign to the grid, 0=none, 1=nearest, and
2=average.}
\item{trans}{Scalar boolean: perform normal score transormation of input
samples and backtransofrmation of simulation.}
\item{nquant}{Scalar integer: number of quantiles to keep from transform.}
\item{krigmethod}{Scalar integer: kriging method: 1=independent, 2=CCK, 4=CK,
5=ICCK, 6=BU.}
\item{altflag}{Scalar integer: option for primary variables if BU; or for
secondary variables if CK.}
\item{cosim}{Scalar boolean: perform cosimulation of multiple variables.}
\item{domgrid}{Data frame containing x, y, and, optionally z coordinate
columns that define the domain grid and a domain or zone field(s). Output
data will be contain domain code for those coorindate points. Coordinate
columns must be the same as \code{xyz}.}
}
\value{
Data frame of simulation results.
}
\description{
\code{usgsimR} is an interface to the CCG simulation program \code{usgsim},
which uses SGS. Some functionality of the Fortran program is not implimented
in this interface:
- Weight field.
- Rock types.
- Seconday variables.
- Normal-score transformation reference distributions.
- Trend data.
- Only writes output format 0: regular.
}
|
76ed38967ac7d1b5126054d3c380a6803ba84553
|
6f796a1015d40c1e0ca7871fcf82a0dc74451914
|
/cachematrix.R
|
0a8d436be8296555165fbed6be12b5d78672ba3c
|
[] |
no_license
|
ManuKandpal/ProgrammingAssignment2
|
5298c3abebf6411cfd041978d800e120c60dc41a
|
e1f08683e16612ff9be7cbc554b93b1d10800fa8
|
refs/heads/master
| 2021-01-20T01:34:43.769098
| 2015-01-25T17:37:12
| 2015-01-25T17:37:12
| 29,823,069
| 0
| 0
| null | 2015-01-25T17:30:55
| 2015-01-25T17:30:54
| null |
UTF-8
|
R
| false
| false
| 1,278
|
r
|
cachematrix.R
|
##pair of functions that cache the inverse of a matrix
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.which is really a list containing a function to
##set the value of the matrix
##get the value of the matrix
##set the value of the inverse of matrix
##get the value of the inverse o matrix
makeCacheMatrix <- function(x=matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(inverse) m <<-inverse
getInverse <- function() m
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
##cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
m <- x$getInverse()
if ( ! is.null(m)) {
print("getting cached data")
return(m)
}
m <- solve(x$get())
x$setInverse(m)
m
}
a <- makeCacheMatrix(matrix(1:4,2))
a$get()
a$getInverse()
a$set(matrix(5:8,2))
a$get()
cacheSolve(a)
cacheSolve(a)
a$getInverse()
b = a$getInverse()
a$get() %*% b
|
a588bd345bb8bcfebe8c8de8bd0602cce435a3a6
|
20b0f38b4bf7f1d50d3328fbab010ee33d80078e
|
/man/check_folds.Rd
|
930f3043227c7c244fba04e9d02f3ad258b00e6b
|
[] |
no_license
|
ntyndall/mltools
|
88927392c32eaf64da948d5fe5b5e1ebd87fcab0
|
ffe824835e867787cb6d73f2b09071c081f168b2
|
refs/heads/master
| 2021-06-27T02:19:08.495641
| 2019-05-26T16:00:07
| 2019-05-26T16:00:07
| 146,769,268
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 201
|
rd
|
check_folds.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_folds.R
\name{check_folds}
\alias{check_folds}
\title{Check Folds}
\usage{
check_folds(data.set, cName, fold.info)
}
|
790f58d3baa58d091ab7f00fadc39276b18241e7
|
32694467865205579b98f15bf738d88c19fb954d
|
/tests/testthat/test_status_api.R
|
fb42be4d649833775ccf713a27cbf8add97ffd33
|
[] |
no_license
|
vjcitn/terraClientR
|
ee0dc11c00b8d707d023d93776637b5622c189b6
|
85ab30d88da3b4c3da9e36a9b2f9dbc7ab5f237a
|
refs/heads/main
| 2023-06-05T04:42:36.218619
| 2021-06-29T15:43:36
| 2021-06-29T15:43:36
| 381,414,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 940
|
r
|
test_status_api.R
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
context("Test StatusApi")
api.instance <- StatusApi$new()
test_that("Error", {
# tests for Error
# base path: http://localhost
# An error status endpoint for load balancing purposes
# @return [Void]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("Health", {
# tests for Health
# base path: http://localhost
# A health status endpoint for load balancing purposes
# @return [Void]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("Status", {
# tests for Status
# base path: http://localhost
# An orchestrated status check that includes a health report of underlying systems
# @return [SystemStatus]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
|
2214a73596be308d6e41a678bf4b64a99737634f
|
5f89a48ae87264a697476717b543215e1ef496aa
|
/scripts/time_series_regression.R
|
ac0559e2608b8e47365df6742390db518e059f97
|
[] |
no_license
|
miloknowles/17.835-finalproject
|
3c9c2d97efa306f9d72401a55339ac4e24aff29e
|
885f80345d2ff9d6903b751f5acf7b53b86bbd93
|
refs/heads/master
| 2020-03-08T23:31:17.939211
| 2018-05-19T00:16:57
| 2018-05-19T00:16:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,611
|
r
|
time_series_regression.R
|
# Time Series Regression
# 17.835 Final Project
# Milo & Jen
# install.packages('DataCombine')
install.packages('xtable')
library(DataCombine)
library(xtable)
# Use the WDI/WGI matched dataset, since we don't care about global terrorism.
data = read.csv("../data/final/data_matched_wdi_wgi.csv")
data$year = as.factor(data$year)
# Variables to try:
# SH.H2O.SAFE.ZS = Access to improved water source (% of pop)
# SH.H2O.BASW.ZS = People using basic drinking water services (% of pop)
# EG.ELC.ACCS.ZS = Access to electricity (% of pop)
# SN.ITK.DEFC.ZS = Percent undernourishment
# AG.PRD.FOOD.XD = Food production index
# SN.ITK.DFCT = Food kilocalorie deficit per person
# See which columns have an acceptable number of NaNs.
summary(data$SH.H2O.SAFE.ZS) # 376
summary(data$SH.H2O.BASW.ZS) # 633
summary(data$EG.ELC.ACCS.ZS) # 444
summary(data$SN.ITK.DEFC.ZS) # 1071
summary(data$AG.PRD.FOOD.XD) # 491
summary(data$SN.ITK.DFCT) # 1517
# Understanding columns.
wdi.codes = read.csv("../data/original/wdi_info.csv")
wdi.codes = wdi.codes[,-c(3)] # Remove long definitions.
################### Create Lagged Dataset ##################
# Note: data must be sorted in ascending data order, at regular intervals.
data.chrono = data[order(data$year),]
# For each country-year pair, store the stability_index_estimate at the next observation.
# Also store several more lookaheads (t+1, t+2, t+3,...).
# Note: Positive slideBy means that we are LEADING the data (getting future values).
data.slid1 = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
GroupVar='country', NewVar='stability_index_estimate_tplus1',
slideBy=1, keepInvalid=FALSE, reminder=TRUE)
data.slid2 = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
GroupVar='country', NewVar='stability_index_estimate_tplus2',
slideBy=2, keepInvalid=FALSE, reminder=TRUE)
data.slid3 = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
GroupVar='country', NewVar='stability_index_estimate_tplus3',
slideBy=3, keepInvalid=FALSE, reminder=TRUE)
data.tm1 = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
GroupVar='country', NewVar='stability_index_estimate_tm1',
slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
data.tm2 = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
GroupVar='country', NewVar='stability_index_estimate_tm2',
slideBy=-2, keepInvalid=FALSE, reminder=TRUE)
data.tm3 = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
GroupVar='country', NewVar='stability_index_estimate_tm3',
slideBy=-3, keepInvalid=FALSE, reminder=TRUE)
#################### SINGLE VARIABLE MODELS ######################
# Variables to try:
# SH.H2O.SAFE.ZS = Access to improved water source (% of pop)
# SH.H2O.BASW.ZS = People using basic drinking water services (% of pop)
# EG.ELC.ACCS.ZS = Access to electricity (% of pop)
# SN.ITK.DEFC.ZS = Percent undernourishment
# AG.PRD.FOOD.XD = Food production index
# SN.ITK.DFCT = Food kilocalorie deficit per person
## SH.H2O.SAFE.ZS = Access to improved water source (% of pop)
model.water_access.tplus1 = lm(stability_index_estimate_tplus1 ~ SH.H2O.SAFE.ZS, data=data.slid1)
model.water_access.tplus2 = lm(stability_index_estimate_tplus2 ~ SH.H2O.SAFE.ZS, data=data.slid2)
model.water_access.tplus3 = lm(stability_index_estimate_tplus3 ~ SH.H2O.SAFE.ZS, data=data.slid3)
summary(model.water_access.tplus1) # R^2 = 0.2607
summary(model.water_access.tplus2) # R^2 = 0.2629
summary(model.water_access.tplus3) # R^2 = 0.2637
plot(x = data.slid1$SH.H2O.SAFE.ZS, y = data.slid1$stability_index_estimate_tplus1,
pch = 1, cex = 0.2, col='blue', main='Stability Index (t+1)',
xlab = 'Access to improved water source (% of pop)', ylab='Stability Index')
abline(model.water_access.tplus1)
## SH.H2O.BASW.ZS = People using basic drinking water services (% of pop)
model.basic_water.tplus1 = lm(stability_index_estimate_tplus1 ~ SH.H2O.BASW.ZS, data=data.slid1)
model.basic_water.tplus2 = lm(stability_index_estimate_tplus2 ~ SH.H2O.BASW.ZS, data=data.slid2)
model.basic_water.tplus3 = lm(stability_index_estimate_tplus3 ~ SH.H2O.BASW.ZS, data=data.slid3)
summary(model.basic_water.tplus1) # R^2 = 0.2566
summary(model.basic_water.tplus2) # R^2 = 0.2572
summary(model.basic_water.tplus3) # R^2 = 0.2591
## SN.ITK.DEFC.ZS = Percent undernourishment
model.undernourishment.tplus1 = lm(stability_index_estimate_tplus1 ~ SN.ITK.DEFC.ZS, data=data.slid1)
model.undernourishment.tplus2 = lm(stability_index_estimate_tplus2 ~ SN.ITK.DEFC.ZS, data=data.slid2)
model.undernourishment.tplus3 = lm(stability_index_estimate_tplus3 ~ SN.ITK.DEFC.ZS, data=data.slid3)
summary(model.undernourishment.tplus1) # R^2 = 0.194
summary(model.undernourishment.tplus2) # R^2 = 0.1894
summary(model.undernourishment.tplus3) # R^2 = 0.1862
## AG.PRD.FOOD.XD = Food production index
# Note: this has a really tiny R^2 = 0.001, so it isn't a good predictor.
model.fpi.tplus1 = lm(stability_index_estimate_tplus1 ~ AG.PRD.FOOD.XD, data=data.slid1)
model.fpi.tplus2 = lm(stability_index_estimate_tplus2 ~ AG.PRD.FOOD.XD, data=data.slid2)
model.fpi.tplus3 = lm(stability_index_estimate_tplus3 ~ AG.PRD.FOOD.XD, data=data.slid3)
summary(model.fpi.tplus1)
summary(model.fpi.tplus2)
summary(model.fpi.tplus3)
## SN.ITK.DFCT = Food kilocalorie deficit per person
# Note: also a small R^2 = 0.0833, not a good predictor.
model.kcaldef.tplus1 = lm(stability_index_estimate_tplus1 ~ SN.ITK.DFCT, data=data.slid1)
model.kcaldef.tplus2 = lm(stability_index_estimate_tplus2 ~ SN.ITK.DFCT, data=data.slid2)
model.kcaldef.tplus3 = lm(stability_index_estimate_tplus3 ~ SN.ITK.DFCT, data=data.slid3)
summary(model.kcaldef.tplus1)
summary(model.kcaldef.tplus2)
summary(model.kcaldef.tplus3)
## EG.ELC.ACCS.ZS = Access to electricity (% of pop)
# R^2 = 0.1706
model.elec.tplus1 = lm(stability_index_estimate_tplus1 ~ EG.ELC.ACCS.ZS, data=data.slid1)
model.elec.tplus2 = lm(stability_index_estimate_tplus2 ~ EG.ELC.ACCS.ZS, data=data.slid2)
model.elec.tplus3 = lm(stability_index_estimate_tplus3 ~ EG.ELC.ACCS.ZS, data=data.slid3)
summary(model.elec.tplus1)
summary(model.elec.tplus2)
summary(model.elec.tplus3)
# Some correlation tests...
length(data.slid1$stability_index_estimate)
length(data.slid1$stability_index_estimate_tplus1)
summary(data.slid1$stability_index_estimate)
summary(data.slid1$stability_index_estimate_tplus1)
# Really strong autocorrelation between stability now and stability next year (expected)
cor(data.slid1$stability_index_estimate, data.slid1$stability_index_estimate_tplus1, use='complete.obs')
# Slightly higher correlation between current stability and access to water than
# next year's stability and access to water.
cor(data.slid1$stability_index_estimate_tplus1, data.slid1$SH.H2O.BASW.ZS, use='complete.obs')
cor(data.slid1$stability_index_estimate, data.slid1$SH.H2O.BASW.ZS, use='complete.obs')
###################### SINGLE VARIABLE MODELS, CONTROLLING FOR CONFOUNDING VARIABLES ##############
model.foodwater.formula = stability_index_estimate_tplus1 ~ SH.H2O.SAFE.ZS + SH.H2O.BASW.ZS +
SN.ITK.DEFC.ZS + AG.PRD.FOOD.XD + stability_index_estimate
model.foodwater.tplus1 = lm(model.foodwater.formula, data=data.slid1)
model.foodwater.tplus2 = lm(model.foodwater.formula, data=data.slid2)
model.foodwater.tplus3 = lm(model.foodwater.formula, data=data.slid3)
summary(model.foodwater.tplus1) # R^2 = 0.2607
summary(model.foodwater.tplus2) # R^2 = 0.2629
summary(model.foodwater.tplus3) # R^2 = 0.2637
###################### DISTRIBUTED LAG MODEL ############################
## SN.ITK.DEFC.ZS
data.all.lagged = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
GroupVar='country', NewVar='stability_index_estimate_tm1',
slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
data.all.lagged = slide(data.all.lagged, Var='SN.ITK.DEFC.ZS', TimeVar='year',
GroupVar='country', NewVar='SN.ITK.DEFC.ZS_tm1',
slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
data.all.lagged = slide(data.all.lagged, Var='SN.ITK.DEFC.ZS', TimeVar='year',
GroupVar='country', NewVar='SN.ITK.DEFC.ZS_tm2',
slideBy=-2, keepInvalid=FALSE, reminder=TRUE)
data.all.lagged = slide(data.all.lagged, Var='SN.ITK.DEFC.ZS', TimeVar='year',
GroupVar='country', NewVar='SN.ITK.DEFC.ZS_tm3',
slideBy=-3, keepInvalid=FALSE, reminder=TRUE)
data.all.lagged$SN.ITK.DEFC.ZS_delta1 = data.all.lagged$SN.ITK.DEFC.ZS - data.all.lagged$SN.ITK.DEFC.ZS_tm1
data.all.lagged$SN.ITK.DEFC.ZS_delta2 = data.all.lagged$SN.ITK.DEFC.ZS_tm1 - data.all.lagged$SN.ITK.DEFC.ZS_tm2
data.all.lagged$SN.ITK.DEFC.ZS_delta3 = data.all.lagged$SN.ITK.DEFC.ZS_tm2 - data.all.lagged$SN.ITK.DEFC.ZS_tm3
model.all.lagged.formula = stability_index_estimate ~ stability_index_estimate_tm1 +
SN.ITK.DEFC.ZS_delta1 + SN.ITK.DEFC.ZS_delta2 + SN.ITK.DEFC.ZS_delta3 # SN.ITK.DEFC.ZS_tm1 + SN.ITK.DEFC.ZS
model.all.lagged = lm(model.all.lagged.formula, data=data.all.lagged)
summary(model.all.lagged)
## AG.PRD.FOOD.XD
# Lag the stability index estimate.
data.foodprod.lagged = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
GroupVar='country', NewVar='stability_index_estimate_tm1',
slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
# Lag the food production index.
data.foodprod.lagged = slide(data.foodprod.lagged, Var='AG.PRD.FOOD.XD', TimeVar='year',
GroupVar='country', NewVar='AG.PRD.FOOD.XD_tm1',
slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
data.foodprod.lagged = slide(data.foodprod.lagged, Var='AG.PRD.FOOD.XD', TimeVar='year',
GroupVar='country', NewVar='AG.PRD.FOOD.XD_tm2',
slideBy=-2, keepInvalid=FALSE, reminder=TRUE)
data.foodprod.lagged = slide(data.foodprod.lagged, Var='AG.PRD.FOOD.XD', TimeVar='year',
GroupVar='country', NewVar='AG.PRD.FOOD.XD_tm3',
slideBy=-3, keepInvalid=FALSE, reminder=TRUE)
data.foodprod.lagged$SIE_delta1 = data.foodprod.lagged$stability_index_estimate - data.foodprod.lagged$stability_index_estimate_tm1
data.foodprod.lagged$SIE_delta2 = data.foodprod.lagged$stability_index_estimate_tm1 - data.foodprod.lagged$stability_index_estimate_tm2
data.foodprod.lagged$SIE_delta3 = data.foodprod.lagged$stability_index_estimate_tm2 - data.foodprod.lagged$stability_index_estimate_tm3
data.foodprod.lagged$AG.PRD.FOOD.XD_delta1 = data.foodprod.lagged$AG.PRD.FOOD.XD - data.foodprod.lagged$AG.PRD.FOOD.XD_tm1
data.foodprod.lagged$AG.PRD.FOOD.XD_delta2 = data.foodprod.lagged$AG.PRD.FOOD.XD_tm1 - data.foodprod.lagged$AG.PRD.FOOD.XD_tm2
data.foodprod.lagged$AG.PRD.FOOD.XD_delta3 = data.foodprod.lagged$AG.PRD.FOOD.XD_tm2 - data.foodprod.lagged$AG.PRD.FOOD.XD_tm3
model.foodprod.lagged.formula = stability_index_estimate ~ stability_index_estimate_tm1 +
AG.PRD.FOOD.XD_delta1 + AG.PRD.FOOD.XD_delta2 + AG.PRD.FOOD.XD_delta3
model.foodprod.lagged = lm(model.foodprod.lagged.formula, data=data.foodprod.lagged)
summary(model.foodprod.lagged)
############################# Access to basic drinking water.
data.basicwater.lagged = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
GroupVar='country', NewVar='stability_index_estimate_tm1',
slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
# --- Basic water access (SH.H2O.BASW.ZS): distributed-lag setup -------------
# slide() (DataCombine) builds within-country lags of a panel variable:
# slideBy = -k creates a t-k lag of Var as NewVar.  keepInvalid=FALSE drops
# observations for which a valid lag cannot be formed.
data.basicwater.lagged = slide(data.basicwater.lagged, Var='SH.H2O.BASW.ZS', TimeVar='year',
                               GroupVar='country', NewVar='SH.H2O.BASW.ZS_tm1',
                               slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
data.basicwater.lagged = slide(data.basicwater.lagged, Var='SH.H2O.BASW.ZS', TimeVar='year',
                               GroupVar='country', NewVar='SH.H2O.BASW.ZS_tm2',
                               slideBy=-2, keepInvalid=FALSE, reminder=TRUE)
data.basicwater.lagged = slide(data.basicwater.lagged, Var='SH.H2O.BASW.ZS', TimeVar='year',
                               GroupVar='country', NewVar='SH.H2O.BASW.ZS_tm3',
                               slideBy=-3, keepInvalid=FALSE, reminder=TRUE)
# First differences of the water-access series at successive lags.
data.basicwater.lagged$SH.H2O.BASW.ZS_delta1 = data.basicwater.lagged$SH.H2O.BASW.ZS - data.basicwater.lagged$SH.H2O.BASW.ZS_tm1
data.basicwater.lagged$SH.H2O.BASW.ZS_delta2 = data.basicwater.lagged$SH.H2O.BASW.ZS_tm1 - data.basicwater.lagged$SH.H2O.BASW.ZS_tm2
data.basicwater.lagged$SH.H2O.BASW.ZS_delta3 = data.basicwater.lagged$SH.H2O.BASW.ZS_tm2 - data.basicwater.lagged$SH.H2O.BASW.ZS_tm3
# Lagged dependent variable plus three lagged differences of the regressor.
model.basicwater.lagged.formula = stability_index_estimate ~ stability_index_estimate_tm1 +
  SH.H2O.BASW.ZS_delta1 + SH.H2O.BASW.ZS_delta2 + SH.H2O.BASW.ZS_delta3
model.basicwater.lagged = lm(model.basicwater.lagged.formula, data=data.basicwater.lagged)
summary(model.basicwater.lagged)
############################# Percent undernourishment
# --- Undernourishment (SN.ITK.DEFC.ZS): same distributed-lag pattern --------
# Starts from data.chrono and re-creates the lagged stability index.
data.undernourished.lagged = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
                                   GroupVar='country', NewVar='stability_index_estimate_tm1',
                                   slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
data.undernourished.lagged = slide(data.undernourished.lagged, Var='SN.ITK.DEFC.ZS', TimeVar='year',
                                   GroupVar='country', NewVar='SN.ITK.DEFC.ZS_tm1',
                                   slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
data.undernourished.lagged = slide(data.undernourished.lagged, Var='SN.ITK.DEFC.ZS', TimeVar='year',
                                   GroupVar='country', NewVar='SN.ITK.DEFC.ZS_tm2',
                                   slideBy=-2, keepInvalid=FALSE, reminder=TRUE)
data.undernourished.lagged = slide(data.undernourished.lagged, Var='SN.ITK.DEFC.ZS', TimeVar='year',
                                   GroupVar='country', NewVar='SN.ITK.DEFC.ZS_tm3',
                                   slideBy=-3, keepInvalid=FALSE, reminder=TRUE)
data.undernourished.lagged$SN.ITK.DEFC.ZS_delta1 = data.undernourished.lagged$SN.ITK.DEFC.ZS - data.undernourished.lagged$SN.ITK.DEFC.ZS_tm1
data.undernourished.lagged$SN.ITK.DEFC.ZS_delta2 = data.undernourished.lagged$SN.ITK.DEFC.ZS_tm1 - data.undernourished.lagged$SN.ITK.DEFC.ZS_tm2
data.undernourished.lagged$SN.ITK.DEFC.ZS_delta3 = data.undernourished.lagged$SN.ITK.DEFC.ZS_tm2 - data.undernourished.lagged$SN.ITK.DEFC.ZS_tm3
model.undernourished.lagged.formula = stability_index_estimate ~ stability_index_estimate_tm1 +
  SN.ITK.DEFC.ZS_delta1 + SN.ITK.DEFC.ZS_delta2 + SN.ITK.DEFC.ZS_delta3
model.undernourished.lagged = lm(model.undernourished.lagged.formula, data=data.undernourished.lagged)
summary(model.undernourished.lagged)
############################### Kcal deficit
# --- Kilocalorie deficit (SN.ITK.DFCT): same distributed-lag pattern --------
data.kcaldef.lagged = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
                            GroupVar='country', NewVar='stability_index_estimate_tm1',
                            slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
data.kcaldef.lagged = slide(data.kcaldef.lagged, Var='SN.ITK.DFCT', TimeVar='year',
                            GroupVar='country', NewVar='SN.ITK.DFCT_tm1',
                            slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
data.kcaldef.lagged = slide(data.kcaldef.lagged, Var='SN.ITK.DFCT', TimeVar='year',
                            GroupVar='country', NewVar='SN.ITK.DFCT_tm2',
                            slideBy=-2, keepInvalid=FALSE, reminder=TRUE)
data.kcaldef.lagged = slide(data.kcaldef.lagged, Var='SN.ITK.DFCT', TimeVar='year',
                            GroupVar='country', NewVar='SN.ITK.DFCT_tm3',
                            slideBy=-3, keepInvalid=FALSE, reminder=TRUE)
data.kcaldef.lagged$SN.ITK.DFCT_delta1 = data.kcaldef.lagged$SN.ITK.DFCT - data.kcaldef.lagged$SN.ITK.DFCT_tm1
data.kcaldef.lagged$SN.ITK.DFCT_delta2 = data.kcaldef.lagged$SN.ITK.DFCT_tm1 - data.kcaldef.lagged$SN.ITK.DFCT_tm2
data.kcaldef.lagged$SN.ITK.DFCT_delta3 = data.kcaldef.lagged$SN.ITK.DFCT_tm2 - data.kcaldef.lagged$SN.ITK.DFCT_tm3
model.kcaldef.lagged.formula = stability_index_estimate ~ stability_index_estimate_tm1 +
  SN.ITK.DFCT_delta1 + SN.ITK.DFCT_delta2 + SN.ITK.DFCT_delta3
model.kcaldef.lagged = lm(model.kcaldef.lagged.formula, data=data.kcaldef.lagged)
summary(model.kcaldef.lagged)
################# Electricity
# --- Electricity access (EG.ELC.ACCS.ZS): five lags instead of three --------
data.elec.lagged = slide(data.chrono, Var='stability_index_estimate', TimeVar='year',
                         GroupVar='country', NewVar='stability_index_estimate_tm1',
                         slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
data.elec.lagged = slide(data.elec.lagged, Var='EG.ELC.ACCS.ZS', TimeVar='year',
                         GroupVar='country', NewVar='EG.ELC.ACCS.ZS_tm1',
                         slideBy=-1, keepInvalid=FALSE, reminder=TRUE)
data.elec.lagged = slide(data.elec.lagged, Var='EG.ELC.ACCS.ZS', TimeVar='year',
                         GroupVar='country', NewVar='EG.ELC.ACCS.ZS_tm2',
                         slideBy=-2, keepInvalid=FALSE, reminder=TRUE)
data.elec.lagged = slide(data.elec.lagged, Var='EG.ELC.ACCS.ZS', TimeVar='year',
                         GroupVar='country', NewVar='EG.ELC.ACCS.ZS_tm3',
                         slideBy=-3, keepInvalid=FALSE, reminder=TRUE)
data.elec.lagged = slide(data.elec.lagged, Var='EG.ELC.ACCS.ZS', TimeVar='year',
                         GroupVar='country', NewVar='EG.ELC.ACCS.ZS_tm4',
                         slideBy=-4, keepInvalid=FALSE, reminder=TRUE)
data.elec.lagged = slide(data.elec.lagged, Var='EG.ELC.ACCS.ZS', TimeVar='year',
                         GroupVar='country', NewVar='EG.ELC.ACCS.ZS_tm5',
                         slideBy=-5, keepInvalid=FALSE, reminder=TRUE)
data.elec.lagged$EG.ELC.ACCS.ZS_delta1 = data.elec.lagged$EG.ELC.ACCS.ZS - data.elec.lagged$EG.ELC.ACCS.ZS_tm1
data.elec.lagged$EG.ELC.ACCS.ZS_delta2 = data.elec.lagged$EG.ELC.ACCS.ZS_tm1 - data.elec.lagged$EG.ELC.ACCS.ZS_tm2
data.elec.lagged$EG.ELC.ACCS.ZS_delta3 = data.elec.lagged$EG.ELC.ACCS.ZS_tm2 - data.elec.lagged$EG.ELC.ACCS.ZS_tm3
data.elec.lagged$EG.ELC.ACCS.ZS_delta4 = data.elec.lagged$EG.ELC.ACCS.ZS_tm3 - data.elec.lagged$EG.ELC.ACCS.ZS_tm4
data.elec.lagged$EG.ELC.ACCS.ZS_delta5 = data.elec.lagged$EG.ELC.ACCS.ZS_tm4 - data.elec.lagged$EG.ELC.ACCS.ZS_tm5
model.elec.lagged.formula = stability_index_estimate ~ stability_index_estimate_tm1 +
  EG.ELC.ACCS.ZS_delta1 + EG.ELC.ACCS.ZS_delta2 + EG.ELC.ACCS.ZS_delta3 +
  EG.ELC.ACCS.ZS_delta4 + EG.ELC.ACCS.ZS_delta5
model.elec.lagged = lm(model.elec.lagged.formula, data=data.elec.lagged)
summary(model.elec.lagged)
########################## Distributed lag for all vars
# Combine the per-variable lagged frames into one panel.  Merging on the
# intersection of column names keeps the shared country/year keys aligned.
data.all.lagged = merge(data.basicwater.lagged, data.foodprod.lagged, by=intersect(names(data.basicwater.lagged),
                        names(data.foodprod.lagged)), all.x = TRUE, all.y = TRUE)
# NOTE(review): this second merge reuses the basicwater/foodprod name sets as
# the join key rather than names(data.all.lagged) -- confirm this is intended.
data.all.lagged = merge(data.all.lagged, data.undernourished.lagged, by=intersect(names(data.basicwater.lagged),
                        names(data.foodprod.lagged)), all.x = TRUE, all.y = TRUE)
# Drop the leftover row-index column (X).
data.all.lagged = subset(data.all.lagged, select=-c(X))
# Additional lags of the dependent variable (tm1 already exists after merge).
data.all.lagged = slide(data.all.lagged, Var='stability_index_estimate', TimeVar='year',
                        GroupVar='country', NewVar='stability_index_estimate_tm2',
                        slideBy=-2, keepInvalid=FALSE, reminder=TRUE)
data.all.lagged = slide(data.all.lagged, Var='stability_index_estimate', TimeVar='year',
                        GroupVar='country', NewVar='stability_index_estimate_tm3',
                        slideBy=-3, keepInvalid=FALSE, reminder=TRUE)
# First differences of the stability index itself.
data.all.lagged$sie_delta1 = data.all.lagged$stability_index_estimate - data.all.lagged$stability_index_estimate_tm1
data.all.lagged$sie_delta2 = data.all.lagged$stability_index_estimate_tm1 - data.all.lagged$stability_index_estimate_tm2
data.all.lagged$sie_delta3 = data.all.lagged$stability_index_estimate_tm2 - data.all.lagged$stability_index_estimate_tm3
# Change-on-change model; third lags are commented out of the specification.
model.all.lagged.formula = sie_delta1 ~ sie_delta2 + # sie_delta3 +
  SN.ITK.DEFC.ZS_delta1 + SN.ITK.DEFC.ZS_delta2 + # SN.ITK.DEFC.ZS_delta3 +
  SH.H2O.BASW.ZS_delta1 + SH.H2O.BASW.ZS_delta2 + # SH.H2O.BASW.ZS_delta3 +
  AG.PRD.FOOD.XD_delta1 + AG.PRD.FOOD.XD_delta2 # + # AG.PRD.FOOD.XD_delta3
model.all.lagged = lm(model.all.lagged.formula, data=data.all.lagged)
summary(model.all.lagged)
# LaTeX table of the fitted model.
table = xtable(model.all.lagged)
print(table)
# Plot water access (1-year lag) against the change in the stability index,
# with the fitted regression line for that coefficient overlaid.
plot(x = data.all.lagged$SH.H2O.BASW.ZS_delta1, y = data.all.lagged$sie_delta1,
     cex=0.8, col='blue', xlab='Change in Basic Water Access (D1)',
     ylab='Change in Stability Index (D1)',
     main='Change in Stability Index vs. Basic Water Access (1 Year Lag)')
abline(model.all.lagged$coefficients['(Intercept)'],
       model.all.lagged$coefficients['SH.H2O.BASW.ZS_delta1'],
       col='black', lwd=2)
# Same plot for the 2-year-lag coefficient.
plot(x = data.all.lagged$SH.H2O.BASW.ZS_delta2, y = data.all.lagged$sie_delta1,
     cex=0.8, col='blue', xlab='Change in Basic Water Access (D2)',
     ylab='Change in Stability Index (D1)',
     main='Change in Stability Index vs. Basic Water Access (2 Year Lag)')
abline(model.all.lagged$coefficients['(Intercept)'],
       model.all.lagged$coefficients['SH.H2O.BASW.ZS_delta2'],
       col='black', lwd=2)
|
a362761002adb19d8bbc58147636f157af7d4d43
|
7ee92c10a10a1226270e9d7d4bad14bb8fd819b9
|
/man/new_yaml.Rd
|
8c8b58d7849684330767f6d4638212badc5aedd7
|
[] |
no_license
|
harcharansidhu/swirlify
|
7af8a720bd1570df9e189bc2adda7339a3bcb8b7
|
cdc3837215b3e2574866246491633fafeba98aef
|
refs/heads/master
| 2021-01-15T21:49:38.822788
| 2014-07-21T12:15:40
| 2014-07-21T12:15:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 323
|
rd
|
new_yaml.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{new_yaml}
\alias{new_yaml}
\title{Create new yaml lesson using yaml_writer}
\usage{
new_yaml(lesson_name, course_name)
}
\arguments{
\item{lesson_name}{Name of lesson}
\item{course_name}{Name of course}
}
\description{
Create new yaml lesson using yaml_writer
}
|
9bffbfabc3b2ed9cef6e38129d037a2be136a3fc
|
1fe29b29498b2d0dd63f18038dd8105e721b65fa
|
/analysis.R
|
8df147c383adeb6e4c7ba0d3149092a32e2ed680
|
[] |
no_license
|
khyejin1231/SocialNetworkAnalysis
|
85e541b304dfe724853609f11408537187eedff9
|
87bd13fb1500dd2dba59da5e25c04de0e9a9732d
|
refs/heads/main
| 2023-08-04T05:18:22.586334
| 2021-09-30T19:56:26
| 2021-09-30T19:56:26
| 408,346,891
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,840
|
r
|
analysis.R
|
#https://igraph.org/r/doc/igraph.pdf
# --- Package installation (one-off; comment out after the first run) --------
install.packages("readxl")
install.packages("linkcomm")
install.packages("igraph")
install.packages("NetData")
install.packages(c("sna","triads","psych",'nFactors','GPArotation','NetCluster'))
install.packages("leiden")
install.packages("RColorBrewer")
install.packages("ExPosition")
install.packages("cppRouting")
install.packages("ggplot2")
# --- Libraries used throughout the analysis ---------------------------------
library(ExPosition)
library(RColorBrewer)
library(leiden)  # FIX: was library(lenden) -- typo; the installed package is "leiden"
library(readxl)
library(linkcomm)
library(igraph)
library(sna)
library(triads)
library(psych)
library(nFactors)
library(GPArotation)
library(NetCluster)
library(cppRouting)
library(dplyr)
library(ggplot2)
# Read the 29x29 weighted adjacency matrix; drop the first (label) column.
data <- read_excel("Department_Data.xlsx")
data <- data[,2:30]
isSymmetric(as.matrix(data)) # FALSE: the matrix is not symmetric (directed ties)
############################################################################Data
# Reshape the adjacency matrix into a long edge list A with columns
# V1 = source, V2 = target, V3 = weight, keeping only positive weights.
A <- data.frame("V3" = NA)
# All columns are renamed "V3" so that rbind() can stack them one at a time.
colnames(data)[1:29] <- "V3"
for(i in 1:29){
  # Prepend column i, then drop the NA seed row introduced above.
  # NOTE(review): growing A with rbind() inside a loop is O(n^2); kept as-is
  # to preserve the exact row ordering the rest of the script relies on.
  A <- rbind(data[,i], A, make.row.names = TRUE)
  A <- na.omit(A)
}
# Reconstruct the (source, target) indices implied by the stacking order.
A$V1 <- rep(1:29,each=29)
A$V2 <- rep(1:29, 29)
A <- subset(A, A$V3>0)          # keep only existing (positive-weight) edges
A <- A[,c("V1", "V2", "V3")]
###################################################################### Question 1.
# Hierarchical (Ward) clustering, treating the weight matrix as distances.
data <- as.matrix(data)
d <- as.dist(data)   # lower triangle of the matrix interpreted as distances
?as.dist
g1 <- graph_from_adjacency_matrix(as.matrix(data), mode = c("directed"),
weighted = TRUE, diag = FALSE)
cc <- hclust(d, method = "ward.D")
plot(cc)
clusters.list <- rect.hclust(cc, k= 2, border = "blue")   # highlight k=2 cut
clusters <- cutree(cc, k= 2)
node.cols <- brewer.pal(max(c(3,clusters)), "Pastel1")[clusters]
# NOTE(review): `coords` is only defined further down (layout_with_fr(g1));
# run that line first when executing the script top to bottom.
plot(g1, vertex.color = node.cols,layout = coords)
# FIX: reference URL was a bare line, which is a parse error in R; kept as a comment.
# https://igraph.org/r/doc/sample_pa.html
# NOTE(review): cluster_fast_greedy() works on undirected graphs only and may
# error on the directed g1.
fc <- cluster_fast_greedy(g1)
plot_dendrogram(fc)
###################################################################### node-based
# Node-based community detection with the Leiden algorithm, on both the raw
# and the row-normalised adjacency matrices.
rownames(data) <- c(1:29)
colnames(data) <- c(1:29)
g1 <- graph_from_adjacency_matrix(as.matrix(data), mode = c("directed"),
weighted = TRUE, diag = FALSE)
coords <- layout_with_fr(g1)                     # Fruchterman-Reingold layout
plot(g1, layout=coords, vertex.label = NA, vertex.size = 5)
B <- get.adjacency(g1, sparse = FALSE)           # dense adjacency for leiden()
?cluster_optimal
partition <- leiden(B)
table(B)
node.cols <- brewer.pal(max(c(3,partition)), "Pastel1")[partition]
plot(g1, vertex.color = node.cols)
# --- Row-normalise the matrix so each row sums to 1 -------------------------
data_norm <- data
row_sum <- apply(data, 1, function(x)(sum(x)))
# FIX: removed stray `data_norm <- (data[1,]/row_sum[1])`, which replaced the
# matrix with a plain vector and made the `data_norm[i,]` assignment below
# fail with "incorrect number of dimensions".  The loop initialises every row.
for (i in 1:29){
  data_norm[i,] <- data[i,]/row_sum[i]
}
g2 <- graph_from_adjacency_matrix(as.matrix(data_norm), mode = c("directed"),
weighted = TRUE, diag = FALSE)
C <- get.adjacency(g2, sparse = FALSE)
partition <- leiden(C)
node.cols <- brewer.pal(max(c(3,partition)), "Pastel1")[partition]
plot(g2, vertex.color = node.cols)
# question 2.
# a. Hierarchical clustering, cut at k = 5.
# FIX: hclust() requires a "dist" object; passing the raw matrix errors.
# Also renamed the deprecated method "ward" to "ward.D" (consistent with the
# hclust call in Question 1).
cc <- hclust(as.dist(data), method = "ward.D")
plot(cc)
clusters.list <- rect.hclust(cc, k= 5, border = "blue")
clusters <- cutree(cc, k= 5)
node.cols <- brewer.pal(max(c(3,clusters)), "Pastel1")[clusters]
plot(g1, vertex.color = node.cols,layout = coords)
# b. Link (edge) communities on the edge list A.
A$V1 <- as.numeric(A$V1)
A$V2 <- as.numeric(A$V2)
A$V3 <- as.numeric(A$V3)
A <- as.matrix(A)
?getLinkCommunities
lc <- getLinkCommunities(A, hcmethod = "ward.D" ,directed = TRUE)
#question3.
##############################################################Dijkstra algorithm
A[,3] <- 1/A[,3] #First, we inverse the weights
head(A)
colnames(A) <- c("from", "to", 'weight')
graph <- makegraph(A, directed = T, coords = NULL)
graph$nbnode
graph$dict$ref
get_distance_matrix(graph, 1, 2, algorithm = "mch", allcores = TRUE)
get_path_pair(graph, 22,1,algorithm = "Dijkstra")
######################################################Edge.betweenness.community
edge_cluster <- cluster_edge_betweenness(g1, weights = E(g1)$weight, directed = TRUE,
edge.betweenness = TRUE, merges = TRUE, bridges = TRUE,
modularity = FALSE, membership = TRUE)
membership(edge_cluster)
cut <- cutat(edge_cluster, 2)
colors <- rainbow(10)
plot(g1, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=8)
cut <- cutat(edge_cluster, 3)
plot(g1, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=8)
cut <- cutat(edge_cluster, 4)
plot(g1, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=8)
cut <- cutat(edge_cluster, 5)
plot(g1, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=8)
cut <- cutat(edge_cluster, 6)
plot(g1, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=8)
cut <- cutat(edge_cluster, 7)
plot(g1, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=8)
plot_dendrogram(edge_cluster)
cut <- cutat(edge_cluster, 7)
plot(g1,edge.width = 0, layout = layout_in_circle,
vertex.color = colors[cut], xlim = c(-3.5, 3.5),
ylim = c(-3.5, 3.5),vertex.size=degree(g1, mode="all")*0.5, edge.color= "white",
vertex.label=NA)
par(new=TRUE)
## norm: repeat edge-betweenness community detection on the row-normalised
## graph g2, with cuts at k = 2..7 and concentric circular visualisations.
edge_cluster <- cluster_edge_betweenness(g2, weights = E(g2)$weight, directed = TRUE,
edge.betweenness = TRUE, merges = TRUE, bridges = TRUE,
modularity = FALSE, membership = TRUE)
membership(edge_cluster)==1
cut <- cutat(edge_cluster, 2)
colors <- rainbow(10)
plot(g2, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=8, edge.width = log(edge.betweenness(g2)))
cut <- cutat(edge_cluster, 3)
plot(g2, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=8)
cut <- cutat(edge_cluster, 4)
plot(g2, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=8)
cut <- cutat(edge_cluster, 5)
plot(g2, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=8)
cut <- cutat(edge_cluster, 6)
plot(g2, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=8)
cut <- cutat(edge_cluster, 7)
plot(g2, vertex.color = colors[cut], edge.arrow.size=0.5, vertex.size=degree(g2, mode="all")/2,
xlim = c(-1.5, 1.5), ylim = c(-1.5, 1.5))
plot_dendrogram(edge_cluster)
# image size: 1128x825
(membership(edge_cluster)==1)
# visualization_norm: successive cuts overlaid via par(new=TRUE) with growing
# axis limits, producing concentric rings (inner = fewer communities).
cut <- cutat(edge_cluster, 2)
plot(g2,edge.width = 0, layout = layout_in_circle,
vertex.color = colors[cut], xlim = c(-0.75, 0.75),
ylim = c(-0.75, 0.75),vertex.size=degree(g2, mode="all")*0.5, edge.color= "white",
vertex.label=NA)
par(new=TRUE)
cut <- cutat(edge_cluster, 3)
plot(g2,edge.width = 0, layout = layout_in_circle,
vertex.color = colors[cut], xlim = c(-1, 1),
ylim = c(-1, 1),vertex.size=degree(g2, mode="all")*0.5, edge.color= "white",
vertex.label=NA)
par(new=TRUE)
cut <- cutat(edge_cluster, 4)
plot(g2,edge.width = 0, layout = layout_in_circle,
vertex.color = colors[cut], xlim = c(-1.4, 1.4),
ylim = c(-1.4, 1.4),vertex.size=degree(g2, mode="all")*0.5, edge.color= "white",
vertex.label=NA)
par(new=TRUE)
cut <- cutat(edge_cluster, 5)
plot(g2,edge.width = 0, layout = layout_in_circle,
vertex.color = colors[cut], xlim = c(-2, 2),
ylim = c(-2, 2),vertex.size=degree(g2, mode="all")*0.5, edge.color= "white",
vertex.label=NA)
par(new=TRUE)
cut <- cutat(edge_cluster, 6)
plot(g2,edge.width = 0, layout = layout_in_circle,
vertex.color = colors[cut], xlim = c(-2.7, 2.7),
ylim = c(-2.7, 2.7),vertex.size=degree(g2, mode="all")*0.5, edge.color= "white",
vertex.label=NA)
par(new=TRUE)
cut <- cutat(edge_cluster, 7)
plot(g2,edge.width = 0, layout = layout_in_circle,
vertex.color = colors[cut], xlim = c(-3.5, 3.5),
ylim = c(-3.5, 3.5),vertex.size=degree(g2, mode="all")*0.5, edge.color= "white",
vertex.label=NA)
# subgraph: nodes in community 1 of the k = 7 cut, drawn alone with edge
# widths proportional to log edge betweenness.
g3 <- induced_subgraph(g2, which(cutat(edge_cluster, 7)==1))
plot(g3,edge.width = log(edge.betweenness(g3)), layout = layout_in_circle, vertex.color = colors[1], xlim = c(-1, 1),
ylim = c(-1, 1),vertex.size=degree(g3, mode="all")/2, edge.color= "white")
par(new=TRUE)
centr_degree(g3, mode = c("all"), normalized = TRUE)$res # node-level degree centrality
######################################################################## walktrap
# Cluster on mean-recurrence-time distances of the random walk defined by the
# row-normalised matrix (treated as a Markov transition matrix P).
# Open questions noted by the author:
#   - How exactly to go from P to the mean-recurrence matrix M (formula)?
#   - Can the chain get "blocked" (absorbing/disconnected states) in M?
#   - Should M be symmetrised by averaging before clustering?
# MRT_distance() is defined in the sourced helper script (not shown here).
source("Mean_recurrence.R")
install.packages(c("expm","matlib"))
library(expm)
library(matlib)
X <- as.matrix(data_norm)
Y <- MRT_distance(X, 10)       # second argument presumably a horizon/iteration count -- see Mean_recurrence.R
Y <- as.dist(Y, diag=TRUE)
hc <- hclust(Y)                # default linkage ("complete")
plot(hc)
##################################################################### Question 2.
# NOTE(review): this section repeats the node-based setup from earlier in the
# script (graph construction, leiden partition, row-normalisation).
g1 <- graph_from_adjacency_matrix(as.matrix(data), mode = c("directed"),
weighted = TRUE, diag = FALSE)
coords <- layout_with_fr(g1)
plot(g1, layout=coords, vertex.label = NA, vertex.size = 5)
B <- get.adjacency(g1, sparse = FALSE)
?cluster_optimal
partition <- leiden(B)
table(B)
node.cols <- brewer.pal(max(c(3,partition)), "Pastel1")[partition]
plot(g1, vertex.color = node.cols)
# --- Row-normalise the matrix so each row sums to 1 -------------------------
data_norm <- data
row_sum <- apply(data, 1, function(x)(sum(x)))
# FIX: removed stray `data_norm <- (data[1,]/row_sum[1])`, which replaced the
# matrix with a vector and made the `data_norm[i,]` assignment below fail.
for (i in 1:29){
  data_norm[i,] <- data[i,]/row_sum[i]
}
g2 <- graph_from_adjacency_matrix(as.matrix(data_norm), mode = c("directed"),
weighted = TRUE, diag = FALSE)
C <- get.adjacency(g2, sparse = FALSE)
partition <- leiden(C)
|
412ad6f28c946c9cbb803998f095460bd61041ff
|
a39190a4a53465d1711a5bbc600d86d1ff7a2640
|
/MoreFilesYouNeed/squaresPack/man/addSquares.Rd
|
2e2cabba2a0dc13cc6a8f10fc6673ffee3634e56
|
[] |
no_license
|
davidflast/Class
|
f2638d94605128712c22079aac81aca1772d18d8
|
ba210e8d750c2cfb9cf66f3f084ee5beaf895f0c
|
refs/heads/master
| 2021-01-18T05:49:07.305157
| 2016-03-10T20:19:05
| 2016-03-10T20:19:05
| 51,100,788
| 0
| 0
| null | 2016-02-04T19:45:54
| 2016-02-04T19:45:54
| null |
UTF-8
|
R
| false
| true
| 730
|
rd
|
addSquares.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addSquares.R
\name{addSquares}
\alias{addSquares}
\alias{addSquares,ANY-method}
\title{Adding squared values}
\usage{
addSquares(x, y, ...)
}
\arguments{
\item{x}{A numeric object}
\item{y}{A numeric object with the same dimensionality as \code{x}.}
}
\value{
An object of class Squares containing
\item{squares}{The sum of the squared values}
\item{x}{The first object input}
\item{y}{The second object input}
}
\description{
Finds the sum of squared numbers
}
\note{
This is a very simple function
}
\examples{
myX <- c(20, 3)
myY <- c(-2, 4.1)
addSquares(myX, myY)
}
\author{
Jacob M. Montgomery
}
\seealso{
\code{\link{subtractSquares}}
}
|
553cec9c82226ca6bd13b19ae7e12bfb2beddb46
|
a1a3b1005edfb61644549ef688cdb25eefc17ae3
|
/Rscript/Script_4.R
|
d16ad96b9b073fb967f4afef20455d56b9973175
|
[] |
no_license
|
ibarrioh/Network_expansion
|
d61272db69c521c369e3699dc5c2df89d00afb3a
|
4ae655542fce3e6b19cfdd319f09f5f0aec9635b
|
refs/heads/main
| 2023-05-27T05:50:30.531542
| 2023-05-15T11:54:03
| 2023-05-15T11:54:03
| 521,727,385
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 679
|
r
|
Script_4.R
|
#### Jaccard score calculations (genes from significant modules, figures 3 & 4)
#' Jaccard similarity between two gene sets.
#'
#' @param set1,set2 Vectors of gene identifiers (may contain duplicates).
#' @return |set1 n set2| / |set1 u set2| as a scalar in [0, 1]
#'   (NaN when both inputs are empty).
# FIX: the original counted duplicated elements of set1 in the numerator
# (sum(set1 %in% set2)) while deduplicating the denominator, which inflated
# the score whenever set1 contained repeats.  Deduplicate both sets first so
# the result is a true Jaccard index.
jaccard.IB<-function(set1,set2){
  s1 = unique(set1)
  s2 = unique(set2)
  jac = length(intersect(s1, s2)) / length(union(s1, s2))
  return(jac)
}
######
# Fill the jaccIndx column of the precomputed pair table jaccard.KS with the
# Jaccard similarity between the gene lists named in columns A and B.
setwd("/tables_expansion")
#### This file is compressed using 7z (jaccard_KS.7z) and should be decompressed prior to run
jaccard.KS=readRDS("/tables_expansion/jaccard_KS.rds")
geneList=readRDS("/tables_expansion/genesList_KS.rds")
for(i in 1:nrow(jaccard.KS)){
  # geneList is indexed by the names stored in columns A/B; unlist() flattens
  # the selected list element into a plain vector of gene ids.
  jaccard.KS[i,"jaccIndx"]=jaccard.IB(unlist(geneList[jaccard.KS[i,"A"]]),
                                      unlist(geneList[jaccard.KS[i,"B"]]))
}
saveRDS(jaccard.KS,"/tables_expansion/jaccard_KS_results.rds")
|
e44e6a98ebabc601682ee74d818163cab9a767a1
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlepubsubv1beta1a.auto/man/PubsubEvent.Rd
|
54956cd4bfa34fc38c27ef5bcc9df840aacf2e6a
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 710
|
rd
|
PubsubEvent.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pubsub_objects.R
\name{PubsubEvent}
\alias{PubsubEvent}
\title{PubsubEvent Object}
\usage{
PubsubEvent(truncated = NULL, deleted = NULL, message = NULL,
subscription = NULL)
}
\arguments{
\item{truncated}{Indicates that this subscription has been truncated}
\item{deleted}{Indicates that this subscription has been deleted}
\item{message}{A received message}
\item{subscription}{The subscription that received the event}
}
\value{
PubsubEvent object
}
\description{
PubsubEvent Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
An event indicating a received message or truncation event.
}
|
e1cbaa52cb260fda739e5146eeac7ed97b00dde5
|
305923bb1a34b2eb81dae663b8bf5c2fb09a4774
|
/R/model-setup.R
|
4fff3c065cb3d3d11f1be5f03abb29bcfe4fd092
|
[] |
no_license
|
andrew-edwards/hake-assessment
|
4e535bf7ce05a1fcf6156d5bfb0a73aec4320594
|
1b5d03594d89868c3496a672e1d9f86a608c5704
|
refs/heads/master
| 2021-01-24T01:36:02.342468
| 2019-07-03T21:06:18
| 2019-07-03T21:06:18
| 49,098,720
| 0
| 0
| null | 2016-01-05T23:20:06
| 2016-01-05T23:20:06
| null |
UTF-8
|
R
| false
| false
| 20,449
|
r
|
model-setup.R
|
## model-setup.R -- global configuration (years, file names, versions) used by
## the assessment build scripts.  cat0() echoes each value when `verbose` is
## TRUE so a build log records the exact configuration.
## -----------------------------------------------------------------------------
## Set verbosity for this project (R code) and SS shell calls
## -----------------------------------------------------------------------------
verbose <- TRUE
ss.verbose <- FALSE
## -----------------------------------------------------------------------------
## Year for this assessment - default is current year
## -----------------------------------------------------------------------------
assess.yr <- 2019
if(verbose) cat0("Assessment year: \n ", assess.yr)
## -----------------------------------------------------------------------------
## Year for last assessment - default is current year - 1
## -----------------------------------------------------------------------------
last.assess.yr <- assess.yr - 1
if(verbose) cat0("Last assessment year: \n ", last.assess.yr)
## -----------------------------------------------------------------------------
## Directory in which the model directories reside
## -----------------------------------------------------------------------------
model.dir <- rootd.models
if(verbose) cat0("Models directory: \n ", model.dir)
## Output CSV directory for outputs of at-age which are calculated by the
## make.est.numbers.at.age.table function (in r-functions/tables-age.r)
output.csv.dir <- file.path(rootd, "out-csv")
## -----------------------------------------------------------------------------
## File names which must exists in each model directory
## -----------------------------------------------------------------------------
exe.file.name <- "ss3.exe"
if(verbose) cat0("SS executable file: \n ", exe.file.name)
starter.file.name <- "starter.ss"
if(verbose) cat0("SS starter file: \n ", starter.file.name)
forecast.file.name <- "forecast.ss"
if(verbose) cat0("SS forecast file: \n ", forecast.file.name)
weight.at.age.file.name <- "wtatage.ss"
if(verbose) cat0("SS weight-at-age file: \n ", weight.at.age.file.name)
## -----------------------------------------------------------------------------
## The version of SS and ADMB used in this assessment
## -----------------------------------------------------------------------------
ss.version <- "3.30.10"
if(verbose) cat0("SS version: \n ", ss.version)
admb.version <- "11.6"
if(verbose) cat0("ADMB version: \n ", admb.version)
## -----------------------------------------------------------------------------
## Data start and endpoint variables
## -----------------------------------------------------------------------------
## Recruitment deviations start year
recruit.dev.start.yr <- 1946
if(verbose) cat0("Recruitment deviations start year: \n ", recruit.dev.start.yr)
## Unfished equilibrium year.
unfished.eq.yr <- 1964
if(verbose) cat0("Unfished equilibrium year: \n ", unfished.eq.yr)
## Start year for the models
start.yr <- 1966
if(verbose) cat0("Start year for catch data: \n ", start.yr)
## Start year for the fishery age comps
start.yr.age.comps <- 1975
if(verbose) cat0("Start year for fishery age comps data: \n ", start.yr.age.comps)
## The last non-forecast year in the model. This is the year for which the
## mcmc outputs will be used in reference point calculations.
end.yr <- assess.yr
if(verbose) cat0("End year for model: \n ", end.yr)
## First year in the survey timeseries
survey.start.yr <- 1995
if(verbose) cat0("First survey year: \n ", survey.start.yr)
## Last year in the survey timeseries
survey.end.yr <- 2017
## Years in which the survey took place (not every year is surveyed)
surv.yrs <- c(1995,
              1998,
              2001,
              2003,
              2005,
              2007,
              2009,
              2011,
              2012,
              2013,
              2015,
              2017)
# tick marks for time series plot
big.ticks <- seq(1970, end.yr + 4, 5)
little.ticks <- start.yr:max(big.ticks)
if(verbose) cat0("Last survey year: \n ", survey.end.yr)
## Final year of data (This is what is the end year is in the model data files)
last.data.yr <- end.yr - 1
last.age.yr <- end.yr - 2
if(verbose) cat0("Last year of model data: \n ", last.data.yr)
if(verbose) cat0("Last year of age data: \n ", last.age.yr)
## -----------------------------------------------------------------------------
## Key posteriors used in the assessment
## -----------------------------------------------------------------------------
## Parameter-name prefixes used to pull the "key" posteriors out of the MCMC
## output (natural mortality, R0, steepness, extra survey SD).
key.posteriors <- c("NatM",
                    "SR_LN",
                    "SR_BH_steep",
                    "Q_extraSD")
if(verbose){
  cat0("***")
  cat0("Key posteriors in this assessment:")
  cat(paste0(" ", key.posteriors), sep = "\n")
  cat0("***")
}
key.posteriors.file <- "keyposteriors.csv"
if(verbose) cat0("Key posteriors file: \n ", key.posteriors.file)
nuisance.posteriors.file <- "nuisanceposteriors.csv"
if(verbose) cat0("Key posteriors file: \n ", nuisance.posteriors.file)
## -----------------------------------------------------------------------------
## Base model name and directory
## -----------------------------------------------------------------------------
base.model.dir.name <- "2019.03.00_base"
base.model.name <- paste0(assess.yr, " Base model")
if(verbose){
  cat0("Base model directory name: \n ", base.model.dir.name)
  cat0("Base model pretty name: \n ", base.model.name)
}
## -----------------------------------------------------------------------------
## Alternative base model names and directories (runs we want MCMC results for,
## not necessarily considering as alt runs for 2019).
## -----------------------------------------------------------------------------
## alt.base.model.1.dir.name <- "2019.02.36_fecundity"
## alt.base.model.1.name <- paste0(assess.yr, " Short-term pre-1975 wt at age")
## alt.base.model.2.dir.name <- "2019.02.32_fecundity"
## alt.base.model.2.name <- paste0(assess.yr, " Long-term pre-1975 wt at age")
## alt.base.model.3.dir.name <- "2019.02.38_fecundity"
## alt.base.model.3.name <- paste0(assess.yr, " TV Fec, short-term pre-1975 wt at age")
## -----------------------------------------------------------------------------
## Last assessment year's base model name and directory
## -----------------------------------------------------------------------------
##last.yr.base.model.dir.name <- "00_45_2017base"
last.yr.base.model.dir.name <- "2018.40_base_model"
last.yr.base.model.name <- paste(last.assess.yr, "Base model")
if(verbose){
  cat0("Last assessment year's base model directory name: \n ",
       last.yr.base.model.dir.name)
  cat0("Last assessment year's base model pretty name: \n ",
       last.yr.base.model.name)
}
## -----------------------------------------------------------------------------
## Bridge models group 1
## -----------------------------------------------------------------------------
## Each bridge group pairs directory names with human-readable labels; the
## *.end.yr vectors record the final model year for each run (last year's
## base model ends one year earlier than this assessment's runs).
bridge.model.dir.names.1 <- c(last.yr.base.model.dir.name,
                              "2019.03.40_update_historic_catch",
                              "2019.03.46_update_historic_comp",
                              "2019.03.47_update_historic_wtage")
bridge.model.names.1 <- c(last.yr.base.model.name,
                          "Update historic catch",
                          "Update historic comps",
                          "Update historic weights")
bridge.model.end.yr.1 <- end.yr - c(1, 1, 1, 1) # subtract 1 year from all 4 models
## -----------------------------------------------------------------------------
## Bridge models group 2
## -----------------------------------------------------------------------------
bridge.model.dir.names.2 <- c(last.yr.base.model.dir.name,
                              "2019.03.41_add_2018_catch",
                              "2019.03.42_add_2018_comp_wtatage",
                              "2019.03.45_fecundity_time_varying")
bridge.model.names.2 <- c(last.yr.base.model.name,
                          "Add 2018 catch",
                          "Add 2018 comps and weights",
                          "Time-varying fecundity (= base model)")
bridge.model.end.yr.2 <- end.yr - c(1, 0, 0, 0) # subtract 1 year from the first model only
## -----------------------------------------------------------------------------
## Sensitivity models group 1
## -----------------------------------------------------------------------------
sens.model.dir.names.1 <- c("2019.03.01_h_prior_mean_low",
                            "2019.03.02_h_fix_high",
                            "2019.03.03_sigmaR_fix_low",
                            "2019.03.04_sigmaR_fix_high",
                            "2019.03.05_M_0.2SD",
                            "2019.03.06_M_0.3SD")
sens.model.names.1 <- c("Steepness Mean Prior Low (0.5)",
                        "Steepness Fix 1.0",
                        "Sigma R 1.0",
                        "Sigma R 1.8",
                        "Natural Mortality (SD=0.2)",
                        "Natural Mortality (SD=0.3)")
## -----------------------------------------------------------------------------
## Sensitivity models group 2
## -----------------------------------------------------------------------------
sens.model.dir.names.2 <- c("2019.03.07_age1Survey",
                            "2019.03.08_compWeight_HarmonicMean",
                            "2019.03.09_compWeight_Francis")
sens.model.names.2 <- c("Add Age 1 Index",
                        "McAllister Ianelli Weighting",
                        "Francis Weighting")
## -----------------------------------------------------------------------------
## Sensitivity models group 3
## -----------------------------------------------------------------------------
## Group 3 not used for 2019 assessment (kept commented for reference).
##sens.model.dir.names.3 <- c("2019.02.07_maxSel_Age5",
##                            "2019.02.08_maxSel_Age7",
##                            "2019.02.09_maxSel_Age10",
##                            "2019.02.11_tvSelect_phi_xtralow",
##                            "2019.02.12_tvSelect_phi_low",
##                            "2019.02.13_tvSelect_phi_high")
##sens.model.names.3 <- c("Max. age selectivity 5",
##                        "Max. age selectivity 7",
##                        "Max. age selectivity 10",
##                        "Phi t.v. selectivity (0.21)",
##                        "Phi t.v. selectivity (0.70)",
##                        "Phi t.v. selectivity (2.10)")
##
## -----------------------------------------------------------------------------
## Sensitivity models group 4
## -----------------------------------------------------------------------------
sens.model.dir.names.4 <- c("2019.03.11_tvSelect_phi_xtralow",
                            "2019.03.12_tvSelect_phi_low",
                            "2019.03.13_tvSelect_phi_high",
                            "2019.03.10_semiPara_tvSelect_sig0.695",
                            "2019.03.14_semiPara_tvSelect_sig1.0")
sens.model.names.4 <- c("Phi t.v. selectivity (0.21)",
                        "Phi t.v. selectivity (0.70)",
                        "Phi t.v. selectivity (2.10)",
                        "Semi-Parametric t.v selectivity (0.695)",
                        "Semi-Parametric t.v. selectivity (1.0)")
## -----------------------------------------------------------------------------
## Sensitivity models group 5 - Different weight-at-age schemes (first group)
## -----------------------------------------------------------------------------
sens.model.dir.names.5 <- c("2019.03.52_fecundity",
                            "2019.03.53_fecundity",
                            "2019.03.54_fecundity")
sens.model.names.5 <- c("Run 52",
                        "Run 53",
                        "Run 54")
## -----------------------------------------------------------------------------
## Sensitivity models group 6 - Different weight-at-age schemes (second group)
## -----------------------------------------------------------------------------
sens.model.dir.names.6 <- c("2019.03.55_fecundity",
                            "2019.03.56_fecundity",
                            "2019.03.57_fecundity",
                            "2019.03.58_fecundity")
sens.model.names.6 <- c("Run 55",
                        "Run 56",
                        "Run 57",
                        "Run 58")
## Descriptive labels for runs 52-58 (kept commented; short "Run NN" names above
## are the ones currently used):
## sens.model.names.5 <- c("Early weight-age 1975-2018 mean, late is 2016-2018 mean",     #52
##                         "Early weight-age 1975-2018 mean, late is 1975-2018 mean",     #53
##                         "TV Fecund, early weight-age 1975-2018 mean, late is 2016-2018 mean", #54
##                         "TV Fecund, early weight-age 1975-1979 mean, late is 1975-2018 mean*", #55
##                         "Early weight-age 1975-1979 mean, late is 2016-2018 mean",     #56
##                         "Early weight-age 1975-1979 mean, late is 1975-2018 mean*",    #57
##                         "TV Fecund, early weight-age 1975-1979 mean, late is 2016-2018 mean") #58
## This function must be called from within the first knitr code chunk
## in the document. It is defined here so that it is in the same place
## as the other model setup and should be changed if bridge models
## and sensitivity models change in the model.dir.names above..
## Load the base, bridge and sensitivity models from disk into the calling
## (global) environment via `<<-`. Must be called from within the first knitr
## code chunk in the document; it is defined here so that it stays next to the
## model directory name definitions above and must be kept in sync with them.
load.models.into.parent.env <- function(){
  base.model <<- load.models(model.dir, base.model.dir.name)
  ## Error checks: fail fast if the base model was built without mcmc output
  ## or without forecasts, since the document cannot be built without them.
  if(is.null(base.model$mcmccalcs)){
    stop("Error - base.model$mcmccalcs is NULL. Make sure the directory\n",
         file.path(base.model$path, "mcmc"), " exists and contains valid\n",
         " mcmc output, set ovwrt.rdata = TRUE in the create.rdata.file() calls\n",
         " within build() in model-setup.r, and try again.\n")
  }
  if(is.null(base.model$risks)){
    stop("Error - base.model$risks is NULL. Maybe you forgot to run the forecasting?\n",
         " Make sure to setup running and/or loading of forecasts, and\n",
         " set ovwrt.rdata = TRUE in the create.rdata.file() calls\n",
         " within build() in model-setup.r and try again.\n")
  }
  last.yr.base.model <<- load.models(model.dir, last.yr.base.model.dir.name)
  ## alt.base.model.1 <<- load.models(model.dir, alt.base.model.1.dir.name)
  ## alt.base.model.2 <<- load.models(model.dir, alt.base.model.2.dir.name)
  ## alt.base.model.3 <<- load.models(model.dir, alt.base.model.3.dir.name)
  bridge.models.1 <<- load.models(model.dir, bridge.model.dir.names.1)
  bridge.models.2 <<- load.models(model.dir, bridge.model.dir.names.2)
  sens.models.1 <<- load.models(model.dir, sens.model.dir.names.1)
  sens.models.2 <<- load.models(model.dir, sens.model.dir.names.2, TRUE)
  ## sens.models.3 <<- load.models(model.dir, sens.model.dir.names.3)
  sens.models.4 <<- load.models(model.dir, sens.model.dir.names.4)
  sens.models.5 <<- load.models(model.dir, sens.model.dir.names.5)
  sens.models.6 <<- load.models(model.dir, sens.model.dir.names.6)
  ## Lists of sensitivities for the MLE parameters, derived quantiles,
  ## and reference points table.
  ## First set includes base and sensitivity groups 1 and 2.
  sens.models.1.for.table <<- c(list(base.model), sens.models.1, sens.models.2)
  sens.model.names.1.for.table <<- c("Base model", sens.model.names.1,sens.model.names.2)
  ## Second set includes base and sensitivity group 4 only.
  ## Group 3 is excluded because it causes problems when running
  ## make.short.parameter.estimates.sens.table().
  sens.models.2.for.table <<- c(list(base.model), sens.models.4)
  sens.model.names.2.for.table <<- c("Base model", sens.model.names.4)
  ## sens.models.2.for.table <<- c(list(base.model), sens.models.3, sens.models.4)
  ## sens.model.names.2.for.table <<- c("Base model", sens.model.names.3, sens.model.names.4)
  ## Third set includes base and sensitivity groups 5 and 6.
  sens.models.3.for.table <<- c(list(base.model), sens.models.5, sens.models.6)
  sens.model.names.3.for.table <<- c("Base model", sens.model.names.5, sens.model.names.6)
}
## Create the RData files for the models defined in model-setup.r.
## Each model directory gets its own RData file holding the model object as
## described in Readme.md.
##
## run.fore / run.retro / run.extra.mcmc - re-run the corresponding step;
##   its output directory is deleted first so it is rebuilt from scratch.
## model.name - if supplied (a directory name), only that one model is built
##   and its RData file is always overwritten; otherwise all models are built.
build <- function(run.fore = FALSE,
                  run.retro = FALSE,
                  run.extra.mcmc = FALSE,
                  model.name = NA){
  ## Whether existing RData files should be overwritten: TRUE when any step
  ## is being re-run. any() already returns TRUE/FALSE, so the previous
  ## ifelse(any(...), TRUE, FALSE) wrapper was redundant; computed once here
  ## instead of repeating it at every call site.
  ovwrt <- any(run.fore, run.retro, run.extra.mcmc)
  if(!is.na(model.name)){
    if(run.extra.mcmc){
      delete.dirs(sub.dir = file.path(model.name, "extra-mcmc"))
    }
    if(run.fore){
      delete.dirs(sub.dir = file.path(model.name, "mcmc", "forecasts"))
    }
    if(run.retro){
      delete.dirs(sub.dir = file.path(model.name, "retrospectives"))
    }
    ## A single, explicitly-named model is always rebuilt.
    create.rdata.file(model.name = model.name,
                      ovwrt.rdata = TRUE,
                      run.fore = run.fore,
                      fore.yrs = forecast.yrs,
                      forecast.probs = forecast.probs,
                      forecast.catch.levels = catch.levels,
                      run.retros = run.retro,
                      my.retro.yrs = retro.yrs,
                      run.extra.mcmc = run.extra.mcmc,
                      key.posteriors = key.posteriors,
                      ss.version = ss.version,
                      exe.file.name = exe.file.name,
                      starter.file.name = starter.file.name,
                      forecast.file.name = forecast.file.name,
                      weight.at.age.file.name = weight.at.age.file.name,
                      verbose = ss.verbose)
    return(invisible())
  }
  ## Delete old output directories for all models
  if(run.extra.mcmc){
    delete.dirs(sub.dir = file.path("extra-mcmc"))
  }
  if(run.fore){
    delete.dirs(sub.dir = file.path("mcmc", "forecasts"))
  }
  if(run.retro){
    delete.dirs(sub.dir = file.path("retrospectives"))
  }
  ## Base model
  create.rdata.file(model.name = base.model.dir.name,
                    ovwrt.rdata = ovwrt,
                    run.fore = run.fore,
                    fore.yrs = forecast.yrs,
                    forecast.probs = forecast.probs,
                    forecast.catch.levels = catch.levels,
                    run.retros = run.retro,
                    my.retro.yrs = retro.yrs,
                    run.extra.mcmc = run.extra.mcmc,
                    key.posteriors = key.posteriors,
                    ss.version = ss.version,
                    exe.file.name = exe.file.name,
                    starter.file.name = starter.file.name,
                    forecast.file.name = forecast.file.name,
                    weight.at.age.file.name = weight.at.age.file.name,
                    verbose = ss.verbose)
  ## Bridge and sensitivity models need to be unlisted from their groups
  ## and placed into a single list for the FOR loop to work right.
  ## (Alternative base models and sensitivity group 3 are currently disabled.)
  mnv <- c(unlist(bridge.model.dir.names.1),
           unlist(bridge.model.dir.names.2),
           unlist(sens.model.dir.names.1),
           unlist(sens.model.dir.names.2),
           unlist(sens.model.dir.names.4),
           unlist(sens.model.dir.names.5),
           unlist(sens.model.dir.names.6))
  ## Subtract out the last year base model from mnv
  mnv <- mnv[! mnv %in% last.yr.base.model.dir.name]
  model.names.list <- as.list(unique(mnv))
  ## Bridge/sensitivity models
  for(model.nm in model.names.list){
    create.rdata.file(
      model.name = model.nm,
      ovwrt.rdata = ovwrt,
      run.fore = run.fore,
      fore.yrs = forecast.yrs,
      forecast.probs = forecast.probs,
      forecast.catch.levels = catch.levels,
      run.retros = run.retro,
      my.retro.yrs = retro.yrs,
      run.extra.mcmc = run.extra.mcmc,
      key.posteriors = key.posteriors,
      ss.version = ss.version,
      exe.file.name = exe.file.name,
      starter.file.name = starter.file.name,
      forecast.file.name = forecast.file.name,
      weight.at.age.file.name = weight.at.age.file.name,
      verbose = ss.verbose)
  }
}
|
4c5a6b6e2ca7e8d22263627194c20b47e55b1731
|
bb4e7027da03652fc4475c106b2794684f191ff1
|
/constructCrossSampleFrame.R
|
bb6f591d223ae04a2367381bf4963e525dfc99b1
|
[] |
no_license
|
BrainArid/CoexpressionNetworkRProject
|
b06601db016135dcabcd659d2e23e6a358977a71
|
1d2f3a6c73e08fdf5623928295e579503b2bd07c
|
refs/heads/master
| 2021-01-23T11:49:50.034854
| 2015-08-12T01:02:26
| 2015-08-12T01:02:26
| 24,804,940
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,313
|
r
|
constructCrossSampleFrame.R
|
# Copy one input file's value column(s) into column i of outFrame and set
# that column's name to "<file base name> _ <source column name>".
# NOTE(review): paste() (not paste0) is used, so the generated names contain
# spaces around the underscore -- confirm this is intended.
# NOTE(review): as.numeric(data[,-1]) only works when exactly one value
# column remains per file; with more columns this coercion would fail and the
# single colnames(outFrame)[i] assignment would receive a vector -- verify
# that the input files have two columns (ID + one value).
constructCrossSampleFrame_privateHelper <- function(outFrame, inFiles, data, cols2Ignore, i)
{
  if(is.null(cols2Ignore))
  {
    # No columns ignored: drop only the first (ID) column.
    colnames(outFrame)[i] <- paste(tail(strsplit(inFiles[i], split="/")[[1]],1), "_", colnames(data)[-1]);
    outFrame[,i] <- as.numeric(data[,-1]);
  }
  else
  {
    # Drop the ignored columns first, then the ID column.
    colnames(outFrame)[i] <- paste(tail(strsplit(inFiles[i], split="/")[[1]],1), "_", colnames(data)[-cols2Ignore][-1]);
    outFrame[,i] <- as.numeric(data[,-cols2Ignore][,-1]);
  }
  return(outFrame);
}
# Build a data frame whose rows come from the first column of each input file
# (tab-separated) and whose columns hold the per-file values, one column per
# file. rows2Ignore / cols2Ignore are row/column indices dropped from every
# file before processing.
constructCrossSampleFrame <- function(inFiles, rows2Ignore=c(), cols2Ignore=c())
{
  # Read the first file to size the output frame and supply the row names.
  data <- read.csv(file=inFiles[1], sep="\t", stringsAsFactors=FALSE);
  if(!is.null(rows2Ignore))
  {
    data <- data[-rows2Ignore,];
  }
  # length(inFiles) is already a scalar; the original "[1]" was a no-op.
  outFrame <- data.frame(matrix(ncol = length(inFiles)*(dim(data)[2]-length(cols2Ignore)-1), nrow = dim(data)[1]));
  row.names(outFrame) <- data[,1];
  outFrame <- constructCrossSampleFrame_privateHelper(outFrame, inFiles, data, cols2Ignore, 1);
  # seq_along(inFiles)[-1] is empty when there is a single input file; the
  # original "inFiles[2:length(inFiles)]" evaluated to inFiles[c(2, 1)] in
  # that case, yielding an NA read and re-processing file 1.
  for (i in seq_along(inFiles)[-1])
  {
    data <- read.csv(file=inFiles[i], sep="\t", stringsAsFactors=FALSE);
    if(!is.null(rows2Ignore))
    {
      data <- data[-rows2Ignore,];
    }
    outFrame <- constructCrossSampleFrame_privateHelper(outFrame, inFiles, data, cols2Ignore, i);
  }
  return(outFrame);
}
|
b0c2e87d465f53588fd5f2fc00418630a439fe10
|
17d7e380809b207ae5160c0f8f48a42d094cf54e
|
/Fast_data_explorer/server.R
|
0d24985fc06612ce37ef146289311ab337297b04
|
[] |
no_license
|
PachoAlvarez/Developing-Data-Products
|
0684e99d42eb2d43858f84148072566daccfd5a5
|
d8949d581b13eebeaf0972eb2e9887224a937787
|
refs/heads/master
| 2022-12-26T21:14:23.864916
| 2020-09-24T06:12:16
| 2020-09-24T06:12:16
| 295,596,521
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,869
|
r
|
server.R
|
library(shiny)
library(datasets)
library(DT)
library(GGally)
library(HistData)
library(mlbench)
library(tidyverse)
shinyServer(function(input, output) {
  # #================= 2. DATA ==================================================
  # --------- Load the data ----------------------------------------
  # input$ty_da ---> data source type ("file" or a built-in dataset)
  # input$file1 ---> uploaded CSV file
  # input$dsR   ---> name of the selected built-in R dataset
  # ---> dataset()
  dataset <- reactive({
    if (input$ty_da == "file") {
      file <- input$file1
      ext <- tools::file_ext(file$datapath)
      req(file)
      validate(need(ext=="csv", "Please upload a csv file"))
      read.csv(file$datapath, header=input$header)
    } else {
      switch (input$dsR,
              "CO2" = CO2,
              "diamonds" = diamonds,
              "Galton" = Galton,
              "iris" = iris,
              "mtcars" = mtcars,
              "swiss" = swiss,
              "ToothGrowth" = ToothGrowth
      )
    }
  })
  # --------- Table for exploring the data -------------------------
  # dataset() --->
  #               ---> output$rawData
  output$rawData <- DT::renderDataTable(
    DT::datatable({dataset()},
                  options = list(lengthMenu=list(c( 5, 10, 15, -1),
                                                 c('5', '10', '15', 'All')),
                                 pageLength=5),
                  filter = "top",
                  selection = 'multiple',
                  style = "bootstrap"
    ))
  # #================= 3. VARIABLES =============================================
  # --------- Display the list of variables ------------------------
  # --------- to choose from, and collect the selection ------------
  # dataset() --->
  #               ---> output$sel_Vars
  #               ---> input$vars
  output$sel_Vars <- renderUI({
    varsName <- names(dataset())
    checkboxGroupInput("vars", "Choose variables:", varsName)
  })
  # --------- Correlation plot of the ------------------------------
  # --------- selected variables -----------------------------------
  # dataset() --->
  # input$vars --->
  #                ---> output$corrPlot
  output$corrPlot <- renderPlot({
    input$ok
    ggpairs(dataset()[, input$vars])
  })
  # #================= 4. EXPLORE ===============================================
  # --------- Subset of the chosen variables -----------------------
  # dataset() --->
  # input$vars --->
  #                ---> set_var()
  set_var <- reactive({
    dataset()[, input$vars]
  })
  # --------- Display the list of variables ------------------------
  # --------- to choose Y from, and collect the selection ----------
  # input$vars --->
  #                ---> output$sel_Vars2
  #                ---> input$varY
  output$sel_Vars2 <- renderUI({
    selectInput("varY", "Select the dependent variable:", input$vars)
  })
  # --------- Display the list of variables ------------------------
  # --------- to choose X from (excluding Y), collect selection ----
  # input$vars --->
  # input$varY --->
  #                ---> output$sel_VarX
  #                ---> input$varX
  output$sel_VarX <- renderUI({
    varsX <- input$vars[input$vars != input$varY]
    selectInput("varX", "Select the independent variable:", varsX)
  })
  # --------- Plots for the variable Y -----------------------------
  # set_var() --->
  # input$varY --->
  #                ---> output$plotY
  output$plotY <- renderPlot({
    par(mfrow=c(1, 3))
    hist(set_var()[, input$varY], main=input$varY, xlab="")
    boxplot(set_var()[, input$varY], main="")
    qqnorm(set_var()[, input$varY])
  })
  # --------- Diagnostic plots for the model Y ~ X -----------------
  # set_var() --->
  # input$varY --->
  # input$varX --->
  #                ---> output$plotXY
  output$plotXY <- renderPlot({
    modelo <- lm(set_var()[, input$varY] ~ set_var()[, input$varX])
    par(mfrow=c(2,2))
    plot(modelo)
  })
})
#=============================================================================
# runApp()
|
0871bd5aceb0ed69f6ae1202a0410daa61a0ccca
|
8f937f58ede4fe1ad36cc7051a425d70b9078998
|
/global.R
|
1698676b88d7b8c2c181b4d84dbeaddc37d41bf9
|
[] |
no_license
|
davidhainesiii/apod-analysis-app
|
0b198bb6823dc9eb68914966d933c00ebf31b81d
|
927c1929629582fc49893357eb7349eceb19e499
|
refs/heads/master
| 2020-04-14T19:12:44.399498
| 2019-01-30T00:02:01
| 2019-01-30T00:02:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 778
|
r
|
global.R
|
library(shiny)
library(shinydashboard)
library(ggplot2)
library(leaflet)
library(tidyverse)
library(dplyr)
library(tm)
library(httr)
library(wordcloud2)
library(tidytext)
library(data.table)
library(plotly)
# Pre-computed data sets shared by ui.R and server.R.
mergedata <- readRDS("./data/apodtwittermerge.RDS")  # APOD entries joined with tweets
apoddata <- readRDS("./data/apod.RDS")               # raw APOD entries
# Word frequencies from the APOD "Explanation" text, stop words removed.
cloud_data <- apoddata %>%
  unnest_tokens(output = word, input = 'Explanation', token = "words") %>%
  anti_join(stop_words) %>%
  count(word, sort = TRUE)
# Per-word sentiment (bing lexicon) for words appearing more than 5 times;
# sentiment = positive count - negative count.
sentiments <- cloud_data %>%
  filter(cloud_data$n > 5) %>%
  inner_join(get_sentiments("bing")) %>%
  #count(book, index = linenumber %/% 80, sentiment) %>%
  spread(sentiment, n, fill = 0) %>%
  mutate(sentiment = positive - negative)
# Same table sorted from most positive to most negative.
sortiments <- sentiments[order(sentiments$sentiment, decreasing = TRUE),]
|
fafa0387f5a40485f5cedb07527cc5eca24a390d
|
4bcb3a40c8527210fd2d00492559156a2cc2a58e
|
/instat/static/InstatObject/R/instat_object_R6.R
|
f31c9f1fe2b8c7983dfb2cfa691ca8cc260d712a
|
[] |
no_license
|
getch23/Instat
|
d74b0bea55d200bda16575a7d3f18a768b165c97
|
297a0ed9d10af9df6d8a1699e44c447bda11a419
|
refs/heads/master
| 2020-02-26T13:32:18.841974
| 2016-08-01T07:49:46
| 2016-08-01T07:49:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 38,741
|
r
|
instat_object_R6.R
|
# Top-level container for an Instat session. Holds a named list of
# data_object instances plus session-wide metadata, saved objects
# (models/graphs) and links between data objects.
instat_object <- R6Class("instat_object",
  public = list(
    # Construct the container; any supplied data frames are imported as
    # data_objects. The per-table metadata/filter arguments must be lists
    # parallel to data_tables (same length and order).
    initialize = function(data_tables = list(), instat_obj_metadata = list(),
                          data_tables_variables_metadata = rep(list(data.frame()),length(data_tables)),
                          data_tables_metadata = rep(list(list()),length(data_tables)),
                          data_tables_filters = rep(list(list()),length(data_tables)),
                          imported_from = as.list(rep("",length(data_tables))),
                          messages=TRUE, convert=TRUE, create=TRUE)
    {
      self$set_meta(instat_obj_metadata)
      self$set_objects(list())
      if (missing(data_tables) || length(data_tables) == 0) {
        self$set_data_objects(list())
      }
      else {
        self$import_data(data_tables=data_tables, data_tables_variables_metadata=data_tables_variables_metadata,
                         data_tables_metadata=data_tables_metadata,
                         imported_from=imported_from, messages=messages, convert=convert, create=create, data_tables_filters = data_tables_filters)
      }
      # A freshly constructed object is considered unchanged.
      private$.data_objects_changed <- FALSE
    }
  ),
  private = list(
    .data_objects = list(),          # named list of data_object instances
    .metadata = list(),              # session-wide metadata (name -> value)
    .objects = list(),               # session-wide saved objects (models/graphs)
    .links = list(),                 # links between data objects
    .data_objects_changed = FALSE    # dirty flag for the container itself
  ),
  active = list(
    # Active binding: reading returns the container's dirty flag; writing
    # sets it and propagates the flag to every contained data_object.
    data_objects_changed = function(new_value) {
      if(missing(new_value)) return(private$.data_objects_changed)
      else {
        if(new_value != TRUE && new_value != FALSE) stop("new_value must be TRUE or FALSE")
        private$.data_objects_changed <- new_value
        #TODO is this behaviour we want? (propagating the flag to all children)
        invisible(sapply(self$get_data_objects(), function(x) x$data_changed <- new_value))
      }
    }
  )
)
# Import a named list of data frames, creating one data_object per frame and
# appending each to the container. The metadata/filter arguments are lists
# parallel to data_tables. Names in data_tables must be unique.
instat_object$set("public", "import_data", function(data_tables = list(), data_tables_variables_metadata = rep(list(data.frame()),length(data_tables)),
                                                    data_tables_metadata = rep(list(list()),length(data_tables)),
                                                    data_tables_filters = rep(list(list()),length(data_tables)),
                                                    imported_from = as.list(rep("",length(data_tables))),
                                                    messages=TRUE, convert=TRUE, create=TRUE)
{
  if (missing(data_tables) || length(data_tables) == 0) {
    stop("No data found. No data objects can be created.")
  }
  else {
    # Validate the shape of all parallel arguments before creating anything.
    if (!(class(data_tables) == "list")) {
      stop("data_tables must be a list of data frames")
    }
    if (length(unique(names(data_tables))) != length(names(data_tables)) ) {
      stop("There are duplicate names in the data tables list.")
    }
    if (!(length(data_tables_variables_metadata) == length(data_tables))) {
      stop("If data_tables_variables_metadata is specified, it must be a list of metadata lists with the same
           length as data_tables.")
    }
    if (!(length(data_tables_metadata) == length(data_tables))) {
      stop("If data_tables_metadata is specified, it must be a list of metadata lists with the same
           length as data_tables.")
    }
    if (length(imported_from) != length(data_tables) ) {
      stop("imported_from must be a list of the same length as data_tables")
    }
    # loop through the data_tables list and create a data object for each
    # data.frame given
    # NOTE(review): new_data_objects is initialised but never used below --
    # the objects are appended directly to the container instead.
    new_data_objects = list()
    for ( i in (1:length(data_tables)) ) {
      new_data = data_object$new(data=data_tables[[i]], data_name = names(data_tables)[[i]],
                                 variables_metadata = data_tables_variables_metadata[[i]],
                                 metadata = data_tables_metadata[[i]],
                                 imported_from = imported_from[[i]],
                                 start_point = i,
                                 messages = messages, convert = convert, create = create,
                                 filters = data_tables_filters[[i]])
      # Add this new data object to our list of data objects
      self$append_data_object(new_data$get_metadata(data_name_label), new_data)
    }
  }
}
)
# Replace this container's contents with clones of another instat_object's
# data objects, its metadata and its overall objects. All change flags are
# set so the UI knows everything needs refreshing.
instat_object$set("public", "replace_instat_object", function(new_instatObj) {
  for(curr_obj in new_instatObj$get_data_objects()) {
    # data_clone() so this container does not share state with the source.
    self$append_data_object(curr_obj$get_metadata(data_name_label), curr_obj$data_clone())
  }
  self$set_meta(new_instatObj$get_metadata())
  self$set_objects(new_instatObj$get_objects(data_name = overall_label, as_list = FALSE))
  self$data_objects_changed <- TRUE
  invisible(lapply(new_instatObj$get_data_objects(), function(x) x$set_data_changed(TRUE)))
}
)
# Replace the container's list of data objects. The new value must be a
# list in which EVERY element is a data_object.
instat_object$set("public", "set_data_objects", function(new_data_objects) {
  # The original check all("data_object" %in% sapply(..., class)) only
  # tested whether "data_object" appeared ANYWHERE among the elements'
  # classes (the length-1 left side of %in% flattens the result), so a
  # mixed list with a single data_object passed. Check each element with
  # inherits() instead; vapply keeps the result logical even for an
  # empty list.
  valid <- is.list(new_data_objects) &&
    (length(new_data_objects) == 0 ||
       all(vapply(new_data_objects, function(x) inherits(x, "data_object"), logical(1))))
  if(!valid) {
    stop("new_data_objects must be a list of data_objects")
  }
  else private$.data_objects <- new_data_objects
}
)
#' Import data, objects and metadata from a saved RDS file
#'
#' @param data_RDS The object read from an RDS file: either a saved
#'   \code{instat_object} or a plain data frame.
#' @param keep_existing Keep the current contents and merge the imported ones in?
#' @param overwrite_existing Overwrite items whose names already exist?
#' @param include_objects Import saved objects (models/graphs)?
#' @param include_metadata Import metadata (overall and per-data-object)?
#' @param include_logs Import the change logs?
#' @param include_filters Import the filters?
#' @param messages Display messages/errors for unsupported inputs?
#'
#' @return Invisibly updates the instat_object in place.
#' @export
#'
#' @examples
# Import the contents of a saved RDS file into this container. Accepts
# either a whole instat_object (data objects, overall objects and metadata
# are merged according to the include_*/keep_existing/overwrite_existing
# flags) or a single data frame (imported as one new data object).
instat_object$set("public", "import_RDS", function(data_RDS, keep_existing = TRUE, overwrite_existing = FALSE, include_objects = TRUE,
                                                   include_metadata = TRUE, include_logs = TRUE, include_filters = TRUE, messages = TRUE)
  # TODO add include_calcuations options
{
  if(inherits(data_RDS, "instat_object")) {
    # Full replacement is only possible when nothing is being kept back.
    if(!keep_existing && include_objects && include_metadata && include_logs && include_filters) {
      self$replace_instat_object(new_instatObj = data_RDS)
    }
    else {
      if(!keep_existing) {
        self$set_data_objects(list())
        self$set_meta(list())
        self$set_objects(list())
      }
      # Merge data objects, stripping out whatever was excluded.
      for (curr_data_obj in data_RDS$get_data_objects()) {
        if (!(curr_data_obj$get_metadata(data_name_label) %in% self$get_data_names()) || overwrite_existing){
          if (!include_objects) curr_data_obj$set_objects(list())
          curr_data_name = curr_data_obj$get_metadata(data_name_label)
          if (!include_metadata) {
            curr_data_obj$set_meta(list())
            curr_data_obj$add_defaults_meta()
            curr_data_obj$set_variables_metadata(data.frame())
            curr_data_obj$update_variables_metadata()
          }
          if (!include_logs) curr_data_obj$set_changes(list())
          if(!include_filters) {
            curr_data_obj$set_filters(list())
            curr_data_obj$remove_current_filter()
          }
          self$append_data_object(curr_data_name, curr_data_obj)
        }
      }
      # Merge overall (session-level) objects.
      new_objects_list = data_RDS$get_objects(data_name = overall_label)
      new_objects_count = length(new_objects_list)
      if (include_objects && new_objects_count > 0) {
        for ( i in seq_len(new_objects_count) ) {
          if (!(names(new_objects_list)[i] %in% names(private$.objects)) || overwrite_existing) {
            self$add_object(object = new_objects_list[i], object_name = names(new_objects_list)[i])
          }
        }
      }
      # Merge overall metadata.
      new_metadata = data_RDS$get_metadata()
      new_metadata_count = length(new_metadata)
      if (include_metadata && new_metadata_count > 0) {
        for ( i in seq_len(new_metadata_count) ) {
          # Fixed: the original compared against names(metadata), an
          # undefined free variable; the existing metadata lives in
          # private$.metadata.
          if (!(names(new_metadata)[i] %in% names(private$.metadata)) || overwrite_existing) {
            self$append_to_metadata(names(new_metadata)[i], new_metadata[[i]])
          }
        }
      }
    }
    self$data_objects_changed <- TRUE
  }
  else if (is.data.frame(data_RDS)) {
    self$import_data(data_tables = list(data_RDS = data_RDS))
  }
  else {
    if(messages){
      #TODO work on messages and error handling
      # use build in R defaults for messages
      # (error message typo fixed: "an objects of clss" -> "an object of class")
      stop(paste("Cannot import an object of class", class(data_RDS)))
    }
  }
}
)
# Now appending/merging not setting so maybe should be renamed
# Merge new_meta into the overall metadata. Despite the "set" name this
# appends/overwrites entry by entry rather than replacing the whole list.
instat_object$set("public", "set_meta", function(new_meta) {
  if(!is.list(new_meta)) stop("new_meta must be of type: list")
  for(name in names(new_meta)) {
    self$append_to_metadata(name, new_meta[[name]])
  }
}
)
# Replace the overall (session-level) objects list wholesale.
instat_object$set("public", "set_objects", function(new_objects) {
  if(!is.list(new_objects)) stop("new_objects must be of type: list")
  private$.objects <- new_objects
}
)
# Add (or replace) a data object under the given name, recording the name in
# the data object's own metadata so the two stay in sync.
instat_object$set("public", "append_data_object", function(name, obj) {
  # is.character() is robust for any input; the original
  # class(name) == "character" produced a length > 1 condition whenever
  # name carried more than one class.
  if(!is.character(name)) {
    stop("name must be a character")
  }
  # inherits() is the idiomatic (and equivalent) form of
  # "data_object" %in% class(obj).
  if(!inherits(obj, "data_object")) {
    stop("obj must be a data object")
  }
  obj$append_to_metadata(data_name_label, name)
  private$.data_objects[[name]] <- obj
}
)
# Retrieve data objects. With no data_name, the full named list is returned.
# data_name may be character names or whole-number indices; a single name
# returns the object itself unless as_list = TRUE.
instat_object$set("public", "get_data_objects", function(data_name, as_list = FALSE) {
  if(missing(data_name)) {
    return(private$.data_objects)
  }
  else{
    # Work out whether we were given names or (whole-number) positions.
    if(all(is.character(data_name))) type = "character"
    else if(all(is.numeric(data_name)) && all((data_name %% 1) == 0)) type = "integer"
    else stop("data_name must be of type character or integer")
    # Validate every requested name/index before subsetting.
    if(type=="character" && !all(data_name %in% names(private$.data_objects))) stop(paste(data_name, "not found"))
    if(type=="integer" && (!all(1 <= data_name) || !all(data_name <= length(private$.data_objects)))) stop(paste(data_name, "not found"))
    # Multiple requests (or as_list) keep the list wrapper; a single one is unwrapped.
    if(length(data_name) > 1 || as_list) return(private$.data_objects[data_name])
    else return(private$.data_objects[[data_name]])
  }
}
)
# Return a data frame from one data object (or from all of them as a named
# list when data_name is missing). stack_data = TRUE melts the single
# requested data frame via reshape2::melt, forwarding ... to melt().
instat_object$set("public", "get_data_frame", function(data_name, convert_to_character = FALSE, stack_data = FALSE, include_hidden_columns = TRUE, use_current_filter = TRUE, filter_name = "", ...) {
  if(!stack_data) {
    if(missing(data_name)) {
      retlist <- list()
      # seq_along() is empty-safe, and the body now references the private
      # field: the original indexed `data_objects`, an undefined free
      # variable, which errored at runtime.
      for ( i in seq_along(private$.data_objects) ) {
        retlist[[names(private$.data_objects)[[i]]]] = private$.data_objects[[i]]$get_data_frame(convert_to_character = convert_to_character, include_hidden_columns = include_hidden_columns, use_current_filter = use_current_filter, filter_name = filter_name)
      }
      return(retlist)
    }
    else return(self$get_data_objects(data_name)$get_data_frame(convert_to_character = convert_to_character, include_hidden_columns = include_hidden_columns, use_current_filter = use_current_filter, filter_name = filter_name))
  }
  else {
    if(missing(data_name)) stop("data to be stacked is missing")
    if(!data_name %in% names(private$.data_objects)) stop(paste(data_name, "not found."))
    return(melt(self$get_data_objects(data_name)$get_data_frame(include_hidden_columns = include_hidden_columns, use_current_filter = use_current_filter, filter_name = filter_name), ...))
  }
}
)
# Return the variables (column) metadata of one data object, or a named list
# of metadata for every data object when data_name is missing. All other
# arguments are forwarded to the data object's get_variables_metadata().
instat_object$set("public", "get_variables_metadata", function(data_name, data_type = "all", convert_to_character = FALSE, property, column, error_if_no_property = TRUE, update = FALSE, direct_from_attributes = FALSE) {
  if(missing(data_name)) {
    retlist <- list()
    for (curr_obj in private$.data_objects) {
      retlist[[curr_obj$get_metadata(data_name_label)]] = curr_obj$get_variables_metadata(data_type = data_type, convert_to_character = convert_to_character, property = property, column = column, error_if_no_property = error_if_no_property, update = update, direct_from_attributes = direct_from_attributes)
    }
    return(retlist)
  }
  else return(self$get_data_objects(data_name)$get_variables_metadata(data_type = data_type, convert_to_character = convert_to_character, property = property, column = column, error_if_no_property = error_if_no_property, update = update, direct_from_attributes = direct_from_attributes))
}
)
# Build a data frame with one row per data object and one column per metadata
# field; multi-valued fields are collapsed to comma-separated strings.
instat_object$set("public", "get_combined_metadata", function(convert_to_character = FALSE) {
  retlist <- data.frame()
  for (curr_obj in private$.data_objects) {
    templist = curr_obj$get_metadata()
    # seq_along() is empty for zero-length metadata; the original
    # 1:length(templist) wrongly iterated c(1, 0) in that case and errored.
    for ( j in seq_along(templist) ) {
      if(length(templist[[j]]) > 1) templist[[j]] <- paste(as.character(templist[[j]]), collapse = ",")
      retlist[curr_obj$get_metadata(data_name_label), names(templist[j])] = templist[[j]]
    }
  }
  if(convert_to_character) return(convert_to_character_matrix(retlist, FALSE))
  else return(retlist)
}
)
# Overall metadata accessor: with no argument the whole metadata list is
# returned; with a name, just that entry (erroring if it does not exist).
instat_object$set("public", "get_metadata", function(name) {
  if(missing(name)) return(private$.metadata)
  if(!is.character(name)) stop("name must be a character")
  if(!(name %in% names(private$.metadata))) stop(paste(name, "not found in metadata"))
  private$.metadata[[name]]
}
)
# Names of all data objects currently held by the container.
instat_object$set("public", "get_data_names", function() {
  return(names(private$.data_objects))
}
)
# Data "dirty" flag. With an argument: that data object's flag. Without:
# TRUE when the container itself or any contained data object has changed.
instat_object$set("public", "get_data_changed", function(data_name) {
  if(!missing(data_name)) {
    return(self$get_data_objects(data_name)$data_changed)
  }
  if(self$data_objects_changed) return(TRUE)
  for(obj in private$.data_objects) {
    if(obj$data_changed) return(TRUE)
  }
  FALSE
}
)
# Variables-metadata "dirty" flag. With an argument: that data object's
# flag. Without: TRUE when the container or any data object has changed
# variables metadata.
instat_object$set("public", "get_variables_metadata_changed", function(data_obj) {
  if(missing(data_obj)) {
    if(private$.data_objects_changed) return(TRUE)
    # vapply keeps the result logical even when there are no data objects;
    # the original any(sapply(...)) errored in that case because sapply
    # returns an empty list, which any() cannot coerce.
    return(any(vapply(private$.data_objects, function(x) x$variables_metadata_changed, logical(1))))
  }
  else {
    return(self$get_data_objects(data_obj)$variables_metadata_changed)
  }
}
)
# Metadata "dirty" flag. With an argument: that data object's flag.
# Without: TRUE when the container itself or any contained data object has
# changed metadata.
instat_object$set("public", "get_metadata_changed", function(data_obj) {
  if(!missing(data_obj)) {
    return(self$get_data_objects(data_obj)$metadata_changed)
  }
  if(private$.data_objects_changed) return(TRUE)
  for(obj in private$.data_objects) {
    if(obj$metadata_changed) return(TRUE)
  }
  FALSE
}
)
# Number of data objects (data frames) currently held.
instat_object$set("public", "dataframe_count", function() {
  return(length(private$.data_objects))
}
)
# Set the data "dirty" flag, either on one named data object or (when
# data_name is "") on every data object.
instat_object$set("public", "set_data_frames_changed", function(data_name = "", new_val) {
  if(data_name != "") {
    self$get_data_objects(data_name)$set_data_changed(new_val)
  } else {
    for(obj in private$.data_objects) {
      obj$data_changed <- new_val
    }
  }
}
)
# Set the variables-metadata "dirty" flag, either on one named data object
# or (when data_name is "") on every data object.
instat_object$set("public", "set_variables_metadata_changed", function(data_name = "", new_val) {
  if(data_name != "") {
    self$get_data_objects(data_name)$set_variables_metadata_changed(new_val)
  } else {
    for(obj in private$.data_objects) {
      obj$variables_metadata_changed <- new_val
    }
  }
}
)
# Set the metadata "dirty" flag, either on one named data object or (when
# data_name is "") on every data object.
instat_object$set("public", "set_metadata_changed", function(data_name = "", new_val) {
  if(data_name != "") {
    self$get_data_objects(data_name)$set_metadata_changed(new_val)
  } else {
    for(obj in private$.data_objects) {
      obj$set_metadata_changed(new_val)
    }
  }
}
)
# Add one or more columns to a named data object, delegating to the data
# object's add_columns_to_data(). use_col_name_as_prefix is only forwarded
# when the caller supplied it, so the data object's own default applies
# otherwise.
instat_object$set("public", "add_columns_to_data", function(data_name, col_name = "", col_data, use_col_name_as_prefix = FALSE, hidden = FALSE, before = FALSE, adjacent_column, num_cols) {
  if(missing(use_col_name_as_prefix)) self$get_data_objects(data_name)$add_columns_to_data(col_name, col_data, hidden = hidden, before = before, adjacent_column = adjacent_column, num_cols = num_cols)
  else self$get_data_objects(data_name)$add_columns_to_data(col_name, col_data, use_col_name_as_prefix = use_col_name_as_prefix, hidden = hidden, before = before, adjacent_column = adjacent_column, num_cols = num_cols)
}
)
# Retrieve columns either from a named data object, or (from_stacked_data =
# TRUE) from a stacked data frame stored as a variable named data_name in
# the search path. A single column is unwrapped to a vector unless
# force_as_data_frame = TRUE.
instat_object$set("public", "get_columns_from_data", function(data_name, col_names, from_stacked_data = FALSE,
                                                              force_as_data_frame = FALSE, use_current_filter = TRUE) {
  if(missing(data_name)) stop("data_name is required")
  if(!from_stacked_data) {
    if(!data_name %in% names(private$.data_objects)) stop(paste(data_name, "not found"))
    self$get_data_objects(data_name)$get_columns_from_data(col_names, force_as_data_frame, use_current_filter = use_current_filter)
  }
  else {
    if(!exists(data_name)) stop(paste(data_name, "not found."))
    # exists() only checks the name; the original then treated the string
    # data_name itself as the data (names(<string>) is NULL, so the check
    # below always failed). Fetch the actual object with get().
    stacked_data <- get(data_name)
    if(!all(col_names %in% names(stacked_data))) stop("Not all column names were found in data")
    if(length(col_names)==1 && !force_as_data_frame) return(stacked_data[[col_names]])
    else return(stacked_data[col_names])
  }
}
)
# Store an object either on a named data object (when data_name is given)
# or in the container's overall objects list, generating a default name if
# none is supplied and warning before replacing an existing one.
instat_object$set("public", "add_object", function(data_name, object, object_name) {
  if(!missing(data_name)) {
    self$get_data_objects(data_name)$add_object(object = object, object_name = object_name)
  } else {
    if(missing(object_name)) object_name = next_default_item("object", names(private$.objects))
    if(object_name %in% names(private$.objects)) message(paste("An object called", object_name, "already exists. It will be replaced."))
    private$.objects[[object_name]] <- object
  }
}
)
# Retrieve stored objects. data_name selects a data object's objects or,
# with overall_label, the container's own objects; when data_name is missing
# the objects from all data objects (plus, optionally, the overall list) are
# returned. type filters by object kind (see get_object_names); as_list
# wraps a single result in a nested named list.
instat_object$set("public", "get_objects", function(data_name, object_name, include_overall = TRUE, as_list = FALSE, type = "", include_empty = FALSE) {
  if(missing(data_name)) {
    if(!missing(object_name)) {
      # No data_name but a specific object_name: look it up in the overall list.
      curr_objects = private$.objects[self$get_object_names(data_name = overall_label, type = type)]
      if(!(object_name %in% names(curr_objects))) stop(object_name, "not found.")
      else out = curr_objects[[object_name]]
    }
    else {
      # Collect the (type-filtered) objects from every data object.
      out = sapply(self$get_data_objects(as_list = TRUE), function(x) x$get_objects(type = type))
      if(include_overall) out[[overall_label]] <- private$.objects[self$get_object_names(data_name = overall_label, type = type)]
      if(!include_empty) out = out[sapply(out, function(x) length(x) > 0)]
    }
    return(out)
  }
  else {
    if(data_name == overall_label) {
      curr_objects = private$.objects[self$get_object_names(data_name = data_name, type = type)]
      if(!missing(object_name)) {
        if(!(object_name %in% names(curr_objects))) stop(object_name, "not found.")
        else out = curr_objects[[object_name]]
      }
      else out = curr_objects
    }
    else out = self$get_data_objects(data_name)$get_objects(object_name = object_name, type = type)
    if(as_list) {
      # Nest the result as list(<data_name> = list(<object_name> = out)).
      lst = list()
      lst[[data_name]][[object_name]] <- out
      return(lst)
    }
    else return(out)
  }
}
)
# Names of stored objects, optionally filtered by type: graphs are objects
# whose class includes "ggplot"/"gg"; models are everything else. With no
# data_name, per-data-object name vectors are returned (plus the overall
# list when include_overall = TRUE); excluded_items removes specific names.
instat_object$set("public", "get_object_names", function(data_name, include_overall = TRUE, include, exclude, type = "", include_empty = FALSE, as_list = FALSE, excluded_items = c()) {
  if(type == "") overall_object_names = names(private$.objects)
  else {
    # Split the overall objects into models vs graphs by class.
    if(type == model_label) overall_object_names = names(private$.objects)[!sapply(private$.objects, function(x) any(c("ggplot", "gg") %in% class(x)))]
    else if(type == graph_label) overall_object_names = names(private$.objects)[sapply(private$.objects, function(x) any(c("ggplot", "gg") %in% class(x)))]
    else stop("type: ", type, " not recognised")
  }
  if(missing(data_name)) {
    if(missing(type)) out = sapply(self$get_data_objects(), function(x) x$get_object_names())
    else out = sapply(self$get_data_objects(), function(x) x$get_object_names(type = type))
    if(include_overall) out[[overall_label]] <- overall_object_names
    if(!include_empty) out = out[sapply(out, function(x) length(x) > 0)]
    if(as_list) out = as.list(out)
    return(out)
  }
  else {
    if(data_name == overall_label) {
      if(length(excluded_items) > 0) {
        ex_ind = which(overall_object_names %in% excluded_items)
        if(length(ex_ind) != length(excluded_items)) warning("Some of the excluded_items were not found in the list of objects")
        if(length(ex_ind) > 0) overall_object_names = overall_object_names[-ex_ind]
      }
      if(as_list) {
        lst = list()
        lst[[overall_label]] <- overall_object_names
        return(lst)
      }
      else return(overall_object_names)
    }
    else return(self$get_data_objects(data_name)$get_object_names(type, as_list = as_list, excluded_items = excluded_items))
  }
}
)
# Rename a stored object, either in the overall list (data_name missing or
# overall_label) or within a named data object. Fails if the old name is
# absent or the new name is already taken.
instat_object$set("public", "rename_object", function(data_name, object_name, new_name) {
  if(missing(data_name) || data_name == overall_label) {
    if(!object_name %in% names(private$.objects)) stop(object_name, " not found in overall objects list")
    if(new_name %in% names(private$.objects)) stop(new_name, " is already an object name. Cannot rename ", object_name, " to ", new_name)
    names(private$.objects)[names(private$.objects) == object_name] <- new_name
  }
  else self$get_data_objects(data_name)$rename_object(object_name = object_name, new_name = new_name)
}
)
# Delete one or more stored objects, either from the overall list
# (data_name missing or overall_label) or from a named data object.
instat_object$set("public", "delete_objects", function(data_name, object_names) {
  if(missing(data_name) || data_name == overall_label) {
    if(!all(object_names %in% names(private$.objects))) stop("Not all object_names found in overall objects list")
    # Use %in% so that multiple names are matched correctly; the original
    # == recycled object_names against the names vector element-wise and
    # could delete the wrong entries when more than one name was given.
    private$.objects[names(private$.objects) %in% object_names] <- NULL
  }
  else self$get_data_objects(data_name)$delete_objects(object_names = object_names)
}
)
# Reorder the stored objects. new_order must be a permutation of the current
# object names; delegation to a data object mirrors the overall case.
instat_object$set("public", "reorder_objects", function(data_name, new_order) {
if(missing(data_name) || data_name == overall_label) {
if(length(new_order) != length(private$.objects) || !setequal(new_order, names(private$.objects))) stop("new_order must be a permutation of the current object names.")
self$set_objects(private$.objects[new_order])
}
else self$get_data_objects(data_name)$reorder_objects(new_order = new_order)
}
)
# Extract a value from a stored object, descending up to three levels deep
# (object[[value1]][[value2]][[value3]]). Each level is validated against the
# names of the level above; a missing earlier value makes later values moot
# (a warning is issued and they are ignored).
instat_object$set("public", "get_from_object", function(data_name, object_name, value1, value2, value3) {
if(missing(data_name) || missing(object_name)) stop("data_name and object_name must both be specified.")
curr_object = self$get_objects(data_name = data_name, object_name = object_name)
if(missing(value1)) {
if(!missing(value2) || !missing(value3)) warning("value1 is missing so value2 and value3 will be ignored.")
# No keys supplied: return the whole object.
return(curr_object[])
}
if(!value1 %in% names(curr_object)) stop(value1, " not found in ", object_name)
if(missing(value2)) {
if(!missing(value3)) warning("value2 is missing so value3 will be ignored.")
return(curr_object[[value1]])
}
else {
if(!value2 %in% names(curr_object[[value1]])) stop(paste0(value2, " not found in ", object_name,"[[\"",value1,"\"]]"))
if(missing(value3)) return(curr_object[[value1]][[value2]])
else {
if(!value3 %in% names(curr_object[[value1]][[value2]])) stop(paste0(value3, " not found in ", object_name,"[[\"",value1,"\"]]","[[\"",value2,"\"]]"))
return(curr_object[[value1]][[value2]][[value3]])
}
}
}
)
# --- Model and graph convenience wrappers -----------------------------------
# Each method below is a thin wrapper over the generic object methods
# (add_object / get_objects / get_object_names / get_from_object), fixing the
# type to model_label or graph_label respectively.

# Store a fitted model under model_name.
instat_object$set("public", "add_model", function(data_name, model, model_name) {
self$add_object(data_name = data_name, object = model, object_name = model_name)
}
)
# Retrieve stored model object(s).
instat_object$set("public", "get_models", function(data_name, model_name, include_overall = TRUE) {
self$get_objects(data_name = data_name, object_name = model_name, include_overall = include_overall, type = model_label)
}
)
# List names of stored models.
instat_object$set("public", "get_model_names", function(data_name, include_overall = TRUE, include, exclude, include_empty = FALSE, as_list = FALSE, excluded_items = c()) {
self$get_object_names(data_name = data_name, include_overall = include_overall, include, exclude, type = model_label, include_empty = include_empty, as_list = as_list, excluded_items = excluded_items)
}
)
# Extract a component (up to three levels) from a stored model.
instat_object$set("public", "get_from_model", function(data_name, model_name, value1, value2, value3) {
self$get_from_object(data_name = data_name, object_name = model_name, value1 = value1, value2 = value2, value3 = value3)
}
)
# Store a graph under graph_name.
instat_object$set("public", "add_graph", function(data_name, graph, graph_name) {
self$add_object(data_name = data_name, object = graph, object_name = graph_name)
}
)
# Retrieve stored graph object(s).
instat_object$set("public", "get_graphs", function(data_name, graph_name, include_overall = TRUE) {
self$get_objects(data_name = data_name, object_name = graph_name, include_overall = include_overall, type = graph_label)
}
)
# List names of stored graphs.
instat_object$set("public", "get_graph_names", function(data_name, include_overall = TRUE, include, exclude, include_empty = FALSE, as_list = FALSE, excluded_items = c()) {
self$get_object_names(data_name = data_name, include_overall = include_overall, include, exclude, type = graph_label, include_empty = include_empty, as_list = as_list, excluded_items = excluded_items)
}
)
# --- Filter wrappers --------------------------------------------------------
# Filters belong to individual data objects; each method here simply
# delegates to the data object identified by data_name.

# Register a filter on a data frame, optionally making it the current filter.
instat_object$set("public", "add_filter", function(data_name, filter, filter_name = "", replace = TRUE, set_as_current_filter = FALSE) {
if(missing(filter)) stop("filter is required")
self$get_data_objects(data_name)$add_filter(filter, filter_name, replace, set_as_current_filter)
}
)
# The active filter of a data frame (field access, not a copy).
instat_object$set("public", "current_filter", function(data_name) {
return(self$get_data_objects(data_name)$current_filter)
}
)
# Switch the active filter by name.
instat_object$set("public", "set_current_filter", function(data_name, filter_name = "") {
self$get_data_objects(data_name)$set_current_filter(filter_name)
}
)
# Look up a filter by name.
instat_object$set("public", "get_filter", function(data_name, filter_name) {
return(self$get_data_objects(data_name)$get_filter(filter_name))
}
)
# Retrieve the active filter object.
instat_object$set("public", "get_current_filter", function(data_name) {
self$get_data_objects(data_name)$get_current_filter()
}
)
# Filter names for one data frame, or for all of them when data_name is missing.
instat_object$set("public", "get_filter_names", function(data_name, as_list = FALSE, include = list(), exclude = list(), excluded_items = c()) {
if(missing(data_name)) {
#TODO what to do with excluded_items in this case
return(lapply(self$get_data_objects(), function(x) x$get_filter_names(include = include, exclude = exclude)))
}
else {
return(self$get_data_objects(data_name)$get_filter_names(as_list = as_list, include = include, exclude = exclude, excluded_items = excluded_items))
}
}
)
# Clear the active filter.
instat_object$set("public", "remove_current_filter", function(data_name) {
self$get_data_objects(data_name)$remove_current_filter()
}
)
# TRUE when a (non-trivial) filter is currently applied.
instat_object$set("public", "filter_applied", function(data_name) {
self$get_data_objects(data_name)$filter_applied()
}
)
# Human-readable description of a filter.
instat_object$set("public", "filter_string", function(data_name, filter_name) {
self$get_data_objects(data_name)$filter_string(filter_name)
}
)
# Replace cell values, either by exact old_value or by a [start, end] range.
instat_object$set("public", "replace_value_in_data", function(data_name, col_names, rows, old_value, start_value = NA, end_value = NA, new_value, closed_start_value = TRUE, closed_end_value = TRUE) {
self$get_data_objects(data_name)$replace_value_in_data(col_names, rows, old_value, start_value, end_value, new_value, closed_start_value, closed_end_value)
}
)
# Superseded single-cell variant kept for reference:
# instat_object$set("public", "replace_value_in_data", function(data_name, col_name, row, new_value) {
# self$get_data_objects(data_name)$replace_value_in_data(col_name, row, new_value)
# }
# )
# --- Column/row manipulation wrappers ---------------------------------------
# Thin delegations to the owning data object unless noted otherwise.

# Rename a single column.
instat_object$set("public", "rename_column_in_data", function(data_name, column_name, new_val) {
self$get_data_objects(data_name)$rename_column_in_data(column_name, new_val)
}
)
#TODO remove this method
instat_object$set("public", "remove_columns_in_data_from_start_position", function(data_name, start_pos, col_numbers) {
self$get_data_objects(data_name)$remove_columns_in_data_from_start_position(start_pos = start_pos, col_numbers = col_numbers)
}
)
# Drop the named columns.
instat_object$set("public", "remove_columns_in_data", function(data_name, cols) {
self$get_data_objects(data_name)$remove_columns_in_data(cols = cols)
}
)
# Drop rows by row name.
instat_object$set("public", "remove_rows_in_data", function(data_name, row_names) {
self$get_data_objects(data_name)$remove_rows_in_data(row_names = row_names)
}
)
# Next unused default column name (e.g. prefix1, prefix2, ...). With no
# data_name, returns a named list over every data frame.
instat_object$set("public", "get_next_default_column_name", function(data_name, prefix) {
if(missing(data_name)) {
out = list()
for(curr_obj in private$.data_objects) {
out[[curr_obj$get_metadata(data_name_label)]] = curr_obj$get_next_default_column_name(prefix)
}
return(out)
}
if(!is.character(data_name)) stop("data_name must be of type character")
if(!data_name %in% names(private$.data_objects)) stop(paste("dataframe: ", data_name, " not found"))
return(self$get_data_objects(data_name)$get_next_default_column_name(prefix))
}
)
# Column names of one data frame, or of all of them when data_name is missing.
instat_object$set("public", "get_column_names", function(data_name, as_list = FALSE, include = list(), exclude = list(), excluded_items = c()) {
if(missing(data_name)) {
#TODO what to do with excluded items in this case?
return(lapply(self$get_data_objects(), function(x) x$get_column_names(include = include, exclude = exclude)))
}
else {
return(self$get_data_objects(data_name)$get_column_names(as_list, include, exclude, excluded_items = excluded_items))
}
}
)
# Reorder columns into col_order.
instat_object$set("public", "reorder_columns_in_data", function(data_name, col_order){
self$get_data_objects(data_name)$reorder_columns_in_data(col_order = col_order)
}
)
#TODO Think how to use row_data argument
instat_object$set("public", "insert_row_in_data", function(data_name, start_row, row_data = c(), number_rows, before = FALSE) {
self$get_data_objects(data_name)$insert_row_in_data(start_row = start_row, row_data = row_data, number_rows = number_rows, before = before)
}
)
# Number of rows of a data frame.
instat_object$set("public", "get_data_frame_length", function(data_name) {
self$get_data_objects(data_name)$get_data_frame_length()
}
)
# Next unused default data-frame name at the instat level.
instat_object$set("public", "get_next_default_dataframe_name", function(prefix, include_index = TRUE, start_index = 1) {
next_default_item(prefix = prefix, existing_names = names(private$.data_objects), include_index = include_index, start_index = start_index)
}
)
# Remove a data frame (and its data object) from the instat object.
instat_object$set("public", "delete_dataframe", function(data_name) {
  # TODO need a set or append
  private$.data_objects[[data_name]] <- NULL
  # BUG FIX: assign via self$. The previous bare assignment
  # (data_objects_changed <- TRUE) only created a local variable inside this
  # method, so the changed flag was never actually set (compare
  # reorder_dataframes, which correctly uses self$data_objects_changed).
  self$data_objects_changed <- TRUE
}
)
# --- Data frame and metadata wrappers ---------------------------------------

# Levels of a factor column.
instat_object$set("public", "get_column_factor_levels", function(data_name,col_name = "") {
self$get_data_objects(data_name)$get_column_factor_levels(col_name)
}
)
# Summary data frame describing a factor column.
instat_object$set("public", "get_factor_data_frame", function(data_name,col_name = "") {
self$get_data_objects(data_name)$get_factor_data_frame(col_name)
}
)
# Sort the rows by columns or by row names.
instat_object$set("public", "sort_dataframe", function(data_name, col_names = c(), decreasing = FALSE, na.last = TRUE, by_row_names = FALSE, row_names_as_numeric = TRUE) {
self$get_data_objects(data_name)$sort_dataframe(col_names = col_names, decreasing = decreasing, na.last = na.last, by_row_names = by_row_names, row_names_as_numeric = row_names_as_numeric)
}
)
# Rename a data frame: the instat-level list name and the data object's own
# name metadata must be updated together.
instat_object$set("public", "rename_dataframe", function(data_name, new_value = "") {
data_obj = self$get_data_objects(data_name)
names(private$.data_objects)[names(private$.data_objects) == data_name] <- new_value
data_obj$append_to_metadata(data_name_label, new_value)
data_obj$set_data_changed(TRUE)
}
)
# Convert columns to another type (factor, numeric, character, ...).
instat_object$set("public", "convert_column_to_type", function(data_name, col_names = c(), to_type ="factor", factor_numeric = "by_levels") {
self$get_data_objects(data_name)$convert_column_to_type(col_names = col_names, to_type = to_type, factor_numeric = factor_numeric)
}
)
# Set a metadata property on specific columns.
instat_object$set("public", "append_to_variables_metadata", function(data_name, col_names, property, new_val = "") {
self$get_data_objects(data_name)$append_to_variables_metadata(col_names, property, new_val)
}
)
# Set a metadata property on a data frame.
instat_object$set("public", "append_to_dataframe_metadata", function(data_name, property, new_val = "") {
self$get_data_objects(data_name)$append_to_metadata(property, new_val)
}
)
# Set a metadata property on the instat object itself (stored as an attribute).
instat_object$set("public", "append_to_metadata", function(property, new_val = "") {
if(missing(property)) stop("property and new_val arguments must be specified.")
if(!is.character(property)) stop("property must be of type character")
attr(self, property) <- new_val
self$metadata_changed <- TRUE
self$append_to_changes(list(Added_metadata, property))
}
)
# Add a metadata field across all data frames (overall_label) or across the
# variables of one data frame.
instat_object$set("public", "add_metadata_field", function(data_name, property, new_val = "") {
if(missing(property)) stop("property and new_val arguments must be specified.")
if(data_name == overall_label) {
invisible(sapply(self$get_data_objects(), function(x) x$append_to_metadata(property, new_val)))
}
else invisible(sapply(self$get_data_objects(data_name, as_list = TRUE), function(x) x$append_to_variables_metadata(property = property, new_val = new_val)))
}
)
# Reorder data frames; data_frames_order must be a permutation of the names.
instat_object$set("public", "reorder_dataframes", function(data_frames_order) {
if(length(data_frames_order) != length(names(private$.data_objects))) stop("number data frames to order should be equal to number of dataframes in the object")
if(!setequal(data_frames_order,names(private$.data_objects))) stop("data_frames_order must be a permutation of the dataframe names.")
self$set_data_objects(private$.data_objects[data_frames_order])
self$data_objects_changed <- TRUE
}
)
# --- Factor and column state wrappers ---------------------------------------

# Duplicate columns within a data frame.
instat_object$set("public", "copy_columns", function(data_name, col_names = "") {
self$get_data_objects(data_name)$copy_columns(col_names = col_names)
}
)
# Drop factor levels that no longer occur in the data.
instat_object$set("public", "drop_unused_factor_levels", function(data_name, col_name) {
self$get_data_objects(data_name)$drop_unused_factor_levels(col_name = col_name)
}
)
# Replace the level set of a factor column.
instat_object$set("public", "set_factor_levels", function(data_name, col_name, new_levels) {
self$get_data_objects(data_name)$set_factor_levels(col_name = col_name, new_levels = new_levels)
}
)
# Change the reference (first) level of a factor column.
instat_object$set("public", "set_factor_reference_level", function(data_name, col_name, new_ref_level) {
self$get_data_objects(data_name)$set_factor_reference_level(col_name = col_name, new_ref_level = new_ref_level)
}
)
# Number of columns of a data frame.
instat_object$set("public", "get_column_count", function(data_name) {
return(self$get_data_objects(data_name)$get_column_count())
}
)
# Reorder the levels of a factor column.
instat_object$set("public", "reorder_factor_levels", function(data_name, col_name, new_level_names) {
self$get_data_objects(data_name)$reorder_factor_levels(col_name = col_name, new_level_names = new_level_names)
}
)
# The data type of a column.
instat_object$set("public","get_data_type", function(data_name, col_name) {
self$get_data_objects(data_name)$get_data_type(col_name = col_name)
}
)
# Deep-copy a whole data frame under a new (or auto-generated) name.
instat_object$set("public","copy_data_frame", function(data_name, new_name) {
curr_obj = self$get_data_objects(data_name)$clone(deep = TRUE)
if(missing(new_name)) new_name = next_default_item(data_name, self$get_data_names())
self$append_data_object(new_name, curr_obj)
curr_obj$data_changed <- TRUE
}
)
# Hide columns from display.
instat_object$set("public","set_hidden_columns", function(data_name, col_names) {
self$get_data_objects(data_name)$set_hidden_columns(col_names = col_names)
}
)
# Unhide columns of one data frame, or of every data frame when data_name is missing.
instat_object$set("public","unhide_all_columns", function(data_name) {
if(missing(data_name)) invisible(sapply(self$get_data_objects(), function(obj) obj$unhide_all_columns()))
else self$get_data_objects(data_name)$unhide_all_columns()
}
)
# Replace the row names.
instat_object$set("public","set_row_names", function(data_name, row_names) {
self$get_data_objects(data_name)$set_row_names(row_names = row_names)
}
)
# Current row names.
instat_object$set("public","get_row_names", function(data_name) {
self$get_data_objects(data_name)$get_row_names()
}
)
# Mark columns as protected (not editable).
instat_object$set("public","set_protected_columns", function(data_name, col_names) {
self$get_data_objects(data_name)$set_protected_columns(col_names = col_names)
}
)
# Metadata field names. Three modes:
#  * data_name == overall_label: names of the combined (instat-level) metadata,
#    optionally minus excluded_items;
#  * a specific data_name: that data frame's variables-metadata field names;
#  * data_name missing: a named list over the overall metadata (when
#    include_overall) plus every data frame.
instat_object$set("public","get_metadata_fields", function(data_name, include_overall, as_list = FALSE, include, exclude, excluded_items = c()) {
if(!missing(data_name)) {
if(data_name == overall_label) {
out = names(self$get_combined_metadata())
if(length(excluded_items) > 0){
ex_ind = which(out %in% excluded_items)
# Warn rather than stop if some exclusions are unknown.
if(length(ex_ind) != length(excluded_items)) warning("Some of the excluded_items were not found in the list of objects")
if(length(ex_ind) > 0) out = out[-ex_ind]
}
if(as_list) {
lst = list()
lst[[data_name]] <- out
return(lst)
}
else return(out)
}
else return(self$get_data_objects(data_name)$get_variables_metadata_fields(as_list = as_list, include = include, exclude = exclude, excluded_items = excluded_items))
}
else {
#TODO what to do with excluded_items in this case
out = list()
if(include_overall) out[[overall_label]] <- names(self$get_combined_metadata())
for(data_obj_name in self$get_data_names()) {
out[[data_obj_name]] <- self$get_data_objects(data_obj_name)$get_variables_metadata_fields(as_list = FALSE, include = include, exclude = exclude)
}
return(out)
}
}
)
# --- Miscellaneous wrappers -------------------------------------------------

# Freeze a column (keep it visible while scrolling).
instat_object$set("public","freeze_columns", function(data_name, column) {
self$get_data_objects(data_name)$freeze_columns(column = column)
}
)
# Undo column freezing.
instat_object$set("public","unfreeze_columns", function(data_name) {
self$get_data_objects(data_name)$unfreeze_columns()
}
)
# Whether a variables-metadata property exists/holds for a column.
instat_object$set("public","is_variables_metadata", function(data_name, property, column, update = TRUE) {
self$get_data_objects(data_name)$is_variables_metadata(property, column, update)
}
)
# TRUE when a data frame with this name is loaded.
instat_object$set("public","data_frame_exists", function(data_name) {
return(data_name %in% names(private$.data_objects))
}
)
# Define key columns on a data frame; once any key exists, every data frame
# without an is_linkable flag is marked not linkable.
instat_object$set("public","add_key", function(data_name, col_names) {
self$get_data_objects(data_name)$add_key(col_names)
invisible(sapply(self$get_data_objects(), function(x) if(!x$is_metadata(is_linkable)) x$append_to_metadata(is_linkable, FALSE)))
}
)
# Links between data frames (field access).
instat_object$set("public","get_links", function() {
return(private$.links)
}
)
# Declare structure columns of up to three types.
instat_object$set("public","set_structure_columns", function(data_name, struc_type_1 = c(), struc_type_2 = c(), struc_type_3 = c()) {
self$get_data_objects(data_name)$set_structure_columns(struc_type_1, struc_type_2, struc_type_3)
}
)
# Record dependency relationships between columns.
instat_object$set("public","add_dependent_columns", function(data_name, columns, dependent_cols) {
self$get_data_objects(data_name)$add_dependent_columns(columns, dependent_cols)
}
)
|
38254a39bbe17f06da04d63bd3d0a00be324aa57
|
e5b3c5ceb002c9aa926e5a1caee2cfe6beb43e1b
|
/5-divergence/5c-phylogenetic-signal/phylogenetic_distance_correlation.r
|
46ea1a14341c0dc76e3c6723a98a83d9e8005cce
|
[] |
no_license
|
brendane/symbiosis_gene_evolution_initial
|
4e4f9edc146fd2f1efe79e49b7d8e63d8b55941b
|
c69a2357c561794c79e2ec1189886b58e9354dad
|
refs/heads/master
| 2022-11-20T23:13:48.105230
| 2020-07-26T23:53:21
| 2020-07-26T23:53:21
| 282,684,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,150
|
r
|
phylogenetic_distance_correlation.r
|
#!/usr/bin/env Rscript
#
# Report the squared correlation coefficient between the median single-copy
# core distance among pairs of strains and the pairwise distances among
# sequences in a gene.
#
# Usage: phylogenetic_distance_correlation.r <pairwise_div.tsv> <gene_data.tsv>
#
library(data.table)

argv = commandArgs(trailingOnly=TRUE)

## Read pairwise divergence data and gene summary data
pairwise_div = fread(argv[1])
strains = unique(c(pairwise_div[['strain1']], pairwise_div[['strain2']]))
gene_data = read.csv(argv[2], sep='\t', comment.char='#', header=TRUE, as.is=TRUE,
                     check.names=FALSE)

## Single-copy core genes: present exactly once in every strain
scc_genes = gene_data[gene_data[, 'n_strains'] == length(strains) &
                      gene_data[, 'n_genes'] == length(strains), 'subset']

## Create a matrix of median pairwise distances for the single copy core genes
scc_median_dist = matrix(nrow=length(strains), ncol=length(strains),
                         dimnames=list(strains, strains), data=NaN)
scc_pairwise = pairwise_div[orthoset %in% scc_genes]
for(i in seq_along(strains)) {
    s1 = strains[i]
    for(j in seq_along(strains)) {
        s2 = strains[j]
        ## Only fill the lower triangle; mirror into the upper triangle below.
        if(j > i) next
        scc_median_dist[i, j] = median(c(scc_pairwise[strain1 == s1 & strain2 == s2][['pairwise_prot_dist']],
                                         scc_pairwise[strain1 == s2 & strain2 == s1][['pairwise_prot_dist']]))
        scc_median_dist[j, i] = scc_median_dist[i, j]
    }
}
diag(scc_median_dist) = 0

cat('gene\tr2\tr2_no_paralogs\n', file=stdout())
## BUG FIX: seq_len(nrow(...)) instead of 1:nrow(...) so the loop is skipped
## (rather than iterating over c(1, 0)) when gene_data has no rows.
for(i in seq_len(nrow(gene_data))) {
    if(i %% 100 == 0) cat(i, 'genes done\n', file=stderr())
    gene = gene_data[i, 'subset']
    pd = pairwise_div[orthoset == gene]
    gene_dist = pd[['pairwise_prot_dist']]
    ## Paralog comparisons: both sequences come from the same strain.
    para = pd[['strain1']] == pd[['strain2']]
    species_dist = numeric(length(gene_dist)) * NaN
    for(j in seq_along(species_dist)) {
        species_dist[j] = scc_median_dist[pd[j][['strain1']],
                                          pd[j][['strain2']]]
    }
    r2 = cor(species_dist, gene_dist)^2
    r2_nopara = NaN
    if(sum(!para) > 0) {
        r2_nopara = cor(species_dist[!para], gene_dist[!para])^2
    }
    cat(gene, '\t', r2, '\t', r2_nopara, '\n', sep='', file=stdout())
}
|
046c8aa69ced206634661ac432ae347166c10d0a
|
72991c91f4df86d7e06c6235446ff1bf35b9cc9d
|
/t_nsah/srank_t_nsah.R
|
57ef059809cbf0fce54a7a1f58a0df5363aa4b3b
|
[
"MIT"
] |
permissive
|
samuelcg/thesis-data-scripts
|
6b22ef73ff4f9399211cfd45aa09a878e65264b8
|
71e7d745d512a1c2be36fcae0c28ac7b669bc80c
|
refs/heads/master
| 2021-01-19T22:17:43.715271
| 2017-04-25T00:56:00
| 2017-04-25T00:56:00
| 88,791,821
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,992
|
r
|
srank_t_nsah.R
|
## source('load_all_nsah.R')
source('../compute_src.R')

##################################
## Compute all possible unique pairs of SAFE sessions
##################################
t_nsah_session_pair_matrix <- t(combn(names(list_of_t_nsah_dfs), 2))
names_for_one_df <- names(list_of_t_nsah_dfs[[t_nsah_session_pair_matrix[1,1]]])

##################################
## Add a column to the matrix to hold the results of the spearman's test
##################################
src_matrix <- matrix(0, nrow=nrow(t_nsah_session_pair_matrix),ncol=length(names_for_one_df) - 1)
t_nsah_session_pair_matrix <- cbind(
  t_nsah_session_pair_matrix,
  src_matrix)

col_names <- names(list_of_t_nsah_dfs[[t_nsah_session_pair_matrix[1,1]]])[-c(1)]
col_names <- sapply(col_names, paste, "_src", sep="")
col_names <- c(c(
  "SAFE_session_a",
  "SAFE_session_b"),
  col_names)
colnames(t_nsah_session_pair_matrix) <- col_names

##################################
## Compute the spearmans_rank_coefficient across several fields for each pair of SAFE sessions
##################################
## Hoisted out of the loop: the attribute list is identical on every
## iteration (it was previously recomputed each pass).
cols_to_compute <- names(list_of_t_nsah_dfs[[t_nsah_session_pair_matrix[1,1]]])[-c(1)]
## seq_len() instead of 1:nrow() so an empty pair matrix skips the loop.
for (x in seq_len(nrow(t_nsah_session_pair_matrix))) {
  session_a <- t_nsah_session_pair_matrix[x,"SAFE_session_a"]
  session_b <- t_nsah_session_pair_matrix[x,"SAFE_session_b"]
  session_df_a <- list_of_t_nsah_dfs[[session_a]]
  session_df_b <- list_of_t_nsah_dfs[[session_b]]
  ## Spearman's rank correlation for one attribute of this session pair.
  compute_full_row_of_src <- function(attribute_str) {
    src <- compute_src(
      data_set_a=session_df_a[[attribute_str]],
      data_set_b=session_df_b[[attribute_str]])
    return(src$estimate)
  }
  row_of_src <- sapply(cols_to_compute, compute_full_row_of_src)
  t_nsah_session_pair_matrix[x,3:ncol(t_nsah_session_pair_matrix)] <- row_of_src
}

##################################
## Clean up
##################################
rm(
  src_matrix,
  col_names,
  session_a,
  session_b,
  row_of_src,
  session_df_a,
  session_df_b,
  x
)
|
08db3aaab7105a747abdaf09b463f8c36de31ddd
|
8ba4e3b939d3d4fe9c9b07d13ea6a05ab5e57d01
|
/inst/extdata/counts_by_town.R
|
351a453a2b18c46cfc9619f0e27a85775e5e7018
|
[
"BSD-2-Clause"
] |
permissive
|
COMHIS/fennica
|
8ce782cbdff37582c586820b3c26a4f549c2f7d3
|
f7f2e52b6e0b62a44a2c8106bb1ab5dd3edcc743
|
refs/heads/master
| 2023-08-19T03:01:34.138530
| 2022-04-20T17:23:36
| 2022-04-20T17:23:36
| 107,188,766
| 5
| 2
|
NOASSERTION
| 2023-07-04T08:44:37
| 2017-10-16T22:14:44
|
R
|
UTF-8
|
R
| false
| false
| 720
|
r
|
counts_by_town.R
|
#' @title counts_by_town
#' @description Plot publication year counts by town
#' @param df Main dataframe
#' @param str Name of town
#' @param file Filename
#' @export
#' @author Niko Ilomaki \email{niko.ilomaki@@helsinki.fi}
#' @references See citation("fennica")
#' @examples \dontrun{counts_by_town(df,"Hämeenlinna","Hameenlinna")}
#' @keywords utilities
counts_by_town <- function(df, str, file) {
  # Yearly publication counts for the given town, ordered by year.
  f <- df %>% filter(publication_place == str) %>% group_by(published_in) %>% tally() %>% arrange(published_in)
  # Running total of publications up to each year.
  f$cumul <- cumsum(f$n)
  # Non-cumulative counts plot; written under figure/ relative to the
  # working directory. NOTE(review): if plot() errors the png device is
  # left open — consider on.exit(dev.off()).
  png(paste0("figure/",file,"_noncumul.png"))
  plot(f$published_in,f$n)
  dev.off()
  # Cumulative counts plot.
  png(paste0("figure/",file,"_cumul.png"))
  plot(f$published_in,f$cumul)
  dev.off()
}
|
ea3bc61497ea83daa1cee93e33ec141a4c79c166
|
b4db1d76b46dba8e9250a18b19d4851166f13eba
|
/R/style_catalogue.R
|
117ea0da2004e3b6e109ba6961576cd00d54c602
|
[] |
no_license
|
adambouras/xltabr
|
1b199a5dcda6c8dd7a28a780815503866b139ddf
|
fd9aab00ba85fd0d6061ab97489747970258df55
|
refs/heads/master
| 2021-06-25T19:18:16.703340
| 2017-09-10T14:23:53
| 2017-09-10T14:29:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,295
|
r
|
style_catalogue.R
|
# Use default .xlsx style catalogue to initialise the catalogue of styles.
# Imports the default workbook styles first, then the number formats.
style_catalogue_initialise <- function(tab) {
  # Removed unused locals: `path` and `path_num` were computed here but
  # never used — the import helpers resolve their own paths.
  tab <- style_catalogue_xlsx_import(tab)
  tab <- style_catalogue_import_num_formats(tab)
  tab
}
# Add the default number-format styles to the catalogue, one entry per row
# of the cell-format lookup table (style_name -> numFmt style key).
style_catalogue_import_num_formats <- function(tab){
  lookup_df <- utils::read.csv(get_cell_format_path(), stringsAsFactors = FALSE)
  # Convert dataframe into two vectors
  style_keys <- lookup_df$excel_format
  style_names <- lookup_df$style_name
  # BUG FIX: seq_along() instead of 1:length() — an empty lookup table
  # previously iterated over c(1, 0) and indexed out of bounds.
  for (i in seq_along(style_names)){
    tab$style_catalogue[[style_names[i]]] <- create_style_key(list(numFmt = style_keys[i]))
  }
  tab
}
# Iterate through the default style workbook, adding them to the styles catalogue
# Iterate through the default style workbook, adding them to the styles catalogue.
# Each styled cell's text is taken as the style name; the first definition of
# a name wins. Names listed in the sheet but lacking an explicit style object
# fall back to openxlsx's default style.
style_catalogue_xlsx_import <- function(tab) {
  # Reset the catalogue before (re-)importing the defaults.
  tab$style_catalogue <- list()
  path <- xltabr:::get_style_path()
  wb <- openxlsx::loadWorkbook(path)
  listed_styles <- openxlsx::readWorkbook(wb, colNames = FALSE)
  for (i in wb$styleObjects) {
    # BUG FIX: seq_along() instead of 1:length() — a style object with no
    # rows previously iterated over c(1, 0) and indexed out of bounds.
    for (iter in seq_along(i$rows)){
      r <- i$rows[iter]
      c <- i$cols[iter]
      suppressWarnings(cell <- openxlsx::readWorkbook(wb, rows = r, cols = c, colNames = FALSE, rowNames = FALSE))
      value <- cell[1, 1]
      # Styled cells with no text cannot name a style; skip them.
      if (is.null(value)) {
        next
      }
      style_list <- convert_style_object(i$style)
      style_key <- create_style_key(style_list)
      if (!value %in% names(tab$style_catalogue)){
        tab$style_catalogue[[value]] <- style_key
      }
    }
  }
  # Add in a checker to catch default style objects
  for (style_name in listed_styles$X1){
    if (!style_name %in% names(tab$style_catalogue)){
      tab$style_catalogue[[style_name]] <- create_style_key(convert_style_object(openxlsx::createStyle()))
    }
  }
  tab
}
# add_to_dictionary
# returns a style_key string based on our style catelogue objects (should also work with default R lists)
create_style_key <- function(style_list){
style_key <- gsub(' +', ' ', paste0(utils::capture.output(dput(style_list)), collapse = ""))
style_key
}
# Converts one property of a style object to its string key fragment
# ("<property>_<value1>%<value2>..."), or NULL when the property is unset.
# Deprecated; retained for backwards compatibility.
property_to_key <- function(style_object, property){
  prop_value <- style_object[[property]]
  if (is.null(prop_value)) {
    return(NULL)
  }
  paste(property, paste0(prop_value, collapse = "%"), sep = "_")
}
# Converts style_key to style_list
# NOTE(review): this deliberately uses eval(parse(text = ...)). It is safe
# only because keys are generated internally by create_style_key (dput
# output); never feed it user-supplied text.
style_key_parser <- function(style_key){
style_list <- eval(parse(text=style_key))
style_list
}
# looks at the cell_style_definition and build a final style_key for that cell.
# A definition is a "|"-separated chain of base style names; later styles in
# the chain override earlier ones property-by-property, except fontDecoration
# values, which accumulate (union) across the chain.
build_style <- function(tab, cell_style_definition){
# Convert cell style inheritence string into an array
seperated_style_definition <- unlist(strsplit(cell_style_definition, "\\|"))
## Run a check (that all base styles referenced in cell_style_definition exist in style_catalogue)
# Get array of base styles (i.e. styles that are not a combination of multiple styles (no pipes in names))
base_styles <- names(tab$style_catalogue)[!grepl("\\|",names(tab$style_catalogue))]
ussd <- unique(seperated_style_definition)
style_check <- ussd %in% base_styles
if(!all(style_check)){
stop(paste("The following style names:", paste0(ussd[!style_check], collapse = ", "), "are not in the style_catalogue please add then maunally or specify them in style.xlsx"))
}
##
# Single-name definitions resolve directly to their catalogue key.
if (length(seperated_style_definition) <= 1){
return (tab$style_catalogue[[seperated_style_definition]])
}
else{
# Otherwise build final style
previous_style <- style_key_parser(tab$style_catalogue[[seperated_style_definition[1]]])
for (i in 2:length(seperated_style_definition)){
current_style <- style_key_parser(tab$style_catalogue[[seperated_style_definition[i]]])
for (property_name in names(current_style)){
if(property_name == "fontDecoration"){
# fontDecoration is additive: merge and de-duplicate values.
previous_style[[property_name]] <- unique(c(previous_style[[property_name]], current_style[[property_name]]))
} else {
# All other properties: later styles in the chain win.
previous_style[property_name] <- current_style[property_name]
}
}
}
}
return (create_style_key(previous_style))
}
# Resolve each composite style definition to a style key and record any
# definitions that are not yet present in the catalogue. Note the key is
# computed unconditionally (build_style also validates the definition).
add_style_defintions_to_catelogue <- function(tab, style_definitions){
  for (def in style_definitions){
    key <- build_style(tab, def)
    already_known <- def %in% names(tab$style_catalogue)
    if (!already_known){
      tab$style_catalogue[[def]] <- key
    }
  }
  tab
}
# Convert between the two style representations used by the package:
#  * convert_to_S4 = TRUE: a style key/list -> an openxlsx S4 Style object
#    (numFmt must go through createStyle(); other properties are slot-assigned);
#  * convert_to_S4 = FALSE: an openxlsx Style object -> a plain list, with the
#    fill slots repackaged (fillFg/fillBg nested under "fill") to work around
#    an openxlsx export quirk.
convert_style_object <- function(style, convert_to_S4 = FALSE){
if(convert_to_S4){
# Accept either a key string or an already-parsed list.
if(typeof(style) == "character"){
style <- style_key_parser(style)
}
style_properties <- names(style)
if("numFmt" %in% style_properties){
# numFmt can only be set through createStyle(); remove it from the
# slot-assignment loop below.
style_properties <- style_properties[!("numFmt" == style_properties)]
out_style <- openxlsx::createStyle(numFmt = style[["numFmt"]])
} else{
out_style <- openxlsx::createStyle()
}
for (prop in style_properties){
out_style[[prop]] <- style[[prop]]
}
return(out_style)
} else {
out_style <- style$as.list()
# For some reason fills do not export properly so workaround is added here
if(any(c("fillFg", "fillBg") %in% names(out_style))){
out_style[["fill"]] <- list(fillFg = out_style[["fillFg"]], fillBg = out_style[["fillBg"]])
out_style[["fillFg"]] <- NULL
out_style[["fillBg"]] <- NULL
}
return(out_style)
}
}
# Apply every cell style in the combined style table to the workbook.
# Styles are grouped by resolved style_key so each distinct style is created
# and added to the workbook only once (covering all its rows/cols at once).
add_styles_to_wb <- function(tab){
full_table <- combine_all_styles(tab)
if(is.null(full_table)) stop("Please ensure add_from has appropriate values and a table has been added to tab")
# Bloody factors
full_table <- data.frame(lapply(full_table, as.character), stringsAsFactors=FALSE)
# Get a unique vector of style_name from full_table
unique_styles_definitions <- unique(full_table$style_name)
# Add unique style_name vector to style catalogue
tab <- add_style_defintions_to_catelogue(tab, unique_styles_definitions)
# add the style_key to each style name in full table
full_table$style_key <- unlist(tab$style_catalogue[full_table$style_name])
# iterate over a unique list of style_keys for and apply them to the workbook for each row col
unique_style_keys <- unique(full_table$style_key)
for (sk in unique_style_keys){
row_col_styles <- full_table[full_table$style_key == sk,]
rows <- row_col_styles$row
cols <- row_col_styles$col
created_style <- convert_style_object(sk, convert_to_S4 = TRUE)
openxlsx::addStyle(tab$wb, tab$misc$ws_name, created_style, rows, cols)
}
tab
}
# Not used in package - used for debug.
# TRUE when two style lists have the same set of names and identical values
# for every name (order-insensitive).
compare_style_lists <- function(a, b){
  sorted_a <- sort(names(a))
  sorted_b <- sort(names(b))
  if (length(sorted_a) != length(sorted_b)) {
    return(FALSE)
  }
  if (!all(sorted_a == sorted_b)) {
    return(FALSE)
  }
  matches <- vapply(sorted_a, function(prop) identical(a[prop], b[prop]), logical(1))
  all(matches)
}
|
c79d38cbf4970007e6961e3fbb64663be7ffb80e
|
1dcb69267472f4dbc1a48f6fba1264e20e97e28e
|
/man/pigorro.Rd
|
600badb739d0ece8827357e1769070eebcf8dea8
|
[] |
no_license
|
mvaldora/rdp
|
1a4cdf19f42142d24eb497d67dbad9d7fb7205d6
|
8c177be17866bc1988d3fad4df5287466117980e
|
refs/heads/master
| 2020-05-18T18:31:51.641777
| 2019-05-02T14:07:51
| 2019-05-02T14:07:51
| 181,533,928
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 506
|
rd
|
pigorro.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rdp_basic_functions.R
\name{pigorro}
\alias{pigorro}
\title{computes the propensity score}
\usage{
pigorro(x, a)
}
\arguments{
\item{x}{A matrix of covariates without an intercept}
\item{a}{A vector of 0 and 1; if the i-th entry of a equals 1, it indicates the i-th outcome is missing.}
}
\value{
A vector of the estimated probabilities that a equals 1, conditional on x.
}
\description{
computes the propensity score
}
|
17a42c993c35022568079dfa18f931a5d45efb27
|
b683643da99f0ac1cbc9275e83c873b8ce08240e
|
/tweet-sentiment.R
|
3fdbcb01361e136c8ef315b429f823f4768e49a4
|
[] |
no_license
|
PurbashaChatterjee/Oscar-2018-twitter-sentiment-analysis
|
ed65d2ac786fc55d7125179229ab535f79c5a88a
|
cd6804074667cf59d20964f73fbf70f81c85166d
|
refs/heads/master
| 2021-03-19T13:30:53.206896
| 2018-03-07T08:15:56
| 2018-03-07T08:15:56
| 123,990,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,102
|
r
|
tweet-sentiment.R
|
# Fetch tweets about the 2018 Oscars and prepare the text for scoring.
library("twitteR")
library("wordcloud")
library("tm")
#necessary file for Windows
download.file(url="http://curl.haxx.se/ca/cacert.pem", destfile="cacert.pem")
#to get your consumerKey and consumerSecret see the twitteR documentation for instructions
# SECURITY NOTE(review): live API credentials are hard-coded below. These
# should be revoked and read from environment variables instead of being
# committed to source control.
consumer_key <- 'rTv9spgUO7mY6D45hmvfhld0a'
consumer_secret <- 'fwxw6B93PubgQMpsj0I2YbJWZVYMzjloXxb5Wt17FAMm7LvjE9'
access_token <- '970578129650462720-RKFtiPGKwaQKEpFiwQHwIGwlzZiaMqH'
access_secret <- 'hN5BkTw2T57BobRPrX4yQZuKyxrl8I0xEgAf1naNzxWfX'
setup_twitter_oauth(consumer_key,
consumer_secret,
access_token,
access_secret)
#the cainfo parameter is necessary only on Windows
oscar.tweets = searchTwitter("Oscars_2018live", n=3500)
#converts to data frame
df <- do.call("rbind", lapply(oscar.tweets, as.data.frame))
#remove odd characters
df$text <- sapply(df$text,function(row) iconv(row, "latin1", "ASCII", sub="")) #remove emoticon
df$text = gsub("(f|ht)tp(s?)://(.*)[.][a-z]+", "", df$text) #remove URL
# Cleaned tweet texts passed to the scorer below.
sample <- df$text
score.sentiment = function(tweets, pos.words, neg.words, .progress='none')
{
  # Scores each tweet by counting matches against positive/negative lexicons.
  #
  # Args:
  #   tweets:    character vector of raw tweet texts
  #   pos.words: character vector of positive sentiment words
  #   neg.words: character vector of negative sentiment words
  #   .progress: unused; kept for backward compatibility with plyr-style calls
  #
  # Returns a list of four data frames (score, positive, negative, neutral),
  # each carrying one column per tweet plus a `text` column.
  require(plyr)
  require(stringr)
  list=lapply(tweets, function(tweet, pos.words, neg.words)
  {
    # Strip punctuation, control characters, digits and newlines; lowercase.
    tweet = gsub('[[:punct:]]',' ',tweet)
    tweet = gsub('[[:cntrl:]]','',tweet)
    tweet = gsub('\\d+','',tweet) #removes decimal number
    tweet = gsub('\n','',tweet) #removes new lines
    tweet = tolower(tweet)
    # Tokenize on whitespace and flatten to a character vector.
    word.list = str_split(tweet, '\\s+')
    words = unlist(word.list)
    pos.matches = !is.na(match(words, pos.words))
    neg.matches = !is.na(match(words, neg.words))
    pp = sum(pos.matches)
    nn = sum(neg.matches)
    score = pp - nn
    # FIX: the original tried to count neutral tweets via a closure-local
    # counter (`neut`) that was never returned (and, lacking `<<-`, never
    # updated the outer variable), and then extracted element 3 — the
    # NEGATIVE count — as the "neutral" value. Return an explicit neutral
    # indicator (1 when score == 0) as a fourth element instead.
    c(score, pp, nn, as.integer(score == 0))
  }, pos.words, neg.words)
  score_new = lapply(list, `[[`, 1)
  pp1 = lapply(list, '[[', 2)
  nn1 = lapply(list, '[[', 3)
  neut1 = lapply(list, '[[', 4)
  scores.df = data.frame(score = score_new, text=tweets)
  positive.df = data.frame(Positive = pp1, text=tweets)
  negative.df = data.frame(Negative = nn1, text=tweets)
  neutral.df = data.frame(Neutral = neut1, text=tweets)
  list_df = list(scores.df, positive.df, negative.df, neutral.df)
  return(list_df)
}
# Load the opinion lexicons (positive/negative word lists).
pos.words = scan('C:/Users/purba/Desktop/Sentiment/positive-words.txt', what='character', comment.char=';') #Make sure you edit the location
neg.words = scan('C:/Users/purba/Desktop/Sentiment/negative-words.txt', what='character', comment.char=';')
#Adding domain-specific words to positive and negative databases
pos.words=c(pos.words, 'Congrats', 'prizes', 'prize', 'thanks', 'thnx', 'Grt', 'gr8', 'plz', 'trending', 'recovering', 'brainstorm', 'leader')
neg.words = c(neg.words, 'Fight', 'fighting', 'wtf', 'arrest', 'no', 'not')
# Clean the tweets and return the list of four score data frames
# (score, positive, negative, neutral).
result = score.sentiment(sample, pos.words, neg.words)
library(reshape)
score=result[[1]]
positive=result[[2]]
negative=result[[3]]
neutral=result[[4]]
#Removing the text column from each data frame before reshaping
score$text=NULL
positive$text=NULL
negative$text=NULL
neutral$text=NULL
#Keeping the first row (containing the per-tweet sentiment values)
sc=score[1,]
pt=positive[1,]
ng=negative[1,]
nu=neutral[1,]
# Melt each wide one-row frame into a long (variable, value) frame.
sent1=melt(sc, ,var='Score')
sent2=melt(pt, ,var='Positive')
sent3=melt(ng, ,var='Negative')
sent4=melt(nu, ,var='Neutral')
sent1['Score'] = NULL
sent2['Positive'] = NULL
sent3['Negative'] = NULL
sent4['Neutral'] = NULL
#Creating one data frame per sentiment component
table1 = data.frame(Text=result[[1]]$text, Score=sent1)
table2 = data.frame(Text=result[[2]]$text, Score=sent2)
table3 = data.frame(Text=result[[3]]$text, Score=sent3)
# FIX: table4 previously reused sent3 (the NEGATIVE values), so the
# "Neutral" column of sentiment_table silently duplicated the negative one.
table4 = data.frame(Text=result[[4]]$text, Score=sent4)
#Merging the component data frames into one summary table
sentiment_table=data.frame(Text=table1$Text, Score=table1$value, Positive=table2$value, Negative=table3$value, Neutral=table4$value )
# Distribution plots for each sentiment component.
hist(sentiment_table$Positive, col=blues9, xlab = "Positive Sentiments", main = "Histogram of Positive Sentiments")
hist(sentiment_table$Negative, col=blues9, xlab = "Negative Sentiments", main = "Histogram of Negative Sentiments")
hist(sentiment_table$Neutral, col=blues9, xlab = "Neutral Sentiments", main = "Histogram of Neutral Sentiments")
hist(sentiment_table$Score, col=blues9 ,xlab = "Scoring Sentiments", main = "Histogram of Sentiment Score")
library(corrgram)
corrgram(sentiment_table, main="Corrgram of Sentiment Variables", lower.panel=panel.shade,
upper.panel=panel.pie,
text.panel=panel.txt)
# Pie chart of the overall positive/negative/neutral split.
slices <- c(sum(sentiment_table$Positive), sum(sentiment_table$Negative), sum(sentiment_table$Neutral))
labels <- c("Positive", "Negative", "Neutral")
library(plotrix)
# FIX: corrected the misspelled user-facing chart title ("Twwets" -> "Tweets").
pie3D(slices, labels = labels, col=rainbow(length(labels)),explode=0.00, main="Oscar Tweets Sentiment Analysis")
|
ec3581b0cfe22f56e180020afce9d54202e7f381
|
4163b2ff26b31e00b8d73da528d5c881d94108e3
|
/R/SSPFinUA.R
|
2b627a047f3e04fd579f1f201866f55d2a0e4f6a
|
[] |
no_license
|
VB6Hobbyst7/R_SEQUOIA
|
6cfdb9586036efe7bac44857aee0eed8515d1e85
|
def0603d5bb699a3cfc04ea9efd91c190d1c2a9d
|
refs/heads/master
| 2023-04-26T08:45:06.117938
| 2021-05-27T15:21:47
| 2021-05-27T15:21:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,763
|
r
|
SSPFinUA.R
|
# Load required packages, installing any that are missing.
# FIX: the original `if (!require(x)) {install.packages(x)}` installed a
# missing package but never attached it, so the very first run after an
# install still failed; attach with library() after installing.
for (pkg in c("tcltk", "sf", "stringr", "dplyr")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
}
# Numbers forest sub-stands (N_SSPARFOR) within each parcel of a cadastral
# shapefile: within a parcel, each stand type gets one number, except that a
# stand type containing several species gets one number per species.
#
# Args:
#   rep: path to the PARCA .shp file; when FALSE (default) an interactive
#        tcltk file picker is opened instead.
#
# Side effects: assigns NAME into the global environment and, when every
# input feature was renumbered, writes the result via SEQUOIA:::WRITE().
SSPFinUA <- function(rep=F){
  # No path supplied: ask the user to pick the shapefile interactively.
  if(isFALSE(rep)) {
    rep <- tk_choose.files(caption = "Choisir le fichier .shp du parcellaire cadastral (PARCA)",
                           filter = matrix(c("ESRI Shapefile", ".shp"), 1, 2, byrow = TRUE))
  }
  # Abort when the dialog was cancelled (user-facing message kept in French).
  if (!length(rep)){stop("Aucune sélection effectuée > Traitement annulé \n")}
  UA <- st_read(rep, stringsAsFactors=F, options = "ENCODING=UTF-8", quiet=T) # read the shapefile
  # Dataset name = text between the last '/' and the '_UA' suffix of the path.
  NAME <- str_sub(rep,
                  str_locate_all(rep,'/')[[1]][nrow(str_locate_all(rep,'/')[[1]]),1]+1,
                  str_locate(rep,'_UA')[1,1]-1)
  # NOTE(review): exporting NAME to the global environment is a side effect
  # that other SEQUOIA functions appear to rely on — confirm before changing.
  assign("NAME", NAME, envir=globalenv())
  # Empty sf object with the same columns; rebuilt row by row below.
  UARV<-UA[0,colnames(UA)]
  PF <- unique(UA$N_PARFOR)
  for (a in 1:length(PF)){
    print(paste0('Parcelle ', PF[a]))
    UAPF <- UA %>%
      filter(N_PARFOR==PF[a])
    PLTS <- unique(UAPF$PLT_TYPE)
    print(paste0(length(PLTS), ' peuplements'))
    # Sub-stand counter restarts at 1 for every parcel.
    r=1
    for (b in 1:length(PLTS)){
      UAPLTS <- UAPF %>%
        filter(PLT_TYPE==PLTS[b])
      ESS <- unique(UAPLTS$PLT_ESS)
      if (length(ESS)>1) {
        # Several species within this stand type: one sub-stand per species.
        for (c in 1:length(ESS)){
          UAESS <- UAPLTS %>%
            filter(PLT_ESS==ESS[c])
          UAESS <- UAESS %>%
            mutate(N_SSPARFOR=r)
          r=r+1
          UARV <- rbind(UARV, UAESS)
        }
      }else {
        # Single species: the whole stand type is one sub-stand.
        UAPLTS <- UAPLTS %>%
          mutate(N_SSPARFOR=r)
        r=r+1
        UARV <- rbind(UARV, UAPLTS)
      } #end if
    }
  }
  # Only write when no feature was lost or duplicated in the regrouping.
  # NOTE(review): the output name ends in "PROP_point.shp" — confirm this is
  # the intended file name for this layer.
  if(nrow(UARV)==nrow(UA)){SEQUOIA:::WRITE(UARV, getwd(), paste(NAME,"PROP_point.shp"))}
}
|
38bb7760f3507520b1ab14f0033b3c66c2903584
|
0d825b26d91f5dca10290aecd7830dc5b754d031
|
/man/taubeta.casp.est.Rd
|
6c9616421f397d093acf6634cf594c1cb60c27aa
|
[
"MIT"
] |
permissive
|
trambakbanerjee/casp
|
47dd5f707bddcc19c58148237ced317e4ca9e4f6
|
27b98414fbba639238beed7f1b7677b4b5e1e455
|
refs/heads/master
| 2022-11-20T10:11:19.371424
| 2022-11-12T21:02:24
| 2022-11-12T21:02:24
| 140,539,279
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,698
|
rd
|
taubeta.casp.est.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/casplib.R
\name{taubeta.casp.est}
\alias{taubeta.casp.est}
\title{Data-driven estimation of the hyper-parameters \eqn{(\tau,\beta)}}
\usage{
taubeta.casp.est(grid.val, rmt, mx, X)
}
\arguments{
\item{grid.val}{a matrix with two columns. Each row of the matrix represents an
element of a two dimensional grid. The first component represents a likely value for \eqn{\tau}
while the second component is a likely value for \eqn{\beta}.}
\item{rmt}{the output from \code{\link{rmt.est}}.}
\item{mx}{the sample size of the past observations \eqn{\mathbf{X}}.}
\item{X}{a \eqn{mx\times n} matrix of past observations.}
}
\value{
\enumerate{
\item An estimate of \eqn{(\tau,\beta)}
}
}
\description{
Provides an estimate of the hyper-parameters \eqn{(\tau,\beta)} using equation (15) of
the casp paper in the reference.
}
\details{
This function relies on the output from \code{\link{rmt.est}} and calls \code{\link{g.est}}.
Please see Section 3.3 of the casp paper for more details.
}
\examples{
library(casp)
set.seed(42)
n = 10
mx = 5
X = matrix(runif(mx*n),mx,n)
K = 4
S = diag(c(10,8,6,4,rep(1,n-4)))
mw = 50
rmt<- rmt.est(K,S,mw)
tau.grid<-c(0.2,0.3,0.4,0.5)
beta.grid<-c(0.15,0.25,0.35,0.5)
grid.val<- cbind(rep(tau.grid,each = length(beta.grid)),
rep(beta.grid,length(beta.grid)))
taubeta.estimated<- taubeta.casp.est(grid.val,rmt,mx,X)
}
\references{
\enumerate{
\item Trambak Banerjee, Gourab Mukherjee, and Debashis Paul. Improved Shrinkage Prediction under a Spiked
Covariance Structure, 2021.
}
}
\seealso{
\code{\link{rmt.est}}, \code{\link{casp.checkloss}}, \code{\link{casp.linexloss}}
}
|
5acc1c339eaaf449acdd085bda9a42a9d16c4b41
|
ef1853f4456fbcd6d273a39c1792125cd67975b9
|
/man/run_searchlight.Rd
|
6df50b70fc11aa634abeffd81ffc8a07aca01663
|
[] |
no_license
|
DiegoAngls/rMVPA
|
e9f47570684b64db84a377b547b5bc30de7206a1
|
358e4b47a981beccef0b085a3721b59bc0a4ee42
|
refs/heads/master
| 2020-03-18T18:45:49.311543
| 2018-03-29T13:56:25
| 2018-03-29T13:56:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 704
|
rd
|
run_searchlight.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allgeneric.R
\name{run_searchlight}
\alias{run_searchlight}
\title{run_searchlight}
\usage{
run_searchlight(model_spec, radius, method, niter, ...)
}
\arguments{
\item{model_spec}{a \code{mvpa_model} instance.}
\item{radius}{the searchlight radius in millimeters.}
\item{method}{the type of searchlight (randomized or standard)}
\item{niter}{the number of searchlight iterations (used only for 'randomized' method)}
\item{...}{additional arguments passed on to the underlying searchlight implementation.}
}
\value{
a named list of \code{BrainVolume} objects, where each element contains a performance metric (e.g. AUC) at every voxel location.
}
\description{
execute a searchlight analysis
}
|
ec032a91f08cd76ab7a9d80e46b287666fe4af58
|
68a381ae10021be4b4027eba5ed567bb547f1ac5
|
/R-main/07-do-colony-drop-loci-experiment.R
|
2d051e3948a756b676b102875ebd7b720d55cf39
|
[] |
no_license
|
eriqande/tpb-colony-compare
|
205bb35c1324970ba44cfe6af62c600b46f938a1
|
2ff0a16c9d7f4f678441ac9f772e1d1db0d78e3e
|
refs/heads/master
| 2021-01-13T01:55:37.198109
| 2015-03-04T07:55:14
| 2015-03-04T07:55:14
| 31,044,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,384
|
r
|
07-do-colony-drop-loci-experiment.R
|
# NOTE: must be run from the project directory.
# Experiment: re-run Colony sibship inference while progressively dropping
# loci (95 down to 25 in steps of 10), with and without the Ewens prior, and
# compare against the full-locus run.
load("outputs/InferredSibgroups.Rda")
source("R/colony-comp-funcs.R")
# get the mean size of the Colony inferred sibgroups (about 3.07) for use
# later as the Ewens prior's expected sibship size
ColonyFirstRunMeanSibsize <- mean(sapply(InferredSibgroups$Full.colony$sets, length))
loc_vals <- seq(95, 25, by=-10) # these are the locus values to do
# create the data sets to run and put each in their own directory
top_dir <- "colony-drop-loc-runs"
set.seed(22) # this will set colony's seeds to a reproducible value
# Two run configurations per locus count: "full-colony-ewens" (with the -e
# Ewens prior pinned to the mean sibgroup size) and "full-colony" (without).
# Each configuration gets a Colony seed (-S) drawn from R's RNG.
lapply(loc_vals, function(x)
  createColonyRunArea(data_file = "./data/chinook_full_sibs.Rda",
                      dir_name = file.path(top_dir, x),
                      locus_numbers = 1:x,
                      opt_strings = c("full-colony-ewens" = paste(" -y -d 0.0 -m 0.005 -L -f -S",
                                                                  floor(runif(1, min = 1, max = 100000)),
                                                                  " -e \"1 ", sprintf("%.6f", ColonyFirstRunMeanSibsize),
                                                                  sprintf("%.6f", ColonyFirstRunMeanSibsize), " \""
                                      ),
                                      "full-colony" = paste(" -y -d 0.0 -m 0.005 -L -f -S",
                                                            floor(runif(1, min = 1, max = 100000))
                                      )
                      )
  )
)
# compile up a series of commands into a shell script that we will run with system(). Note that
# we are going to run it both with and without the Ewens prior that Colony provides.
# If we run it with the prior we set it at the average from the full run with 95.
script1 <- paste("PROJDIR=$(pwd)")
script2 <- sapply(loc_vals, function(x)
  paste("cd", file.path("$PROJDIR", top_dir, x, "full-colony;"), "nohup $PROJDIR/bin/colony2s.out > colony-stdout.txt &\n",
        "cd", file.path("$PROJDIR", top_dir, x, "full-colony-ewens;"), "nohup $PROJDIR/bin/colony2s.out > colony-stdout.txt &")
)
run_scr <- file.path(top_dir, "run-script.sh")
writeLines(text = c(script1, script2), sep = "\n", con = run_scr)
# make the generated script executable
system(paste("chmod a+x", run_scr))
# then launch it: all jobs run in the background in parallel; takes about
# 15 minutes with one processor for each of the 8 jobs
system(run_scr)
|
bd849773fd09c2ecd013bb4bac64ba2800c533de
|
a82252596b166e9b599a977a4296625e0adc066a
|
/post_processing/allLhs/sensitivityAnalysis/allLhsSensitivityAnalysis.R
|
f19190233dfe8f5d686d33fd780b3eb9bffe2e35
|
[] |
no_license
|
NREL/OpenStudio-analysis-spreadsheet
|
45ec53a6aacc3218ec208c35315e3265a7cb374f
|
f81bd3f2f848c6ad847e9ebc6f2f053ef1711aa8
|
refs/heads/develop
| 2021-01-19T01:13:34.140725
| 2017-05-05T01:03:04
| 2017-05-05T01:03:04
| 16,424,102
| 26
| 17
| null | 2023-09-13T13:22:38
| 2014-01-31T23:39:41
|
Ruby
|
UTF-8
|
R
| false
| false
| 6,729
|
r
|
allLhsSensitivityAnalysis.R
|
# Load list of desired PCA reports and the data to be used.
# FIX: require() returns FALSE instead of erroring when a package is missing,
# deferring the failure to a confusing point later; library() fails loudly.
library(ggplot2)
library(grid)
# NOTE(review): hard-coded absolute setwd() makes this script machine-specific;
# kept for compatibility, but consider running it from the project root.
setwd("C:/gitRepositories/OpenStudio-analysis-spreadsheet/post_processing/allLhs/sensitivityAnalysis")
wd_base = "."
# Configuration: which outputs to analyze, and which result files to load.
output_df = read.csv(paste(wd_base,"resources","reporting_outputs.csv",sep="/"))
data_for_analysis = read.csv(paste(wd_base,"resources","data.csv",sep="/"))
# Load metadata dataframe and rip out the independent (perturbable) variables.
# FIX: use TRUE rather than the reassignable shorthand T.
load(paste(wd_base,data_for_analysis[1,"metadata_dataframe"],sep="/"))
variables_df = subset(metadata,perturbable==TRUE)
#Check existence of desired outputs
# FIX: error_message was previously reset to "" INSIDE the loop, so a missing
# result detected in an early row was wiped out by later valid rows and the
# script stopped with an empty message. Initialize both before the loop so
# all missing-result messages accumulate.
error_flag = FALSE
error_message = ""
for(i in seq_len(nrow(output_df))){
  if(!(output_df[i,"resultname"] %in% metadata[,"name"])){
    error_flag = TRUE
    error_message = paste(error_message,"Desired PCA result ",output_df[i,"resultname"]," was not found in the provided data. ",sep="")
  }
}
if(error_flag){
  stop(error_message)
}
#Fix non-machine readable names in all results dataframes
#Cycle through desired outputs and produce graphs. These will be stored in wd_base/output
# Create output directories if they are not already present.
# FIX: file.exists() also returns TRUE for a plain *file* of the same name,
# which would silently skip dir.create() and break the later setwd();
# dir.exists() checks specifically for a directory.
if(!dir.exists("post_processing_graphs")){
  dir.create("post_processing_graphs")
}
if(!dir.exists("linear_models")){
  dir.create("linear_models")
}
for(i in 1:nrow(data_for_analysis)){
  # Load the `results` data frame saved for this building and keep only the
  # perturbable input variables plus the requested output columns.
  load(paste(wd_base,data_for_analysis[i,"results_dataframe"],sep="/"))
  setwd("post_processing_graphs")
  results = subset(results,select=c(variables_df[,"name"],levels(output_df[,"resultname"])))
  # One row per predictor; filled below with the fitted slope / intercept of
  # each predictor against each output, then saved as a simple linear model.
  predictor_slope_df = data.frame(row.names = variables_df[,"name"])
  predictor_intercept_df = data.frame(row.names = variables_df[,"name"])
  for(j in 1:nrow(output_df)){
    slope_df = data.frame()
    for(k in 1:nrow(variables_df)){
      # Output PNG name: lowerCamelCase(building display name) +
      # UpperCamelCase(output varname) + "_<k>.png", spaces removed.
      outputName = tolower(substr(toString(data_for_analysis[i,"building_display_name"]),1,1))
      outputName = paste(outputName, substr(toString(data_for_analysis[i,"building_display_name"]),2,nchar(toString(data_for_analysis[i,"building_display_name"]))),sep="")
      outputName = paste(outputName, toupper(substr(toString(output_df[j,"varname"]),1,1)),sep="")
      outputName = paste(outputName, substr(toString(output_df[j,"varname"]),2,nchar(toString(output_df[j,"varname"]))),sep="")
      outputName = paste(outputName,"_",k,".png",sep="")
      outputName = gsub(" ","",outputName)
      # Two-column frame: the output of interest and the k-th predictor,
      # sorted by predictor value so it can be cut into 10 equal bins.
      plot_df = data.frame()
      plot_df[1:nrow(results),toString(output_df[j,"resultname"])] = results[,toString(output_df[j,"resultname"])]
      plot_df[1:nrow(results),variables_df[k,"name"]] = results[,variables_df[k,"name"]]
      plot_df = plot_df[c(order(plot_df[,variables_df[k,"name"]])),]
      x_tick_labels = ""
      # Decile bins, each labelled with the rounded mean predictor value.
      for(bin in 1:10){
        start_row = ceiling(nrow(plot_df)/10*(bin-1))+1
        end_row = ceiling(nrow(plot_df)/10*(bin))
        plot_df[start_row:end_row,"bin"] = toString(round(mean(plot_df[start_row:end_row,variables_df[k,"name"]]),3))
        x_tick_labels = c(x_tick_labels,toString(round(mean(plot_df[start_row:end_row,variables_df[k,"name"]]),3)))
      }
      x_tick_labels = x_tick_labels[2:length(x_tick_labels)]
      # Boxplot of the output across the predictor bins.
      # NOTE(review): "resultnamed" in the labs() call below may be a typo for
      # "resultname" — confirm against the columns of reporting_outputs.csv.
      png(filename=outputName,height=600,width=1000,pointsize=12)
      p=ggplot(plot_df,aes(x=factor(x=plot_df$bin,levels=x_tick_labels,ordered=T),y=plot_df[,toString(output_df[j,"resultname"])]))+
        geom_boxplot(outlier.colour = "gray24", outlier.size = 3)+
        labs(y=toString(output_df[j,"resultnamed"]),x=variables_df[k,"display_name"],title="Total Variable Effect")+
        theme(plot.title=element_text(size=30,face="bold"),
              axis.title.x=element_text(size=24,face="bold"),
              axis.title.y=element_text(size=24,face="bold",vjust=2),
              axis.text.x=element_text(angle=60,hjust=1,color="black",size=14,face="bold"),
              axis.text.y=element_text(color="black",size=14,face="bold"),
              plot.margin=unit(c(1,1,5,1), "lines"))
      print(p)
      dev.off()
      graphics.off()
      # Fit output ~ predictor; the slope is rescaled by the predictor's
      # range so that slopes are comparable across predictors.
      lm_var = paste(toString(output_df[j,"resultname"])," ~ ",variables_df[k,"name"],sep="")
      l = lm(lm_var, data = plot_df)
      slope=coef(l)[2]
      intercept = coef(l)[1]
      f=summary(l)$fstatistic
      p=pf(f[1],f[2],f[3],lower.tail=F)
      scale_mult = diff(range(plot_df[,variables_df[k,"name"]]))
      slope_df[k,"slope"] = slope*scale_mult
      slope_df[k,"int"] = intercept
      slope_df[k,"p_value"] = p
      slope_df[k,"var"] = variables_df[k,"display_name"]
      slope_df[k,"result"] = variables_df[k,"name"]
    }
    # Summary chart "<building><Output>_0.png": predictors ordered by the
    # range-scaled slope, with bar fill encoding the regression p-value.
    outputName = tolower(substr(toString(data_for_analysis[i,"building_display_name"]),1,1))
    outputName = paste(outputName, substr(toString(data_for_analysis[i,"building_display_name"]),2,nchar(toString(data_for_analysis[i,"building_display_name"]))),sep="")
    outputName = paste(outputName, toupper(substr(toString(output_df[j,"varname"]),1,1)),sep="")
    outputName = paste(outputName, substr(toString(output_df[j,"varname"]),2,nchar(toString(output_df[j,"varname"]))),sep="")
    outputName = paste(outputName,"_0",".png",sep="")
    slope_df = slope_df[c(order(slope_df$slope,decreasing=TRUE)),]
    x_tick_labels = ""
    for(k in 1:nrow(slope_df)){
      x_tick_labels = c(x_tick_labels,slope_df[k,"var"])
    }
    x_tick_labels = x_tick_labels[2:length(x_tick_labels)]
    png(filename=outputName,height=600,width=1000,pointsize=12)
    p=ggplot(slope_df,aes(x=factor(x=slope_df$var,levels=x_tick_labels,ordered=T),y=slope_df$slope))+
      geom_bar(aes(fill=slope_df$p_value),stat="identity")+
      scale_fill_gradientn(name = "P-Value",limits=c(0,1),breaks=c(0.05,0.25,0.5,0.75,1),
                           colours=c('#FFFFE5','#FFF7BC','#FEE391','#FEC44F','#FB9A29','#EC7014','#CC4C02','#993404','#662506'),
                           values=c(0,0.025,0.05,0.075,0.1,0.25,0.5,0.75,1))+
      guides(fill = guide_colorbar(barwidth = 1, barheight = 15))+
      theme(plot.title=element_text(size=24,face="bold"),
            axis.title.x= element_blank(),
            axis.title.y=element_text(size=18,face="bold",vjust=2),
            axis.text.x=element_text(angle=60,hjust=1,color="black",size=10,face="bold"),
            axis.text.y=element_text(color="black",size=14,face="bold"),
            legend.title = element_text(size=16, face="bold"),
            plot.margin=unit(c(1,1,5,1), "lines"))
    print(p)
    dev.off()
    graphics.off()
    # Record each predictor's slope/intercept for this output.
    for(k in 1:nrow(predictor_slope_df)){
      predictor_slope_df[slope_df[k,"result"],toString(output_df[j,"resultname"])] = slope_df[k,"slope"]
      predictor_intercept_df[slope_df[k,"result"],toString(output_df[j,"resultname"])] = slope_df[k,"int"]
    }
  }
  # Persist the per-building linear model coefficients.
  setwd("../linear_models")
  save(predictor_slope_df,predictor_intercept_df,file=paste(toString(data_for_analysis[i,"building_name"]),"linear_model.RData",sep="_"))
  setwd("..")
}
|
c39ebf5b1088688cc39b8c793bf08f9548a9a499
|
9512429e0e5d67a3bb2be291fa6f1d2f592da780
|
/plot2.R
|
06c7b69278258aba58ef99376b98b2cfc8026af6
|
[] |
no_license
|
priyalm/Coursera_EDA_Course_Project_1
|
55875577568d5deb8292967abf0360e4a2b56893
|
28b9baa25b6dc8d5ef311b0008684ca8a3bbb621
|
refs/heads/master
| 2020-06-02T12:22:30.757123
| 2019-06-10T11:34:32
| 2019-06-10T11:34:32
| 191,152,969
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 704
|
r
|
plot2.R
|
# plot2.R — line plot of Global Active Power over 2007-02-01 and 2007-02-02.
library(data.table)
# Read the full household power data set (semicolon-separated values).
input_file <- "./household_power_consumption.txt"
power_data <- read.table(input_file, header = TRUE, sep = ";",
                         stringsAsFactors = FALSE, dec = ".")
# Keep only the two target days (dates are stored as d/m/Y strings).
target_days <- c("1/2/2007", "2/2/2007")
power_data <- power_data[power_data$Date %in% target_days, ]
# Coerce the measurement column to numeric and build POSIX timestamps from
# the separate Date and Time columns.
active_power <- as.numeric(power_data$Global_active_power)
timestamps <- strptime(paste(power_data$Date, power_data$Time),
                       "%d/%m/%Y %H:%M:%S")
# Render the line plot to a 480x480 PNG.
png("plot2.png", width = 480, height = 480)
plot(timestamps, active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
027ed9034e105f7e6addf4b58f8d69c886d1e450
|
63acf62201a0562e264c45ca67d93f6169c632e5
|
/man/determineLibraryFromGroup.Rd
|
645b82a4a0ccfd55d0ffcc0f55ca7221b55c7a59
|
[] |
no_license
|
yavorska/TenXAnalysisPackage
|
afc4f6eaf28b50583cdc11e4da119001a106d92f
|
3f89111a5940a06eb59e1487b51616bc56a8c322
|
refs/heads/master
| 2021-01-15T19:04:12.640274
| 2017-08-09T14:24:02
| 2017-08-09T14:24:02
| 99,803,911
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 766
|
rd
|
determineLibraryFromGroup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenericsIndexing.R
\name{determineLibraryFromGroup}
\alias{determineLibraryFromGroup}
\title{Get the library that this group belongs to}
\usage{
determineLibraryFromGroup(queryGroupIndexVector, TenXSubset)
}
\arguments{
\item{queryGroupIndexVector}{A numeric vector giving the group indices we
would like to retrieve cell/sample indices from.}
\item{TenXSubset}{A \code{TenXSubset} object.}
}
\value{
An index vector identifying the cells in this group(s).
}
\description{
Given a group index, determine the library the group comes from. In some
cases groups can be spread over multiple libraries. In this case the group is
assigned to the "majority" library.
}
|
e75cea50fa1693b8b2882d13890b3ce13c804ff1
|
c62fce79b984f308d4a13b98ce90e6dd8e76e849
|
/plot2.R
|
8aff5ccccfa82d149a05ee1e4b8dcda9e9d8302e
|
[] |
no_license
|
punadsmile/ExData-Project2
|
ae946566fa4a58170c6bb94fd9d1b2dc60d46a67
|
e731bea9c34d73f9abbb891e503cb0c9d4038d26
|
refs/heads/master
| 2021-01-20T01:03:32.653194
| 2014-11-23T16:14:16
| 2014-11-23T16:14:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 774
|
r
|
plot2.R
|
# Exploratory Data Analysis — Course Project, plot 2.
# Question: have total PM2.5 emissions in Baltimore City, Maryland
# (fips == "24510") decreased from 1999 to 2008?
# NOTE: the data files were downloaded manually (network issues) and are
# read from the working directory. These reads may take a few seconds.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# FIX: removed an unused random subsample of NEI (NEISamp) that was computed
# but never referenced; it only wasted time and consumed RNG state.
# Select Baltimore City records only.
BC <- subset(NEI, fips == '24510')
# Sum emissions per year and draw the bar plot into plot2.png.
png(filename = 'plot2.png')
barplot(tapply(X = BC$Emissions, INDEX = BC$year, FUN = sum), main = 'Total Emission in Baltimore City, Maryland', xlab = 'Year', ylab = expression('PM'[2.5]))
dev.off()
|
a7de1c3215548d9701fd7b64f124e10b744ac0c5
|
c8b609bf58dab1a383bbea8b43a7bc2708adcb38
|
/R/RcppExports.R
|
f81d071421281d0ffac2f3c9702ec50f44de3c0b
|
[] |
no_license
|
holaanna/contactsimulator
|
ce788627c12323c4ab6b3aa902da26bf3e2e4cf5
|
8bcd3f01e0bbe5fb7328d9f6beb27eb907779bdd
|
refs/heads/master
| 2022-03-17T03:25:18.841897
| 2019-11-26T18:33:29
| 2019-11-26T18:33:29
| 111,702,061
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,008
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Functional form of the intensity (beta) as a function of t.
#'
#'\code{func_time_beta} returns the value of the intensity as a function of t.
#'
#' @param t,t_intervention Time of the contact and the introduction of the control respectively, expressed in days.
#' @param sum_beta,epsilon Total risk from infected premisses and the primary infection rate respectively.
#' @param omega Introduces the effect of seasonality in the infection rate.
#'
#' @details The particular case of transmission rate used here is given by \deqn{\beta(t) = (n(t)\beta + \epsilon)\cos^2(\omega t) \textrm{ if } t < t_{intervention}}
#' where n(t) is the size of potential sources.
#'
#' @return It returns the rate of infection in the non-homogeneous poisson process.
#' @references
#' \insertRef{KR08}{contactsimulator}
#' @examples
#' func_time_beta(20,50,0.08,0.2,0.3)
#' @export
func_time_beta <- function(t, t_intervention, sum_beta, epsilon, omega, beta_1) {
.Call('_contactsimulator_func_time_beta', PACKAGE = 'contactsimulator', t, t_intervention, sum_beta, epsilon, omega, beta_1)
}
#' Simulation of the time of the next event of a non-homogeneous Poisson process.
#'
#'\code{simulate_NHPP_next_event} computes the next-event time given the current state at t.
#'
#' @param t_now,t_intervention,t_max current time, the introduction of the control and the final observation time (usually fixed)
#' respectively, expressed in days.
#' @param sum_beta,epsilon total risk from infected premisses and the primary infection rate respectively.
#' @param omega Introduces the effect of seasonality in the infection rate.
#'
#' @return It returns the next-event time in the non-homogeneous poisson process.
#'
#' @examples
#'simulate_NHPP_next_event(2,50,0.08,0.2,0.3,300)
#' @export
simulate_NHPP_next_event <- function(t_now, t_intervention, sum_beta, epsilon, omega, b1, t_max) {
.Call('_contactsimulator_simulate_NHPP_next_event', PACKAGE = 'contactsimulator', t_now, t_intervention, sum_beta, epsilon, omega, b1, t_max)
}
#' Generates a set of intersection points between the circle and the grid lines.
#'
#'\code{circle_line_intersections} computes the intersections points of a given circle with the grid lines along with
#' the angle formed with the x-axis.
#'
#' @param circle_x,circle_y The Euclidean coordinates of the center of the circle.
#'
#' @param r The radius of the given circle.
#' @param n_line The number of grid lines.
#' @param grid_lines A 6 columns data frame with columns names as coor_x_1, coor_y_1, coor_x_2, coor_y_2, orient_line.
#' \describe{
#' \item{coor_x_1, coor_y_1}{Coordinates of the left end point of the grid line }
#' \item{coor_x_2, coor_y_2}{Coordinates of the right end point of the grid line }
#' \item{orient_line}{Line orientation}
#' \enumerate{
#' \item indicates horizontal orientation
#' \item indicates vetical orientation
#' }
#' \item{k_line}{Line numbering: bottom to top, then left to right}
#' }
#'
#' @return It returns a three columns data frame containing x-coordinate, y-coordanate of the intersection of the circle with the
#' grid, and the value of the angle betweem the x-axis and the line joining the center of the circle to the corresponding
#' intersection point.
#'
#' @examples
#' data(grid_line)
#' attach(grid_line)
#' circle_line_intersections(2022230,-3123109,10000,39,grid_line)
#' detach(grid_line)
#' @export
circle_line_intersections <- function(circle_x, circle_y, r, n_line, grid_lines) {
.Call('_contactsimulator_circle_line_intersections', PACKAGE = 'contactsimulator', circle_x, circle_y, r, n_line, grid_lines)
}
#' Generates a set of segments (with length, density, and absolute angle) corresponding to a set of intersection points.
#'
#'\code{func_arcs_attributes} computes, for each arc of the circle delimited by its intersection
#' points with the grid lines, the arc length, the underlying grid-cell density, and the arc's angle.
#'
#' @param set_points A data frame of intersection points between the circle and the grid lines.
#' @seealso{\code{\link{circle_line_intersections}}}
#' @param pop_grid Population density of the grid a case resides. This is filled from bottom to top, then left to right.
#' @param r The travelling distance of the inoculum.
#' @param x_min,y_min x/y min of the left 2 corners of the box.
#' @param n_row_grid,n_col_grid Number of rows and columns of the grid.
#' @param grid_size Grid resolution
#' //@inheritParams circle_line_intersections
#'
#' @return It returns a five columns data frame containing:
#' \describe{
#' \item{len_arc}{Length of the subtending arc delimited by the intersection points of the circle with center \code{circle_x} and \code{circle_y} with a grid}
#' \item{dens}{Density of the grid where the new infection premisse resides}
#' \item{theta}{Angle (between the source and the intersection points) specifying the direction of the inoculum}
#' }
#'
#' @examples
#' data(bbtv)
#' attach(bbtv)
#' Dat<- bbtv[,c("longitude","latitude","BBTV","inspectiondate","leavesinfected","treatmentdate","location")]
#' Dat1<-subset(Dat,Dat$latitude> -27.4698 & Dat$BBTV%in%c("P&I","P", "NI") & difftime(as.Date(Dat$inspectiondate), as.Date("2010/01/01"), unit="days")>=0) # data up in queensland
#' Dat1$treatmentdate[is.na(Dat1$treatmentdate)]<- Dat1$inspectiondate[is.na(Dat1$treatmentdate)]
#' Dat1$detection<-as.numeric(difftime(as.Date(Dat1$inspectiondate), as.Date("2010/01/01"), unit="days"))
#' Dat1$removal<-as.numeric(difftime(as.Date(Dat1$treatmentdate), as.Date("2010/01/01"), unit="days"))
#' Dat1$removal[which(Dat1$removal<0)]<- Dat1$detection[which(Dat1$removal<0)]
#' Datt<-Dat1[,c("longitude","latitude","BBTV","leavesinfected","detection","removal")]
#'
#' Datt<-Dat1[,c("longitude","latitude","BBTV","leavesinfected","detection","removal","location")]
#'
#' Datt[which(Datt$leavesinfected=="LOTS"),"leavesinfected"]<- 45
#' Datt[which(Datt$leavesinfected=="1,2,4"),"leavesinfected"]<- 2.3
#' Datt[which(Datt$leavesinfected=="'3"),"leavesinfected"]<- 3
#' Datt[which(Datt$leavesinfected=="2 +bunch"),"leavesinfected"]<- 2
#' Datt[which(Datt$leavesinfected=="3 +bunch"),"leavesinfected"]<- 3
#' Datt[which(Datt$leavesinfected=="4+BUNCH"),"leavesinfected"]<- 4
#' Datt[which(Datt$leavesinfected=="avg 3.2"),"leavesinfected"]<- 3.2
#' Datt[which(Datt$leavesinfected=="1-6, avg 3.5"),"leavesinfected"]<- 3.5
#' Datt[which(Datt$leavesinfected=="all"),"leavesinfected"]<- 45
#'
#'
#' leav=sapply(Datt[,"leavesinfected"],function(x){
#' gsub("all/","",x)
#' })
#'
#' leav=sapply(leav,function(x){
#' gsub("/all","",x)
#' })
#'
#' leav[grepl("[+]",leav)]<- 45 # Assuming 45 leaves on a plant
#'
#' Datt$leavesinfected<- leav
#'
#' Datt=Datt[with(Datt,order(Datt$detection)),]
#' # Australian reference system
#' sp::coordinates(Datt) <- c("longitude", "latitude")
#' sp::proj4string(Datt) <- sp::CRS("+init=epsg:4326")
#' australianCRS <- sp::CRS("+init=epsg:3577")
#'
#' pointsinaustraliangrid = sp::spTransform(Datt,australianCRS)
#'
#' # Raster
#' rast <- raster::raster()
#' raster::extent(rast) <- raster::extent(pointsinaustraliangrid) # Set same extent
#'
#' raster::res(rast)=5000 # Set resolution
#'
#' size<- raster::res(rast)
#' # Adding column at the top or bottom of the grid if raster leaves points out
#' dif=(raster::xmax(pointsinaustraliangrid)-raster::xmin(pointsinaustraliangrid))/size
#' cei= ceiling(dif)
#'
#' if(cei!=dif){
#' if(raster::xmax(rast)!=raster::xmax(pointsinaustraliangrid)){
#' raster::xmax(rast)<- raster::xmin(rast) + size*cei
#' }
#' if(xmin(rast)!=xmin(pointsinaustraliangrid)){
#' raster::xmin(rast)<- raster::xmax(rast) - size*cei
#' }
#'
#' }
#'
#' # Adding row at the top or bottom of the grid if raster leaves points out
#'
#' dif1=(raster::ymax(pointsinaustraliangrid)-raster::ymin(pointsinaustraliangrid))/size
#' cei1= ceiling(dif1)
#'
#' if(cei1!=dif1){
#' if(raster::ymax(rast)!=raster::ymax(pointsinaustraliangrid)){
#' raster::ymax(rast)<- raster::ymin(rast) + size*cei1
#' }
#' if(raster::ymin(rast)!=raster::ymin(pointsinaustraliangrid)){
#' raster::ymin(rast)<- raster::ymax(rast) - size*cei1
#' }
#'
#' }
#' # And then ... rasterize it! This creates a grid version
#' # of your points using the cells of rast,
#'
#'
#' rast2 <- raster::rasterize(pointsinaustraliangrid, rast, 1, fun=sum)
#'
#' # Extract infos on the grid
#'
#'
#' n_row_grid=nrow_grid=raster::nrow(rast)
#' n_col_grid=ncol_grid=raster::ncol(rast)
#' grid_size=raster::res(rast)[1] # Resolution
#'
#' n_line=(nrow_grid+1) + (ncol_grid +1) # Number of grid lines
#'
#' x_min=raster::xmin(rast) # min max of the bounding box
#' x_max=raster::xmax(rast)
#'
#' y_min=raster::ymin(rast)
#' y_max=raster::ymax(rast)
#'
#' da=as.data.frame(pointsinaustraliangrid)
#'
#' pop_per_grid=raster::values(rast2)
#' pop_per_grid[is.na(pop_per_grid)]=0
#' mat=matrix(pop_per_grid,nrow = nrow_grid, byrow = TRUE )
#' pop_grid=apply(mat,2,rev) # population per grid
#'
#' # Structure of the grid
#' x=seq(x_min,x_max,grid_size)
#' y=seq(y_min,y_max,grid_size)
#'
#' grid_lines=array(0,c(n_line,6))
#' for(i in 1:n_line){
#' if(i<=(nrow_grid +1)){
#' grid_lines[i,]=c(i,1,x[1],y[i],x[length(x)],y[i])
#' }
#' else{
#' grid_lines[i,]=c(i,2,x[i-length(y)],y[1],x[i-length(y)],y[length(y)])
#' }
#' }
#'
#' grid_lines=as.data.frame(grid_lines)
#' colnames(grid_lines)<- c("indx","orient_line","coor_x_1","coor_y_1","coor_x_2","coor_y_2")
#' circle_x=2022230
#' circle_y=-3123109
#' r=10000
#'
#' set_points=circle_line_intersections(2022230,-3123109,10000,39,grid_lines)
#' func_arcs_attributes(set_points, pop_grid, r, x_min, y_min, grid_size, n_row_grid, n_col_grid);
#' detach(bbtv)
#'
#' @export
func_arcs_attributes <- function(set_points, pop_grid, r, x_min, y_min, grid_size, n_row_grid, n_col_grid) {
    # Thin R wrapper: all computation happens in the registered C++ routine.
    .Call('_contactsimulator_func_arcs_attributes', PACKAGE = 'contactsimulator', set_points, pop_grid, r, x_min, y_min, grid_size, n_row_grid, n_col_grid)
}
#' @export
f <- function(x0, E, A) {
    # Compiled helper; semantics are defined in the C++ source.
    .Call('_contactsimulator_f', PACKAGE = 'contactsimulator', x0, E, A)
}
# Internal (not exported) wrapper around the compiled 'g' routine.
g <- function(x0, E, A, B) {
    .Call('_contactsimulator_g', PACKAGE = 'contactsimulator', x0, E, A, B)
}
# Internal (not exported) wrapper; same shape as g() but takes I instead of E.
g1 <- function(x0, I, A, B) {
    .Call('_contactsimulator_g1', PACKAGE = 'contactsimulator', x0, I, A, B)
}
#' @export
h <- function(x0, E, a, b) {
    # Compiled helper taking E; h1() below is the analogue taking I.
    .Call('_contactsimulator_h', PACKAGE = 'contactsimulator', x0, E, a, b)
}
#' @export
h1 <- function(x0, I, a, b) {
    .Call('_contactsimulator_h1', PACKAGE = 'contactsimulator', x0, I, a, b)
}
#' @export
tau <- function(x0, E, a, b) {
    # Compiled helper taking E; tau1() below is the analogue taking I.
    .Call('_contactsimulator_tau', PACKAGE = 'contactsimulator', x0, E, a, b)
}
#' @export
tau1 <- function(x0, I, a, b) {
    .Call('_contactsimulator_tau1', PACKAGE = 'contactsimulator', x0, I, a, b)
}
#' @export
f1 <- function(x0, E, a, b, n) {
    # Compiled helper taking E; f2() below is the analogue taking I.
    .Call('_contactsimulator_f1', PACKAGE = 'contactsimulator', x0, E, a, b, n)
}
#' @export
f2 <- function(x0, I, a, b, n) {
    .Call('_contactsimulator_f2', PACKAGE = 'contactsimulator', x0, I, a, b, n)
}
#' Sample from the cyclic latent period using Brent method (Inverse transform).
#'
#'\code{Inv_trans} Generate a random draw from the distribution specified for the latent period.
#'
#' @param r The initial value.
#' @param x_lo A lower bound for the variable.
#' @param x_hi The upper bound of the variable.
#' @param t The current time: the exposure time.
#' @param l A random variable in (0,1)
#' @param b The amplitude of the seasonality.
#' @param a The baseline rate in the absence of seasonality.
#'
#' @references
#' \insertRef{DR18}{contactsimulator}
#' @return It returns a random draw from the latent period given the time of exposure for the cyclic model.
#'
#' @examples
#' Inv_trans(10,0,1000,10,runif(1),0.056,0.062)
#' @export
Inv_trans <- function(r, x_lo, x_hi, t, l, b, a) {
    # Brent root finding inverts the cyclic CDF; implemented in C++.
    .Call('_contactsimulator_Inv_trans', PACKAGE = 'contactsimulator', r, x_lo, x_hi, t, l, b, a)
}
#' @export
BTFinv1 <- function(E, A, t0) {
    # Compiled inverse-transform helper; see the C++ source for the functional form.
    .Call('_contactsimulator_BTFinv1', PACKAGE = 'contactsimulator', E, A, t0)
}
#' @export
BTFinv3 <- function(E, a, b, n) {
    # See rBTFinv3() below for the documented parameter meanings (E, a, b, n).
    .Call('_contactsimulator_BTFinv3', PACKAGE = 'contactsimulator', E, a, b, n)
}
#' @export
BTFinv4 <- function(I, a, b, n) {
    # See rBTFinv4() below for the documented parameter meanings (I, a, b, n).
    .Call('_contactsimulator_BTFinv4', PACKAGE = 'contactsimulator', I, a, b, n)
}
#' Sample the period it will take for n leaf to emerge.
#'
#'\code{rBTFinv3} Generate a random draw from the distribution of leaf emergence.
#'
#' @param EI_model A given integer characterising the type of distribution for the latent period.
#' \enumerate{
#'   \item Cyclic distribution using the LER (leaf emergence rate) derived by R Allen \url{http://www.publish.csiro.au/ar/pdf/ar9780535}:
#'   \deqn{\frac{dL}{dt}=0.056cos(t-15) + 0.062}
#'   \item Gamma distribution with given rate and shape parameters: \code{\link{rgamma}}
#'   \item Exponential distribution with given rate: \code{\link{rexp}}, the default.
#'
#' }
#' @param E A vector of time at which the period it will take for n leaf to emerge is computed.
#' @param a The baseline LER in the absence of seasonality.
#' @param b The amplitude of the seasonality.
#' @param n The number of leaf to emerge.
#'
#' @return It returns a random period of time n leaf will emerge given E.
#'
#' @examples
#' rBTFinv3(1,c(0,10,100),0.062,0.85,3)
#' @export
rBTFinv3 <- function(EI_model, E, a, b, n) {
    # E may be a vector (see @param E); the sampling itself happens in C++.
    .Call('_contactsimulator_rBTFinv3', PACKAGE = 'contactsimulator', EI_model, E, a, b, n)
}
#' Sample the period it took for n leaf to emerge.
#'
#'\code{rBTFinv4} Generate a random draw from the distribution of leaf emergence, conditioning on the end time \code{I}.
#'
#' @inheritParams rBTFinv3
#' @param I A vector of time at which the period it took for n leaf to emerge is computed.
#' @return It returns a random period of time n leaf will emerge given I.
#'
#' @examples
#' rBTFinv4(1,c(0,10,100),0.062,0.85,3)
#' @export
rBTFinv4 <- function(EI_model, I, a, b, n) {
    # Backward-looking analogue of rBTFinv3(): conditions on I instead of E.
    .Call('_contactsimulator_rBTFinv4', PACKAGE = 'contactsimulator', EI_model, I, a, b, n)
}
#' Sample the latent period corresponding to each specified model.
#'
#'\code{E_to_I} Generate a random draw from the distribution specified for the latent period.
#'
#' @param EI_model A given integer characterising the type of distribution for the latent period.
#' \enumerate{
#'   \item Cyclic distribution using the LER (leaf emergence rate) derived by R Allen \url{http://www.publish.csiro.au/ar/pdf/ar9780535}:
#'   \deqn{\frac{dL}{dt}=0.056cos(t-15) + 0.062}
#'   \item Gamma distribution with given rate and shape parameters: \code{\link{rgamma}}
#'   \item Exponential distribution with given rate: \code{\link{rexp}}, the default.
#'
#' }
#' @param E The time at which the contact occurs, or strictly speaking the exposure time.
#' @param mu_lat The mean latent period.
#' @param var_lat The variance of the latent period for the gamma distribution.
#'
#' @references
#' \insertRef{ALL78a}{contactsimulator}
#'
#' \insertRef{ALL78b}{contactsimulator}
#'
#' \insertRef{ALL87}{contactsimulator}
#' @return It returns a random draw from the latent period given the time of exposure.
#'
#' @examples
#' E_to_I(1,-20,30,10)
#' E_to_I(2,-20,30,10)
#' E_to_I(3,-20,30,10)
#' @export
E_to_I <- function(EI_model, E, mu_lat, var_lat) {
    # Dispatches to the compiled sampler; EI_model selects the distribution.
    .Call('_contactsimulator_E_to_I', PACKAGE = 'contactsimulator', EI_model, E, mu_lat, var_lat)
}
# Presumably maps an age to the matching entry of beta_by_age_vector — TODO
# confirm against the C++ source (not exported).
beta_by_age <- function(age, beta_by_age_vector) {
    .Call('_contactsimulator_beta_by_age', PACKAGE = 'contactsimulator', age, beta_by_age_vector)
}
#' Sample the distance at which the inoculum will travel.
#'
#'\code{Samp_dis} Generate a random dispersal distance from the kernel specified by \code{kern_model}.
#'
#' @param kern_model A given integer characterising the type of distribution for the kernel for both short and long range interaction.
#' \enumerate{
#'   \item exponential-exponential
#'   \item cauchy-cauchy
#'   \item exponential-cauchy
#'   \item cauchy
#'   \item exponential the default
#'
#' }
#' @param ru The proportion of not infected hosts in the cell containing the source.
#' @param alpha1 Dispersal scale parameter for the local spread kernel.
#' @param alpha2 Dispersal scale parameter for the long range interaction.
#'
#' @return It returns a random distance the inoculum will travel to.
#'
#' @examples
#' Samp_dis (1,0.5, 0.2, 0.3)
#' @export
Samp_dis <- function(kern_model, ru, alpha1, alpha2) {
    .Call('_contactsimulator_Samp_dis', PACKAGE = 'contactsimulator', kern_model, ru, alpha1, alpha2)
}
#' Count observations falling in given time intervals.
#'
#'\code{Sub_set} Count the number of observation during a certain period of time.
#'
#' @param tr A vector giving time of events (eg removals) that occur during a time period.
#' @param time A sequence of times at which observations were performed.
#'
#' @return It returns the number of observation recorded at each time.
#'
#' @examples
#'
#' time = seq(1,10)
#' tr = sort(runif(30,0,10))
#' Sub_set(tr, time)
#' @export
Sub_set <- function(tr, time) {
    .Call('_contactsimulator_Sub_set', PACKAGE = 'contactsimulator', tr, time)
}
#' @export
distanc <- function(dat, d) {
    # Compiled helper; presumably distance computations on `dat` — see C++ source.
    .Call('_contactsimulator_distanc', PACKAGE = 'contactsimulator', dat, d)
}
#' @export
fu <- function(t1, t2, l) {
    # Compiled helper; semantics defined in the C++ source.
    .Call('_contactsimulator_fu', PACKAGE = 'contactsimulator', t1, t2, l)
}
#' @export
which2 <- function(x, t) {
    # Compiled helper; presumably an index lookup of `t` within `x` — see C++ source.
    .Call('_contactsimulator_which2', PACKAGE = 'contactsimulator', x, t)
}
#' @export
traj <- function(x, times) {
    # Compiled helper; semantics defined in the C++ source.
    .Call('_contactsimulator_traj', PACKAGE = 'contactsimulator', x, times)
}
#' Sample from the cyclic removal period using Brent method (Inverse transform).
#'
#'\code{I_to_R} Generate a random draw from the distribution specified for the removal period.
#'
#' @param r The initial value.
#' @param x_lo A lower bound for the variable.
#' @param x_hi The upper bound of the variable.
#' @param t The current time: the infection time.
#' @param l A random variable in (0,1)
#' @param b The amplitude of the removal period.
#' @param a The baseline or the average infectious period.
#'
#' @references
#' \insertRef{DR18}{contactsimulator}
#' @return It returns a random draw from the removal period given the time of infection for the cyclic model.
#'
#' @examples
#' I_to_R(10,0,1000,10,runif(1),0.062,0.93)
#' @export
I_to_R <- function(r, x_lo, x_hi, t, l, b, a) {
    # Brent root finding inverts the cyclic CDF; implemented in C++.
    .Call('_contactsimulator_I_to_R', PACKAGE = 'contactsimulator', r, x_lo, x_hi, t, l, b, a)
}
#' Random draw from the cyclic removal period using Brent method (Inverse transform).
#'
#'\code{r_IR} Generate a random draw from the distribution specified for the removal period.
#'
#' @param n The sample size
#' @param I The time at which host become infectious, or strictly speaking the infection time.
#' @param a The baseline or the average infectious period.
#' @param b The amplitude of the removal period.
#' @return It returns n random draws from the removal period given the time of infection for the cyclic model.
#'
#' @examples
#' r_IR(10,10,0.062,0.93)
#' @export
r_IR <- function(n, I, a, b) {
    # Vector version of I_to_R(): n draws for one infection time I.
    .Call('_contactsimulator_r_IR', PACKAGE = 'contactsimulator', n, I, a, b)
}
#' @export
SGcycle <- function(t) {
    # Presumably the seasonal cycle function evaluated at time t — see C++ source.
    .Call('_contactsimulator_SGcycle', PACKAGE = 'contactsimulator', t)
}
#' @export
func_latent_pdf <- function(ti, te, mu_lat, var_lat, k) {
    # Latent-period density at infection time ti given exposure time te (C++).
    .Call('_contactsimulator_func_latent_pdf', PACKAGE = 'contactsimulator', ti, te, mu_lat, var_lat, k)
}
#' @export
norma_cons <- function(beta, d) {
    # Presumably a kernel normalising constant — TODO confirm in the C++ source.
    .Call('_contactsimulator_norma_cons', PACKAGE = 'contactsimulator', beta, d)
}
#' @export
integral_forc <- function(a, b) {
    # Presumably integrates the forcing term between a and b — see C++ source.
    .Call('_contactsimulator_integral_forc', PACKAGE = 'contactsimulator', a, b)
}
#' Functional form of the intensity of the non-homogenous Poisson process.
#'
#'\code{Sum_Leaf_emergence_rate} returns the value of the intensity as a function of t1 and t2.
#'
#' @param t1,t2 Time of the start and end of observation respectively expressed in days.
#' @param a,b Parameters of the model.
#'
#' @return It returns the intensity in the non-homogeneous poisson process.
#' @examples
#' Sum_Leaf_emergence_rate(20,50,0.062,0.056)
#' @export
Sum_Leaf_emergence_rate <- function(t1, t2, a, b) {
    .Call('_contactsimulator_Sum_Leaf_emergence_rate', PACKAGE = 'contactsimulator', t1, t2, a, b)
}
#' The log-likelihood of the non-homogenous Poisson process.
#'
#'\code{Log_likelihood} returns the value of the log-likelihood.
#'
#' @param data Data frame with all records.
#' @param a,b Parameters of the model.
#'
#' @return It returns the log likelihood.
#' @examples
#' df <- data.frame(t_1=c(10,60,100),t_2=c(20,80,130),obs=c(2,3,4))
#' Log_likelihood(df,0.062,0.056)
#' @export
Log_likelihood <- function(data, a, b) {
    .Call('_contactsimulator_Log_likelihood', PACKAGE = 'contactsimulator', data, a, b)
}
#' @export
Prior <- function(a, a_p, b, b_p) {
    # Prior density for (a, b) with hyperparameters a_p, b_p; presumably used
    # by mcmc_leaf() below — confirm in the C++ source.
    .Call('_contactsimulator_Prior', PACKAGE = 'contactsimulator', a, a_p, b, b_p)
}
#' The mcmc sampling of the non-homogenous Poisson process.
#'
#'\code{mcmc_leaf} returns the sample from the posterior distribution.
#'
#' @param data Data frame with all records.
#' @param a0,b0 Initial parameters of the model.
#' @param samp Number of posterior samples to draw (default 1000).
#'
#' @return It returns sample from the posterior distribution.
#' @example examples/africa_landscape_example.R
#' @export
mcmc_leaf <- function(data, a0, b0, samp = 1000L) {
    .Call('_contactsimulator_mcmc_leaf', PACKAGE = 'contactsimulator', data, a0, b0, samp)
}
# Register entry points for exported C++ functions when the package namespace
# is loaded, so the compiled routines are callable from other packages.
methods::setLoadAction(function(ns) {
    .Call('_contactsimulator_RcppExport_registerCCallable', PACKAGE = 'contactsimulator')
})
|
c048295de90b1436c590fa6f5b26a29073a4cfe3
|
208c3260ec9874dcaedf8ee2cbfb316d8b78fffe
|
/intersect_towns_cells_lr.R
|
0b475e189099b8a6db582ea1d35f2116e41f17a7
|
[] |
no_license
|
mtrachs/vegetation_township_lr
|
13336873466700c9960e6ccf0ed38588fb0c8ff2
|
86d5011470af3f7889f7b92b56052055e89d5f98
|
refs/heads/master
| 2020-03-22T16:06:59.483944
| 2018-07-09T15:16:31
| 2018-07-09T15:16:31
| 140,303,662
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,766
|
r
|
intersect_towns_cells_lr.R
|
#!/usr/bin/Rscript
# code to intersect Charlie's townships with Simon's 8 km grid
# result is 180x296x(1372+471) array of proportion intersection
# where 180 is y dimension and 296 is x dimension
# open issue: intersection only uses discrete approximation with 100 points
# Machine-specific project paths; edit these when running elsewhere.
codeDir <- '~/github_changed_files/composition/code/'
dataDir <- '~/github_changed_files/composition/data/'
# NOTE(review): library() would fail loudly on a missing package; require()
# only returns FALSE, so a missing rgdal/raster surfaces later as an error.
require(rgdal)
require(raster)
#source("config")
easternDataDir <- "eastern"
ohioDataDir <- "ohio"
####################################################################
# read in shape file info and create raster for PalEON Albers grid
####################################################################
# Read the 1372 eastern-township polygons from a local shapefile.
eastern_townships <- readOGR(dsn = "/home/mathias/vegetation_data", layer = "1372polygons_v0.9-1")
#eastern_townships <- readOGR(file.path(dataDir, easternDataDir), paste0(easternVersionID, 'polygons_v', easternVersion))
#ohio_townships <- readOGR(file.path(dataDir, ohioDataDir), paste0('OH', ohioVersionID, 'polygons_v', ohioVersion))
#proj4string(ohio_townships) <- CRS('+init=epsg:4326') # seems to have a lat/lon proj now, so don't need this
#ohio_townships <- spTransform(ohio_townships, CRSobj=CRS('+init=epsg:3175')) # transform to Albers
# Ohio townships are currently disabled; only eastern townships are counted.
nTowns <- length(eastern_townships)# + length(ohio_townships)
# set_domain.R is expected to define xRange, yRange, xRes, yRes,
# easternDomainX and easternDomainY (all used below).
source(file.path(codeDir, "set_domain.R"))
###########################################################################################################
#added for lower resolution
# Shrink the bounding box (one 8 km cell on the x edges and northern edge,
# two on the southern edge) and coarsen the grid threefold (8 km -> 24 km).
xRange[1] <- xRange[1]+8000
xRange[2] <- xRange[2]-8000
yRange[1] <- yRange[1]+8000
yRange[2] <- yRange[2]-16000
xRes <- round((xRes-2)/3)
yRes <- round((yRes-3)/3)
###########################################################################################################
# Empty template raster on the PalEON Albers grid (EPSG:3175); values get
# filled per township by rasterize() below.
rast <- raster(crs = CRS('+init=epsg:3175'),
               xmn = xRange[1], xmx = xRange[2],
               ymn = yRange[1], ymx = yRange[2],
               #ncols = xRes, nrows = yRes)
               ncols = xRes, nrows = yRes)
# this raster has rows as y and columns as x and starts with 1,1 in the NW corner, so 180,1 is the SW corner
# NOTE: do not subset as townships@polygons[[index]] as the sorting of this is different than townships[index, ]
####################################################################
# intersect grid with townships ------------------------------------
####################################################################
mini <- min(unique(eastern_townships$ID))
# intersect with eastern townships
# For each township ID, rasterize() with getCover = TRUE gives, per cell, the
# percentage (0-100) of the cell covered by that polygon; one layer per town.
for(i in sort(unique((eastern_townships$ID)))){
  aa <- rasterize(x = eastern_townships[eastern_townships$ID == i, ], y = rast, getCover = TRUE)
  if(i == mini){
    poly.stack <- stack((aa))
  }
  else{
    poly.stack <- addLayer(poly.stack, aa)
  }
}
# intersect with Ohio townships
# for(i in sort(unique((ohio_townships$ID)))){
#   aa <- rasterize(x = ohio_townships[ohio_townships$ID == i, ], y = rast, getCover = TRUE)
#   poly.stack <- addLayer(poly.stack, aa)
# }
#interTmp <- as.array(poly.stack) * (64/100) # 100 converts to proportion; 64 has to do with 8x8?
# /100 converts percent cover to a proportion; *24*24 scales to km^2 per
# 24 km cell (the original 8 km version used 8*8 = 64).
interTmp <- as.array(poly.stack) * (24*24/100)
# check
# Sanity check (disabled): summed intersection per town should track the
# polygon's own area in km^2.
if(FALSE) {
  area <- unlist(sapply(eastern_townships@polygons, function(x) x@area/1000000))
  plot(area, apply(interTmp, 3, sum))
}
# Normalise each town's layer to sum to 1 (proportion of the town in each
# cell) and transpose so dimension 1 is x and dimension 2 is y.
# NOTE(review): 1:nTowns assumes nTowns >= 1; seq_len(nTowns) would be safer.
inter <- array(0, c(xRes, yRes, nTowns))
for(i in 1:nTowns)
  inter[ , , i] <- t(interTmp[ , , i]/sum(interTmp[ , , i]))
# inter goes NW to NE and proceeds in lat bands southward. Last cell is the SE corner
# for plotting, I'll need to reverse the columns
#####################################################################################################
# Rescale the domain indices from set_domain.R (8 km grid) onto the coarsened
# grid; mirrors the (xRes-2)/3 and yRes/3 adjustments above.
easternDomainX <- ((min(easternDomainX)-2)/3):xRes
easternDomainY <- (min(easternDomainY)/3):yRes
#####################################################################################################
nCells <- xRes*yRes
ids <- 1:nCells
# Linear cell ids (column-major over the xRes-by-yRes grid) restricted to the
# eastern domain; the intersection array is cropped to match.
usedIds <- c(matrix(ids, xRes, yRes)[easternDomainX, easternDomainY])
inter <- inter[easternDomainX, easternDomainY, ]
#-----------------------------------------------------------------------------------------------------
#x.coords <- xGrid[easternDomainX]
#y.coords <- yGrid[easternDomainY]
###############################################################################################
# Cell-centre coordinates taken from the raster stack itself rather than from
# set_domain.R's xGrid/yGrid (commented out above).
coords.tot <- coordinates(poly.stack)
x.coords <- sort(unique(coords.tot[,'x']))[easternDomainX] # domain is good!
y.coords <- sort(unique(coords.tot[,'y']))[easternDomainY]
###############################################################################################
# NOTE(review): "twonships" in the output filename looks like a typo for
# "townships"; kept as-is since downstream code may load this exact name.
save(poly.stack,inter, x.coords, y.coords,nTowns, file = file.path(dataDir, paste0('intersection_eastern_twonships_lr.Rda')))
#-----------------------------------------------------------------------------------------------------
|
e9afe1b637955e9184ce8c32cebb85bbb4e6fe33
|
735d13ef3b0a2f7c640951c3c26944aabaa5908f
|
/man/DIFtree.Rd
|
95f9fafdad3d12c817df4dae3f4dfd10c97f5c2f
|
[] |
no_license
|
cran/DIFtree
|
c6f2582a868e36dc12a514a428a8c4b205bbb929
|
a1dfeb6c9e89a078c543da504698939992578700
|
refs/heads/master
| 2020-12-25T17:13:07.031869
| 2020-06-05T08:30:03
| 2020-06-05T08:30:03
| 34,724,799
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,742
|
rd
|
DIFtree.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DIFtree.R
\name{DIFtree}
\alias{DIFtree}
\alias{print.DIFtree}
\title{Item focussed Trees for the Identification of Items in Differential Item Functioning}
\usage{
DIFtree(Y, X, model = c("Rasch", "Logistic", "PCM"), type = c("udif",
"dif", "nudif"), alpha = 0.05, nperm = 1000, trace = FALSE,
penalize = FALSE, ...)
\method{print}{DIFtree}(x, ...)
}
\arguments{
\item{Y}{Matrix or Data.frame of binary 0/1 or categorical response (rows correspond to persons, columns correspond to items)}
\item{X}{Data.frame of (not scaled) covariates (rows correspond to persons, columns correspond to covariates)}
\item{model}{Type of model to be fitted; can be \code{"Rasch"}, \code{"Logistic"} or \code{"PCM"}.}
\item{type}{Type of DIF to be modelled; one out of \code{"udif"}, \code{"dif"} and \code{"nudif"}.
For \code{"Rasch"} and \code{"PCM"} only uniform DIF can be modelled and therefore \code{type} will be ignored.}
\item{alpha}{Global significance level for the permutation tests}
\item{nperm}{Number of permutations used for the permutation tests}
\item{trace}{If true, information about the estimation progress is printed}
\item{penalize}{If true, a small ridge penalty is added to ensure existence of model parameters; only for \code{"Rasch"}.}
\item{...}{Further arguments passed to or from other methods}
\item{x}{Object of class \code{"DIFtree"}}
}
\value{
Object of class \code{"DIFtree"}.
An object of class \code{"DIFtree"} is a list containing the following components:
\item{splits}{Matrix with detailed information about all executed splits during the estimation process}
\item{coefficients}{List of estimated coefficients for items with and without DIF.
Structure of \code{coefficients} depends on \code{model} and \code{type}.}
\item{pvalues}{P-values of each permutation test during the estimation process}
\item{devs}{Maximal value statistics \eqn{T_j} of the selected variables in each iteration during the estimation process}
\item{crit}{Critical values of each permutation test during the estimation process}
\item{Y}{Response matrix used in the estimation process}
\item{X}{Model matrix used in the estimation process}
\item{persons}{Number of persons}
\item{items}{Number of items}
}
\description{
A function to estimate item focussed trees for simultaneous selection of items and variables
that induce DIF (Differential Item Functioning) in dichotomous or polytomous items. DIF detection can be
based on the Rasch Model (dichotomous case), the Logistic Regression Approach (dichotomous case) or the Partial Credit Model (polytomous case).
The basic method of item focussed recursive partitioning in Rasch Models is described in Tutz and Berger (2015).
}
\details{
The methods require 0/1 coded answers on binary items (\code{"Rasch"} and \code{"Logistic"}) or categorical answers on polytomous items (\code{"PCM"}).
Items with DIF are gradually identified by recursive partitioning.
For \code{"Rasch"} one yields a model with linear predictors
\deqn{eta_{pi}=theta_p-tr_i(x_p),}
where \eqn{theta_p} correspond to the ability and \eqn{x_p} correspond to the covariate vector of person p.
For \code{"Logistic"} one yields a model with linear predictors
\itemize{
\item Uniform DIF, \code{type="udif"}
\deqn{eta_{pi}=S_p beta_i+tr_i(x_p),}
where \eqn{S_p} corresponds to the test score and \eqn{x_p} corresponds to the covariate vector of person p.
\item DIF and Non-Uniform DIF, \code{type="dif", "nudif"}
\deqn{eta_{pi}=tr_i(x_p)+tr_i(S_p,x_p),}
where \eqn{S_p} corresponds to the test score and \eqn{x_p} corresponds to the covariate vector of person p.
}
For \code{"PCM"} one yields a model with linear predictors
\deqn{eta_{pir}=theta_p-tr_{ir}(x_p),}
where \eqn{theta_p} correspond to the ability and \eqn{x_p} correspond to the covariate vector of person p.
Significance of each split is verified by permutation tests. The result of the permutation tests
can strongly depend on the number of permutations \code{nperm}.
In the case of pure terminal nodes estimates of the model do not exist. If \code{penalize=TRUE}
a small ridge penalty is added during estimation to ensure existence of all parameters.
}
\examples{
data(data_sim_Rasch)
data(data_sim_PCM)
Y1 <- data_sim_Rasch[,1]
X1 <- data_sim_Rasch[,-1]
Y2 <- data_sim_PCM[,1]
X2 <- data_sim_PCM[,-1]
\dontrun{
mod1 <- DIFtree(Y=Y1,X=X1,model="Logistic",type="udif",alpha=0.05,nperm=1000,trace=TRUE)
print(mod1)
mod2 <- DIFtree(Y=Y2,X=X2,model="PCM",alpha=0.05,nperm=100,trace=TRUE)
print(mod2)
}
}
\references{
Berger, Moritz and Tutz, Gerhard (2016): Detection of Uniform and Non-Uniform Differential Item Functioning
by Item Focussed Trees, Journal of Educational and Behavioral Statistics 41(6), 559-592.
Bollmann, Stella, Berger, Moritz & Tutz, Gerhard (2018): Item-Focussed Trees for the Detection
of Differential Item Functioning in Partial Credit Models, Educational and Psychological Measurement 78(5), 781-804.
Swaminathan, Hariharan and Rogers, H Jane (1990): Detecting differential item functioning
using logistic regression procedures, Journal of Educational Measurements 27(4), 361-370.
Tutz, Gerhard and Berger, Moritz (2016): Item focussed Trees for the Identification of Items
in Differential Item Functioning, Psychometrika 81(3), 727-750.
}
\seealso{
\code{\link[DIFtree]{plot.DIFtree}}, \code{\link[DIFtree]{predict.DIFtree}}, \code{\link[DIFtree]{summary.DIFtree}}
}
\author{
Moritz Berger <moritz.berger@imbie.uni-bonn.de> \cr \url{http://www.imbie.uni-bonn.de/personen/dr-moritz-berger/}
}
|
b6aeaff2d9d9b6666b74210b5f56b12a53d43e49
|
9e4d92c69037b170b9c91b85eaf7ad0e6f40948c
|
/man/unitizerTests-class.Rd
|
d626cd2aa57c4c1e83a5518e95f5f6a38345912c
|
[] |
no_license
|
loerasg/unitizer
|
b59b8276aba90fa44ba4154bf06fcc69e80a02af
|
af20623b75d627f5e9533ae1c0e3c6e8286031bb
|
refs/heads/master
| 2020-12-31T01:35:28.148583
| 2015-05-14T02:05:43
| 2015-05-14T02:05:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 300
|
rd
|
unitizerTests-class.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/test.R
\docType{class}
\name{unitizerTests-class}
\alias{unitizerTests-class}
\title{Collections of Calls For Testing}
\description{
Should probably add an \code{as.unitizerTests} function...
}
\keyword{internal}
|
14a1ff774ab77ade534aac200c9b34f93e2a59c6
|
b9ef94f1714dbf6bbd7096d742851eb83ad07d9f
|
/R/omixerRand.R
|
acb509732486fe0b53b5ef4b455ac1445add25b5
|
[
"MIT"
] |
permissive
|
lardenoije/Omixer
|
5c5a1a386ddaf81555de39008f731840ffaddaf1
|
b2e12c89b7a2d087d0242229c6cc39011e620aad
|
refs/heads/master
| 2023-02-21T04:55:44.767122
| 2021-01-26T11:02:15
| 2021-01-26T11:02:15
| 332,760,675
| 1
| 0
|
MIT
| 2021-01-26T11:02:16
| 2021-01-25T13:44:17
|
R
|
UTF-8
|
R
| false
| false
| 10,305
|
r
|
omixerRand.R
|
#' Multivariate Randomization
#'
#' As the main function of the Omixer package, this function
#' outputs a randomized sample list that minimizes correlations
#' between biological factors and technical covariates.
#'
#' @param df Sample list
#' @param sampleId String specifying sample ID variable
#' @param block Paired sample identifier
#' @param iterNum Number of layouts to generate
#' @param wells Number of wells on a plate
#' @param div Plate subdivisions
#' @param positional Logical indicator of positional batch effects
#' @param plateNum Number of plates
#' @param layout Custom plate layout as data frame
#' @param mask Wells to be left empty
#' @param techVars Technical covariates
#' @param randVars Randomization variables
#'
#' @return Selected randomized sample list as a data frame
#' @return Randomization environment of optimal list generation
#'
#' @import dplyr
#' @import ggplot2
#' @import magrittr
#' @import tibble
#' @import forcats
#' @import stringr
#' @importFrom readr write_delim write_csv write_csv2
#' @importFrom tidyselect everything all_of
#' @importFrom grid unit
#' @export
#'
#' @examples
#' library(tibble)
#' library(forcats)
#' library(stringr)
#'
#' sampleList <- tibble(sampleId=str_pad(1:48, 4, pad="0"),
#' sex=as_factor(sample(c("m", "f"), 48, replace=TRUE)),
#' age=round(rnorm(48, mean=30, sd=8), 0),
#' smoke=as_factor(sample(c("yes", "ex", "never"), 48, replace=TRUE)),
#' date=sample(seq(as.Date('2008/01/01'), as.Date('2016/01/01'),
#' by="day"), 48))
#'
#' randVars <- c("sex", "age", "smoke", "date")
#'
#' omixerLayout <- omixerRand(sampleList, sampleId="sampleId",
#' block="block", iterNum=10, wells=48, div="row",
#' plateNum=1, randVars=randVars)
omixerRand <- function(df, sampleId="sampleId", block="block", iterNum=1000,
  wells, div="none", positional=FALSE, plateNum=1, layout, mask=0, techVars,
  randVars) {
  # Remember whether the caller supplied their own layout NOW: `layout` is
  # reassigned below for automated layouts, after which missing() is unusable.
  customLayout <- !missing(layout)
  ## Set up plate layout
  if (customLayout && !missing(techVars)) {
    layout <- layout; techVars <- techVars
  } else if (customLayout && missing(techVars)) {
    stop("For custom layouts you must supply technical covariates.")
  } else if (!customLayout && !missing(wells)) {
    rowNum <- case_when(wells == 96 ~ 8, wells == 48 ~ 6, wells == 24 ~ 4)
    colNum <- case_when(wells == 96 ~ 12, wells == 48 ~ 8, wells == 24 ~ 6)
    if (!wells %in% c(96, 48, 24)) {
      stop("Automated layouts only support 96, 48, or 24 well plates.")
    }
    if (!div %in% c("none", "col", "row", "col-block", "row-block")) {
      stop("Please specify a valid div (see documentation for options).")
    }
    if (positional == TRUE && div == "none") {
      stop("Positional batches not allowed without plate subdivisions.")
    }
    well <- NULL
    plate <- NULL
    # One row per well: plate/row/column position, optional mask, and the
    # chip (plate subdivision) plus within-chip position implied by `div`.
    layout <- tibble(plate=rep(seq_len(plateNum), each=wells),
      well=rep(seq_len(wells), plateNum),
      row=factor(rep(seq_len(rowNum), colNum*plateNum),
        labels=toupper(letters[seq_len(rowNum)])),
      column=rep(rep(seq_len(colNum), each=rowNum), plateNum), mask=mask,
      chip=case_when(div == "col" ~ column, div == "row" ~ as.integer(row),
        div == "col-block" ~ as.integer(ceiling(column / 2)),
        div == "row-block" ~ as.integer(ceiling(as.numeric(row) / 2))),
      chipPos = case_when(div == "col" ~ as.numeric(row),
        div == "row" ~ as.numeric(column),
        div == "col-block" ~ ifelse(column %% 2 == 0,
          as.numeric(row) + rowNum, row),
        div == "row-block" ~ ifelse(as.numeric(row) %% 2 == 0,
          column + colNum, column)))
  } else {
    stop("You must either specify a custom layout or a number of wells.")
  }
  ## Set up technical covariates (automated layouts only)
  # FIX: previously this block ran unconditionally and overwrote the
  # user-supplied techVars that custom layouts are required to provide.
  if (!customLayout) {
    if (plateNum == 1) {
      if (div == "none") {
        stop("With one plate and no subdivisions, there are no batches.")
      } else {
        if (positional == TRUE) {
          techVars <- c("chip", "chipPos")
        } else {
          techVars <- "chip"
        }
      }
    } else if (plateNum > 1) {
      if (div == "none") {
        techVars <- "plate"
      } else {
        if (positional == TRUE) {
          techVars <- c("plate", "chip", "chipPos")
        } else {
          techVars <- c("plate", "chip")
        }
      }
    } else {
      stop("Plate number must be a positive integer.")
    }
  }
  ## Define sample ID, blocks, and permutation variables
  # permVar duplicates the block (or sample ID) column and is the unit that
  # gets permuted, so paired samples stay together.
  permVar <- NULL
  if (block %in% colnames(df)) {
    df <- df %>% select(sampleId=all_of(sampleId), block=all_of(block),
      permVar = all_of(block), everything())
  } else if (sampleId %in% colnames(df)) {
    df <- df %>% select(sampleId=all_of(sampleId),
      permVar = all_of(sampleId), everything())
  } else {
    stop("Sample ID not found in provided sample list.")
  }
  ## Create randomized layouts and save in a list with the seeds
  dfRandList <- lapply(seq_len(iterNum), function(x){
    ## Save seed so the chosen layout can be reproduced later
    if (exists(".Random.seed", .GlobalEnv)) {
      seed <- .GlobalEnv$.Random.seed
    } else {
      seed <- NULL
    }
    # Create a permutation set based on the .Random.seed saved above
    permSet <- sample(unique(df$permVar))
    # Shuffle samples within each block
    dfShuffle <- df %>% group_by(permVar) %>%
      slice(sample(seq_len(n()))) %>% ungroup()
    # Reassemble the sample list in permuted block order
    dfRand <- lapply(seq_len(length(permSet)), function(y){
      dfRand <- dfShuffle %>% filter(permVar == permSet[y])
      return(dfRand)
    })
    dfRand <- list(bind_rows(dfRand), seed)
    return(dfRand)
  })
  ## Filter masked wells from plate layout
  layoutMasked <- layout %>% filter(mask == 0)
  if (nrow(layoutMasked) != nrow(dfRandList[[1]][[1]])) {
    stop("Number of unmasked wells must equal number of samples.")
  }
  ## Combine randomized sample lists with plate layout
  sampleLayoutList <- lapply(seq_len(length(dfRandList)), function(x) {
    sampleLayout <- cbind(dfRandList[[x]][[1]], layoutMasked)
    sampleLayout$layoutNum <- x
    sampleLayout <- list(sampleLayout, dfRandList[[x]][[2]])
    return(sampleLayout)
  })
  ## If randomization variables are not specified, then use all except IDs
  # FIX: this previously read colnames(dfRandList[[1]]) — a list(df, seed),
  # whose colnames() are NULL — and excluded a non-existent "blockVar"
  # column, so randVars ended up NULL and no optimization took place.
  if (missing(randVars)) {
    randVars <- colnames(df)[!colnames(df) %in%
      c("sampleId", "block", "permVar")]
  }
  ## Save correlation estimates and p-values for every layout
  corVal <- NULL
  corP <- NULL
  corTbList <- lapply(seq_len(length(sampleLayoutList)), function(x){
    sampleLayout <- sampleLayoutList[[x]][[1]]
    corTbList <- lapply(randVars, function(y){
      corTbList <- lapply(techVars, function(z){
        cor <- omixerCorr(sampleLayout[, y], sampleLayout[, z])
        corTb <- tibble(layoutNum=x, randVars=y, techVars=z,
          corVal=cor$corVal, corP=cor$corP)
        return(corTb)
      })
      corTb <- bind_rows(corTbList)
      return(corTb)
    })
    corTb <- bind_rows(corTbList)
    return(corTb)
  })
  ## Create correlation summary table (one absolute-sum and p-flag per layout)
  pTest <- NULL
  absSum <- NULL
  corSumList <- lapply(seq_len(length(corTbList)), function(x){
    corTb <- corTbList[[x]]
    corSum <- tibble(layoutNum=corTb$layoutNum,
      absSum=sum(abs(corTb$corVal)), pTest=any(corTb$corP < 0.05))
  })
  corSum <- bind_rows(corSumList)
  ## Find the optimal layout: no significant correlations, smallest |cor| sum
  if (all(corSum$pTest)) {
    warning("All randomized layouts contained unwanted correlations.")
    chosenLayout <- NA
  } else {
    chosenLayout <- (corSum %>% filter(pTest == FALSE) %>% filter(absSum ==
      min(absSum)))$layoutNum[1]
  }
  ## Fall back to the smallest |cor| sum when every layout had a significant p
  if (is.na(chosenLayout)) {
    warning("Returning best possible layout.")
    nonoptLayout <- (corSum %>% filter(absSum == min(absSum)))$layoutNum[1]
  }
  if (length(chosenLayout) > 1) {
    chosenLayout <- chosenLayout[1]
    message("Several layouts were equally optmized.")
  }
  ## Save correlations for chosen layout
  if (!is.na(chosenLayout)) {
    corSelect <- corTbList[[chosenLayout]]
    omixerLayout <- sampleLayoutList[[chosenLayout]][[1]]
    randomSeed <- sampleLayoutList[[chosenLayout]][[2]]
  } else {
    corSelect <- corTbList[[nonoptLayout]]
    omixerLayout <- sampleLayoutList[[nonoptLayout]][[1]]
    randomSeed <- sampleLayoutList[[nonoptLayout]][[2]]
  }
  ## Rejoin layout with masked wells
  # NOTE(review): the join keys assume these columns exist; custom layouts
  # must therefore provide well/plate/row/column/mask/chip/chipPos.
  omixerLayout <- full_join(omixerLayout, layout,
    by=c("well", "plate", "row", "column", "mask", "chip", "chipPos"))
  omixerLayout <- omixerLayout %>% arrange(plate, well)
  omixerLayout$permVar <- NULL
  omixerLayout$layoutNum <- NULL
  ## Persist the RNG seed so the chosen layout is reproducible
  message("Random seed saved to working directory")
  save(randomSeed, file="randomSeed.Rdata")
  ## Visualize correlations of the chosen layout
  print(ggplot(corSelect, aes(x=randVars, y=techVars)) +
    geom_tile(aes(fill=corVal), size=3, colour="white", show.legend=FALSE) +
    geom_text(aes(label=round(corVal, 3)),
      colour=ifelse(corSelect$corVal < mean(corSelect$corVal), "white",
        "grey30"), fontface="bold", nudge_y=0.2, size=8) +
    geom_text(aes(label=paste("p =", round(corP, 3))),
      colour=ifelse(corSelect$corVal < mean(corSelect$corVal), "white",
        "grey30"), nudge_y=-0.2, size=6) +
    scale_fill_distiller(palette="YlGnBu") +
    scale_x_discrete(position="top", name="Randomization variables \n",
      label=function(x) abbreviate(x, minlength=6), expand=c(0,0)) +
    scale_y_discrete(name="Technical \n covariates",
      label=function(x) abbreviate(x, minlength=6), expand=c(0,0)) +
    ggtitle("Correlations present in the chosen layout") + coord_equal() +
    theme(plot.title=element_text(hjust=0.5, size=24),
      axis.title=element_text(face="bold", size=18),
      axis.ticks=element_blank(),
      axis.text.x=element_text(size=16),
      axis.text.y=element_text(angle=90, size=16, vjust=1)))
  return(omixerLayout)
}
|
0c862b080575e11deea2d4a03d4648c2d3dbc00c
|
bfe324beb0c335272362e7514938a82c08a9cc40
|
/man/mean_precip_by_month.Rd
|
901e462cecf6562493a49747c64276be5f07d99f
|
[] |
no_license
|
jkmiller-wildlife/PrecipPackage
|
79157b7d394835f34a2999bdbaf380c74b09a7d0
|
070035c67dbdb496716c60e5d598b8315b7a4e3a
|
refs/heads/master
| 2020-06-01T12:07:27.458399
| 2019-06-16T03:43:21
| 2019-06-16T03:43:21
| 190,774,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,564
|
rd
|
mean_precip_by_month.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mean_precip_by_month.R
\name{mean_precip_by_month}
\alias{mean_precip_by_month}
\title{Mean monthly precipitation}
\usage{
mean_precip_by_month(precip_data, plot = TRUE)
}
\arguments{
\item{precip_array}{The function uses data from the precipitation array "precip_array".}
\item{water_year:}{From 2002 to 2019. The water year starts on October 1 and ends on September 30.}
\item{Location:}{Paso Robles, San Luis Obispo, and Santa Barbara}
\item{month:}{Three-letter abbreviation of precipitation month}
\item{precip:}{Precipitation measured in inches}
}
\value{
Returns either a graph or table that summarizes the mean monthly precipitation for all water years (2002-2019).
}
\description{
This function creates an array from the monthly_precip$precip data frame, water_year, Location, and month. It then calculates the mean precipitation by month for all years and produces a graph to visualize the data.
}
\references{
Source: Monthly Observed Precipitation - NWS Cooperative Observers. The following are data from NWS cooperative observers for the current water year and historically back to Water Year 2002. The water year starts on Oct 1 and ends on Sept 30. Precipitation data is provided for each month of the current water year, total precipitation for the water year, the percent of normal for the water year to date, and the percent of the entire water year received to date. \url{https://www.cnrfc.noaa.gov/rainfall_data.php#monthly}
}
\author{
Jamie Miller and Anna Calle
}
|
caef31083430b861b635a9bf4f91361ed484dca8
|
689c02c3b86f202ce2ebe805ea6be6d32a1d415d
|
/Shazam/shazam_list.R
|
2782389159cf3bb24340ccf802db9e7a6aeb0e47
|
[] |
no_license
|
mengranhe/Code-Display
|
963d8c45073b16a2a637da3e193f19c74fcccba2
|
5f11c29206df93cf9a216f4edb23726e2f53b163
|
refs/heads/master
| 2021-01-19T14:01:23.370730
| 2017-05-25T05:19:59
| 2017-05-25T05:19:59
| 88,119,312
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 286
|
r
|
shazam_list.R
|
#!/usr/local/bin/Rscript
# Command-line entry point: print info for a song looked up by name.
# tasks.R is expected to define list_song_info().
source("tasks.R")
suppressMessages(library(argparse))
# create parser object
parser <- ArgumentParser()
# --songname is optional; list_song_info() receives NULL when it is omitted.
parser$add_argument("--songname", default = NULL, type = "character", help = "Name of the song")
args <- parser$parse_args()
list_song_info(args$songname)
|
973ba9cbe420102cff7130f729688a65cbd4b98d
|
d12f5e4d9eb954aaa27d50f93cbeb85ab8a4c066
|
/man/iter.step.Rd
|
f42f19937f91dd03b7810c9e18b366621bad3490
|
[] |
no_license
|
WangTJ/opera
|
db0c463f99f28098a7eae0a1d0f79633f8e23a57
|
84b509bc27fc81103c7388c39829eedd329ec7b7
|
refs/heads/master
| 2022-04-22T00:55:20.711640
| 2020-04-15T00:24:43
| 2020-04-15T00:24:43
| 255,422,317
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 668
|
rd
|
iter.step.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/solve.R
\name{iter.step}
\alias{iter.step}
\title{iterative step to calculate cox model derivatives}
\usage{
iter.step(Z, cov, time, cen, beta, theta, indvec)
}
\arguments{
\item{Z}{a matrix of staging term}
\item{cov}{a matrix of covariates}
\item{time}{a vector of survival outcome}
\item{cen}{a vector of censoring status}
\item{beta}{a vector of staging coefficients}
\item{theta}{a vector of covariates coefficients}
\item{indvec}{an indicator of risk set}
}
\value{
a list of derivatives
}
\description{
calculate the first and second order derivatives for IRLS of Cox model.
}
|
68196d9d0ba9fd561eb5c6784f40cdf819f3ff7b
|
93a88a25389e7923d0a0c65429cc013459d7e0c0
|
/GetData.R
|
a68675d98402374d5ee43d1b90fadfc234c5d24d
|
[] |
no_license
|
Anooshiravan/DataScience_C4_W4_Explore
|
07206e19c6b2a666d56cfa98641799c0893cc797
|
f6ddf0a81ee2744b68caceff93602d6226247182
|
refs/heads/master
| 2021-06-24T09:43:20.435354
| 2017-09-09T22:08:36
| 2017-09-09T22:08:36
| 102,986,301
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 321
|
r
|
GetData.R
|
# Fetch the NEI archive if it is not already present locally, then extract
# the two .rds data files from it if they have not been extracted yet.
archiveFile <- "NEI_data.zip"

if (!file.exists(archiveFile)) {
  source_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
  download.file(url = source_url, destfile = archiveFile)
}

expected_files <- c("summarySCC_PM25.rds", "Source_Classification_Code.rds")
if (!all(file.exists(expected_files))) {
  unzip(archiveFile)
}
|
436dfc1a8b5c334a38b53c7c24c783c91e74b8d2
|
f24bbf59b7b931cd1febf10ae1a62385dd2ab5b2
|
/test_sims2.R
|
83aa8ad857de005f9006417aab44e6c6361129df
|
[] |
no_license
|
aaamini/hdpslicer
|
067d63a66e72fd32282813769ca9064aeac61461
|
801e419a81cc77886cfc944a3a926b88c0a46171
|
refs/heads/master
| 2021-07-16T10:45:46.304659
| 2020-06-01T05:50:42
| 2020-06-01T05:50:42
| 167,135,207
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,600
|
r
|
test_sims2.R
|
library(tidyverse)
library(trendsegmentR)
library(purrr)
library(intervals)
# library(clue)
library(mclust) # for adjusted Rand index
library(extraDistr)
# library(stm)
library(lpSolve)
library(hdp) # Nicola Robert's hdp package
source("net_common.R")
source('hdp_module.R')
source('hdp_inference_simple.R')
source('docgen.R')
source("helpers.R")
source("blei_module.R")
# reticulate::source_python("dai/dai_hdp.py") # this might cause problem if run twice.
# Gamma prior hyperparameters for the HDP concentration parameter,
# expressed as mean/variance; converted to shape = mean^2/var and
# rate = mean/var where hdp_quick_init() is called below.
# (Use `<-` for assignment per R convention.)
gam_mean <- 1
gam_var <- 1
# Wrapper around Nicola Roberts' `hdp` package sampler, matching the common
# interface used by the experiment loop below: takes the raw corpus
# `curr_y` and an iteration budget `max_iter`, returns a list with one
# document-label vector per posterior sample.
nicrob_wrapper = function(curr_y, max_iter) {
# Convert the corpus to a document-by-word frequency table (project helper).
y_table <- create_docword_freq(curr_y)
# require(hdp)
# Initialize the HDP chain. The gamma prior is parameterized via the
# file-level globals: shape = gam_mean^2/gam_var, rate = gam_mean/gam_var.
hdp_chain = hdp_quick_init(y_table, alphaa=gam_mean^2/gam_var, alphab=gam_mean/gam_var)
# Draw `max_iter` posterior samples (burn-in 1, no thinning, silent).
hdp_post = hdp_posterior(hdp_chain, burnin=1, n=max_iter, space=1, verbosity=0)
# Per posterior sample: drop the first row of the cluster/DP count matrix
# and convert the counts to a label vector.
zh = lapply(hdp_post@clust_dp_counts, function(y) label_mat2vec(y[-1,])) # label_mat2vec() is in net_common.R
return(zh)
}
# Wrapper around the project's HDP slice sampler; same interface as the
# other *_wrapper functions: returns a list of per-iteration
# document-label vectors.
slice_wrapper = function(curr_y, max_iter) {
# Run the C-backed slice sampler with fixed hyperparameters and
# truncation caps (Kcap/Tcap).
zh <- hdp_slice_samplerC(curr_y, beta0=0.5, gam0=1, ITRmax=max_iter, Kcap=20, Tcap=20)
# Convert word-level labels to document labels for every iteration.
# NOTE(review): the +1 offset presumably shifts 0-based labels to 1-based
# to match the other methods — confirm against compute_doc_labels().
temp <- lapply(1:max_iter, function(itr) compute_doc_labels(zh[[itr]]) + 1)
return( temp )
}
# Blei-style split-merge sampler; same interface as the other *_wrapper
# functions (corpus + iteration budget -> per-iteration label lists).
# Fixed: use TRUE instead of the reassignable alias T, and `<-` assignment.
sm_wrapper <- function(curr_y, max_iter) {
  blei_splitmerge_wrapper(curr_y, max_iter, remove_dir = TRUE)
}
# Variant of sm_wrapper with split-merge moves disabled
# (split_merge = FALSE); otherwise identical.
# Fixed: use TRUE/FALSE instead of the reassignable aliases T/F, and `<-`.
nosm_wrapper <- function(curr_y, max_iter) {
  blei_splitmerge_wrapper(curr_y, max_iter, split_merge = FALSE, remove_dir = TRUE)
}
# j = 2
# table(zh[[20]][j])
# which_most_frequent(zh[[20]][j])
# temp[[20]][j]
# temp
# compute_doc_labels(zh[[3]])+1
# seed <- .Random.seed
# ---- Experiment setup ----------------------------------------------------
set.seed(1234)
# Corpus dimensions: J documents, vocabulary of W words, n words/document.
J <- 50;
W <- 15;
n <- 100;
# data generation
# out <- sample_hdp(n=rep(n,J), J=J, gam0=1, beta0=3, categorical = T, W=W)
# curr_y <- out$y
# curr_z <- out$z
# Generate a synthetic corpus with K=3 topics (helper from docgen.R).
out = gen_doc_data(J=J, K=3, n=n, W=W)
curr_y = out$corpus1
curr_z = out$word_label
y_table <- create_docword_freq(curr_y) # do.call(rbind, lapply(1:J, function(j) tabulate(curr_y[[j]], nbins = W)))
# Ground-truth document labels derived from the true word labels.
doc_labels <- compute_doc_labels(curr_z)
# tabulate(doc_labels)
# compute_aggregate_nmi(doc_labels, tru_doc_label)
# perf_meas = "cRand"
# Performance measures, computed on flattened label vectors:
# adjusted Rand index and (project-defined) mutual information.
arand = function(z1,z2) mclust::adjustedRandIndex(unlist(z1), unlist(z2))
nmi = function(z1,z2) compute_mutual_info(unlist(z1), unlist(z2))
# ---- Run every method `nrep` times for ITRmax iterations each ------------
nrep = 5
methods = list(slice = slice_wrapper, nicrob = nicrob_wrapper, sm=sm_wrapper, nosm = nosm_wrapper)
method_names = names(methods)
nmethods = length(methods)
ITRmax <- 50L
result = NULL
for (r in 1:nrep) {
for (m in 1:nmethods) {
# Flat index into `result` over the rep x method grid.
id = (r-1)*nmethods + m
print(id)
method = methods[[m]]
# One run: a list of per-iteration document-label vectors.
zh <- method(curr_y, ITRmax)
# Score every iteration of this run against the ground truth.
result[[id]] <- tibble(arand = sapply(1:ITRmax, function(itr) arand(zh[[itr]], doc_labels)),
nmi = sapply(1:ITRmax, function(itr) nmi(zh[[itr]], doc_labels)),
itr = 1:ITRmax,
method = method_names[m], rep=r)
}
# zh <- hdp_slice_samplerC(curr_y, beta0=0.5, gam0=1, ITRmax=ITRmax, Kcap=20, Tcap=20)
#
# result[[r]] <- tibble(perf=temp, itr=1:ITRmax, method="slice", rep=r)
#
# zh <- apply_nicrob_hdp(y_table, ITRmax=ITRmax, burnin = 1, alphaa=gam_mean^2/gam_var, alphab= gam_mean/gam_var)
# temp <- sapply(1:ITRmax, function(itr) perf_meas(zh[[itr]], doc_labels))
# result[[r]] <- result[[r]] %>% add_row(tibble(perf=temp, itr=1:ITRmax, method="NicRob", rep=r))
#
# # dtof = dai_warpper(curr_y, ITRmax) # dtof : doc topic f
# # zh = lapply(dtof, function(freq_table) max.col(freq_table))
# # temp <- sapply(1:ITRmax, function(itr) perf_meas(zh[[itr]], doc_labels))
# # result[[r]] <- result[[r]] %>% add_row(tibble(perf=temp, itr=1:ITRmax, method="DAI", rep=r))
# zh = blei_splitmerge_wrapper(curr_y, max_iter=ITRmax, remove_dir = T)
# temp <- sapply(1:ITRmax, function(itr) perf_meas(zh[[itr]], doc_labels))
# result[[r]] <- result[[r]] %>% add_row(tibble(perf=temp, itr=1:ITRmax, method="Blei (sp-mr)", rep=r))
}
# ---- Aggregate over reps, persist, and plot ------------------------------
res = bind_rows(result)
saveRDS(res, "test2.rds")
mean_res = res %>% group_by(itr,method) %>% summarise(arand = mean(arand), nmi = mean(nmi))
ggplot(mean_res, aes(x=itr, y=arand, color=method)) + geom_point() +
theme_bw() +
xlab("iteration") + ylab("Adjusted Rand") +
theme(legend.background=element_blank(),
legend.title=element_blank(),
legend.position = c(0.8, 0.3),
legend.text = element_text(size=10),
text = element_text(size=12)) +
guides(fill=guide_legend(keywidth=0.25,keyheight=0.25,default.unit="inch"))
# ggsave("test.png",width=4,height=5)
# Extract the per-iteration mean adjusted Rand for the slice sampler and
# locate its change point. Fixed: `mean_res` has columns itr, method,
# arand, nmi — the previous code pulled the nonexistent `mean_perf`
# column (a leftover of an older scoring scheme), which errors at runtime.
ari <- mean_res %>% dplyr::filter(method == "slice") %>% pull(arand)
detect_change(ari)
# ggplot(res, aes(x=factor(itr), y=perf, color=method)) + geom_boxplot()
|
1bd28cdef8d65d8975cbf07d051be2d17f529373
|
ee8733c46c91949478b44143e4977ca0ca857968
|
/man/logic.bagging.Rd
|
a8d2b6d156d73690185a4a60e8005d24ab4dc779
|
[] |
no_license
|
holgerschw/logicFS
|
0a7919ef1012814b83a114dbc485e8be3d21e7ae
|
ed8b0be37da919754b39e1a46e793e253b06ddaf
|
refs/heads/master
| 2021-06-06T01:10:52.594711
| 2020-04-12T21:34:42
| 2020-04-12T21:34:42
| 148,649,928
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,078
|
rd
|
logic.bagging.Rd
|
\name{logic.bagging}
\alias{logic.bagging}
\alias{logic.bagging.default}
\alias{logic.bagging.formula}
\alias{print.logicBagg}
\title{Bagged Logic Regression}
\description{
A bagging and subsampling version of logic regression. Currently available for the
classification, the linear regression, and the logistic regression approach
of \code{logreg}. Additionally, an approach based on multinomial logistic regressions as
implemented in \code{mlogreg} can be used if the response is categorical.
}
\usage{
\method{logic.bagging}{default}(x, y, B = 100, useN = TRUE, ntrees = 1, nleaves = 8,
glm.if.1tree = FALSE, replace = TRUE, sub.frac = 0.632,
anneal.control = logreg.anneal.control(), oob = TRUE,
onlyRemove = FALSE, prob.case = 0.5, importance = TRUE,
score = c("DPO", "Conc", "Brier", "PL"), addMatImp = FALSE, fast = FALSE,
neighbor = NULL, adjusted = FALSE, ensemble = FALSE, rand = NULL, ...)
\method{logic.bagging}{formula}(formula, data, recdom = TRUE, ...)
}
\arguments{
\item{x}{a matrix consisting of 0's and 1's. Each column must correspond
to a binary variable and each row to an observation. Missing values are not allowed.}
\item{y}{a numeric vector, a factor, or a vector of class \code{Surv} specifying the values of a response for all the observations
represented in \code{x}, where no missing values are allowed in \code{y}.
If a numeric vector, then \code{y} either contains
the class labels (coded by 0 and 1) or the values of a continuous response depending
on whether the classification or logistic regression approach of logic
regression, or the linear regression approach, respectively, should be used. If the response
is categorical, then \code{y} must be a factor naming the class labels of the observations. If the response is a (right-censored survival time), then \code{y} must be vector of class \code{Surv} (generated, e.g., with the function \code{Surv} from the \code{R} package \code{survival}.}
\item{B}{an integer specifying the number of iterations.}
\item{useN}{logical specifying if the number of correctly classified out-of-bag observations should
be used in the computation of the importance measure. If \code{FALSE}, the proportion of
correctly classified oob observations is used instead. Ignored if \code{importance = FALSE}.
Also ignored in the survival case.}
\item{ntrees}{an integer indicating how many trees should be used.
For a binary response: If \code{ntrees}
will be used. If \code{ntrees} is 1, then by default the classification
will be used. If \code{ntrees} is 1, then by default the classification
approach of logic regression will be used (see \code{glm.if.1tree}.)
For a continuous response: A linear regression model with \code{ntrees} trees
is fitted in each of the \code{B} iterations.
For a categorical response: \eqn{n.lev-1} logic regression models with \code{ntrees} trees
are fitted, where \eqn{n.lev} is the number of levels of the response (for details, see
\code{\link{mlogreg}}).
For a response of class \code{Surv}: A Cox proportional hazards regression model
with \code{ntrees} trees is fitted in each of the \code{B} iterations.}
\item{nleaves}{a numeric value specifying the maximum number of leaves used
in all trees combined. See the help page of the function \code{logreg} of
the package \code{LogicReg} for details.}
\item{glm.if.1tree}{if \code{ntrees} is 1 and \code{glm.if.1tree} is \code{TRUE}
the logistic regression approach of logic regression is used instead of
the classification approach. Ignored if \code{ntrees} is not 1 or the response is not binary.}
\item{replace}{should sampling of the cases be done with replacement? If
\code{TRUE}, a bootstrap sample of size \code{length(cl)} is drawn
from the \code{length(cl)} observations in each of the \code{B} iterations. If
\code{FALSE}, \code{ceiling(sub.frac * length(cl))} of the observations
are drawn without replacement in each iteration.}
\item{sub.frac}{a proportion specifying the fraction of the observations that
are used in each iteration to build a classification rule if \code{replace = FALSE}.
Ignored if \code{replace = TRUE}.}
\item{anneal.control}{a list containing the parameters for simulated annealing.
See the help page of \code{logreg.anneal.control} in the \code{LogicReg} package.}
\item{oob}{should the out-of-bag error rate (classification and logistic regression)
or the out-of-bag root mean square prediction error (linear regression), respectively, be computed?}
\item{onlyRemove}{should in the single tree case the multiple tree measure be used? If \code{TRUE},
the prime implicants are only removed from the trees when determining the importance in the
single tree case. If \code{FALSE}, the original single tree measure is computed for each prime
implicant, i.e.\ a prime implicant is not only removed from the trees in which it is contained,
but also added to the trees that do not contain this interaction. Ignored in all other than the
classification case.}
\item{prob.case}{a numeric value between 0 and 1. If the outcome of the
logistic regression, i.e.\ the class probability, for an observation is
larger than \code{prob.case}, this observations will be classified as case
(or 1).}
\item{importance}{should the measure of importance be computed?}
\item{score}{a character string naming the score that should be used in the computation of the importance measure for a survival time analysis. By default, the distance between predicted outcomes (\code{score = "DPO"}) proposed by Tietz et al.\ (2018) is used in the determination of the importance of the variables. Alternatively, Harrell's C-Index (\code{"Conc"}), the Brier score (\code{"Brier"}), or the predictive partial log-likelihood (\code{"PL"}) can be used.}
\item{addMatImp}{should the matrix containing the improvements due to the prime implicants
in each of the iterations be added to the output? (For each of the prime implicants,
the importance is computed by the average over the \code{B} improvements.) Must be
set to \code{TRUE}, if standardized importances should be computed using
\code{\link{vim.norm}}, or if permutation based importances should be computed
using \code{\link{vim.signperm}}. If \code{ensemble = TRUE} and \code{addMatImp = TRUE} in the survival case,
the respective score of the full model is added to the output instead of an improvement matrix.}
\item{fast}{should a greedy search (as implemented in \code{logreg}) be used instead of simulated
annealing?}
\item{neighbor}{a list consisting of character vectors specifying SNPs that are in LD. If specified, all SNPs need to occur exactly one time in this list. If specified, the importance measures are adjusted for LD by considering the SNPs within a LD block as exchangable.}
\item{adjusted}{logical specifying whether the measures should be adjusted for noise. Often, the interaction actually associated with the response is not exactly found in some iterations of logic bagging, but an interaction is identified that additionally contains one (or seldomly more) noise SNPs. If \code{adjusted} is set to \code{TRUE}, the values of the importance measure is corrected for this behaviour.}
\item{ensemble}{in the case of a survival outcome, should \code{ensemble} importance measures (as, e.g., in \code{randomSurvivalSRC} be used? If \code{FALSE}, importance measures analogous to the ones in the logicFS analysis of other outcomes are used (see Tietz et al., 2018).}
\item{rand}{numeric value. If specified, the random number generator will be
set into a reproducible state.}
\item{formula}{an object of class \code{formula} describing the model that should be
fitted.}
\item{data}{a data frame containing the variables in the model. Each row of \code{data}
must correspond to an observation, and each column to a binary variable (coded by 0 and 1)
or a factor (for details, see \code{recdom}) except for the column comprising
the response, where no missing values are allowed in \code{data}. The response must be either binary (coded by
0 and 1), categorical, continuous, or a right-censored survival time. If a survival time, i.e. an object of class \code{Surv}, a Cox proportional hazards model is fitted in each of the \code{B} iterations of \code{logicFS}. If continuous, a linear model is fitted in each iteration. If categorical, the column of \code{data} specifying the response must
be a factor. In this case, multinomial logic regressions are performed as implemented in \code{\link{mlogreg}}.
Otherwise, depending on \code{ntrees} (and \code{glm.if.1tree})
the classification or the logistic regression approach of logic regression is used.}
\item{recdom}{a logical value or vector of length \code{ncol(data)} comprising whether a SNP should
be transformed into two binary dummy variables coding for a recessive and a dominant effect.
If \code{recdom} is \code{TRUE} (and a logical value), then all factors/variables with three levels will be coded by two dummy
variables as described in \code{\link{make.snp.dummy}}. Each level of each of the other factors
(also factors specifying a SNP that shows only two genotypes) is coded by one indicator variable.
If \code{recdom} is \code{FALSE} (and a logical value),
each level of each factor is coded by an indicator variable. If \code{recdom} is a logical vector,
all factors corresponding to an entry in \code{recdom} that is \code{TRUE} are assumed to be SNPs
and transformed into two binary variables as described above. All variables corresponding
to entries of \code{recdom} that are \code{TRUE} (no matter whether \code{recdom} is a vector or a value)
must be coded either by the integers 1 (coding for the homozygous reference genotype), 2 (heterozygous),
and 3 (homozygous variant), or alternatively by the number of minor alleles, i.e. 0, 1, and 2, where
no mixing of the two coding schemes is allowed. Thus, it is not allowed that some SNPs are coded by
1, 2, and 3, and others are coded by 0, 1, and 2.}
\item{...}{for the \code{formula} method, optional parameters to be passed to the low level function
\code{logic.bagging.default}. Otherwise, ignored.}
}
\value{
\code{logic.bagging} returns an object of class \code{logicBagg} containing
\item{logreg.model}{a list containing the \code{B} logic regression models,}
\item{inbagg}{a list specifying the \code{B} Bootstrap samples,}
\item{vim}{an object of class \code{logicFS} (if \code{importance = TRUE}),}
\item{oob.error}{the out-of-bag error (if \code{oob = TRUE}),}
\item{...}{further parameters of the logic regression.}
}
\references{
Ruczinski, I., Kooperberg, C., LeBlanc M.L. (2003). Logic Regression.
\emph{Journal of Computational and Graphical Statistics}, 12, 475-511.
Schwender, H., Ickstadt, K. (2007). Identification of SNP Interactions
Using Logic Regression. \emph{Biostatistics}, 9(1), 187-198.
Tietz, T., Selinski, S., Golka, K., Hengstler, J.G., Gripp, S., Ickstadt, K.,
Ruczinski, I., Schwender, H. (2018). Identification of Interactions of
Binary Variables Associated with Survival Time Using survivalFS. Submitted.
}
\author{Holger Schwender, \email{holger.schwender@hhu.de}; Tobias Tietz, \email{tobias.tietz@hhu.de}}
\seealso{
\code{\link{predict.logicBagg}}, \code{\link{plot.logicBagg}},
\code{\link{logicFS}}
}
\examples{\dontrun{
# Load data.
data(data.logicfs)
# For logic regression and hence logic.bagging, the variables must
# be binary. data.logicfs, however, contains categorical data
# with realizations 1, 2 and 3. Such data can be transformed
# into binary data by
bin.snps<-make.snp.dummy(data.logicfs)
# To speed up the search for the best logic regression models
# only a small number of iterations is used in simulated annealing.
my.anneal<-logreg.anneal.control(start=2,end=-2,iter=10000)
# Bagged logic regression is then performed by
bagg.out<-logic.bagging(bin.snps,cl.logicfs,B=20,nleaves=10,
rand=123,anneal.control=my.anneal)
# The output of logic.bagging can be printed
bagg.out
# By default, also the importances of the interactions are
# computed
bagg.out$vim
# and can be plotted.
plot(bagg.out)
# The original variable names are displayed in
plot(bagg.out,coded=FALSE)
# New observations (here we assume that these observations are
# in data.logicfs) are assigned to one of the classes by
predict(bagg.out,data.logicfs)
}}
\keyword{tree}
\keyword{regression}
|
87f3cca2fef5b5b71f9ddd46ed6d0a25a75ec4be
|
188817727780824ca9f51e0fc09caf9e06f526f9
|
/lab1/R/lab1_10.R
|
4c276116862a1412a29b061e6eeb6c0dab9d1f6b
|
[] |
no_license
|
beartwigsam/TDAB01
|
2efbb99908595f1b753b98774c661d2ccfb1e420
|
6a14738f42df5c7da47a9db7fd01873dc20bfb1e
|
refs/heads/master
| 2020-03-28T23:07:25.669596
| 2018-10-11T10:01:14
| 2018-10-11T10:01:14
| 149,277,251
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,552
|
r
|
lab1_10.R
|
# Lab 1, task 10: compare Poisson, exponential, and binomial samples and
# illustrate the sampling distribution of the mean.
# NOTE: these globals (n, lambda, rate, size, p, X, Y, Z) are consumed by
# the rest of the script, and the three r* calls fix the RNG stream, so
# statement order matters here.
n = 1000
lambda = 5
rate = 1
size = 10
p = 0.01
# Draw n values from each of the three distributions.
X = rpois(n, lambda)
Y = rexp(n, rate)
Z = rbinom(n, size, p)
# Overlay the three raw-sample histograms on one device.
hist(X, col="blue", ylim = c(0, 1000))
hist(Y, col="red", add=TRUE)
hist(Z, col="green", add=TRUE)
# Draw repeated samples from Z and return the vector of their sample means.
#
# pulls:   number of independent draws averaged per repetition
# Z:       vector to sample from
# n_means: number of repetitions (default 1000, the value the original
#          implementation hard-coded)
#
# Returns a numeric vector of length `n_means`. Each repetition makes
# `pulls` independent draws (i.e. sampling with replacement), matching the
# original per-element sample(Z, 1) loop but vectorized.
meanOf10 <- function(pulls, Z, n_means = 1000) {
  vapply(seq_len(n_means),
         function(i) mean(sample(Z, pulls, replace = TRUE)),
         numeric(1))
}
# Sampling distribution of the mean, 10 draws per mean, for X and Y.
Xhat = meanOf10(10, X)
Yhat = meanOf10(10, Y)
hist(Xhat, xlim = c(0, 8), ylim = c(0, 250), col="blue", main = paste("X (blue) and Y (red)"))
hist(Yhat, add=TRUE, col="red")
# Increase the number of draws per mean (30 / 100 / 1000) and overlay the
# resulting histograms — first for X (Poisson)...
X1 = meanOf10(30, X)
X2 = meanOf10(100, X)
X3 = meanOf10(1000, X)
hist(X1, col="blue", ylim = c(0, 250), breaks = 50, main = paste("30 (blue), 100 (cyan) and 1000 (green) pulls from X"))
hist(X2, col="cyan",add = TRUE, breaks = 50)
hist(X3, col="green",add = TRUE, breaks = 10)
# ...then for Y (exponential)...
Y1 = meanOf10(30, Y)
Y2 = meanOf10(100, Y)
Y3 = meanOf10(1000, Y)
hist(Y1, col="blue", ylim = c(0, 250), breaks = 50, main = paste("30 (blue), 100 (cyan) and 1000 (green) pulls from Y"))
hist(Y2, col="cyan",add = TRUE, breaks = 50)
hist(Y3, col="green",add = TRUE, breaks = 10)
# ...and for Z (binomial).
Z1 = meanOf10(30, Z)
Z2 = meanOf10(100, Z)
Z3 = meanOf10(1000, Z)
hist(Z1, col="blue", ylim = c(0, 250), main = paste("30 (blue), 100 (cyan) and 1000 (green) pulls from Z"))
hist(Z2, col="cyan",add = TRUE)
hist(Z3, col="green",add = TRUE)
|
0fab5b90d7501bd4dc7dedffa1753c3fc2ab9c74
|
8450cd8d46322e46964eaeabb27e299d5d417ca6
|
/inst/R_old/plotConc.R
|
0eaaef091f7c79c16355281246238f5cd2a44bb0
|
[] |
no_license
|
WillFox/TBsim
|
0251b444b8247796ed11d56283344b88e4329099
|
d304c5957dd1199e2ad08ba00fe054b8c6e30366
|
refs/heads/master
| 2020-07-07T04:07:23.609484
| 2018-07-05T21:52:44
| 2018-07-05T21:52:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,928
|
r
|
plotConc.R
|
#===========================================================================
# plotConc.R
# PK concentration profiles
#
# John Fors, UCSF
# Oct 6, 2014
#===========================================================================
# Plot per-drug PK concentration profiles, faceted by drug and compartment.
# Reads the simulation header (for the drug-code lookup) and calcConc.txt
# from the global `folder`, then draws the plot on a new device.
# Fixed: the original unpacked 14 header fields into locals but used only
# the drug-code vector; the 12 unused locals have been removed.
plotConc <- function(){
  # Read header file; only the drug-code vector (list element 14, the same
  # position the original unpacking assigned to `drug`) is used here.
  header <- readHeaderFile(folder)
  drug <- header[[14]]

  # Read concentration data file.
  output <- readFile(folder, "calcConc.txt", "calcConc")
  times <- output[[1]]
  drugs <- output[[2]]
  compartments <- output[[3]]
  concs <- output[[4]]

  # Build data frame, dropping incomplete rows.
  df <- data.frame(times, drugs, compartments, concs)
  df <- na.omit(df)
  colnames(df) <- c("Hour", "Drug", "Compartment", "Concentration")

  # Map integer codes to actual drug labels.
  df$Drug <- drug[df$Drug]

  # Map compartment indices to labels, with a fixed facet ordering.
  compNames <- c("Extracellular", "Intracellular", "Extracell Granuloma", "Intracell Granuloma")
  df$Compartment <- compNames[df$Compartment]
  df$Compartment <- factor(df$Compartment,
      levels = c("Extracellular", "Intracellular", "Extracell Granuloma", "Intracell Granuloma"))

  # Generate plot: one line per drug/compartment cell.
  dev.new()
  bp <- ggplot(data=df, aes(x=Hour, y=Concentration)) +
    geom_line(size=0.5, colour="red") +
    ggtitle("PK Concentration Profile per Drug") +
    theme(plot.title = element_text(size=16, face="bold", vjust=2)) +
    scale_color_brewer(palette="Dark2") +
    theme(legend.title=element_blank()) +
    ylab("Concentration [mg/L]") +
    xlab("Time after drug treatment start [Hours]") +
    facet_grid(Drug ~ Compartment, scales="free_y")
  print(bp)
}
|
c620b7b4cc4cc78c9356b698c43437a131885381
|
5e2ffabcbde9f82c243abaa9ddb0e6bcb7ceaf9b
|
/summaryStats.R
|
cc241aaf298e51e5dabbf205ac830f9376c02a2d
|
[] |
no_license
|
eidanjacob/dataplus
|
56f2aad21310db70a79a5f2bf6fefe2a5d3b121f
|
006aa5169157a221a529f317db09b9e5bf77f8c5
|
refs/heads/master
| 2021-09-26T06:23:33.073721
| 2018-09-12T22:36:27
| 2018-09-12T22:36:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,144
|
r
|
summaryStats.R
|
# Script that calculates summary/interesting stats.
# Stats calculated:
# Avg time taken to get to certain places
# Most common paths
library(dplyr)
library(lubridate)
# # Mess with these numbers if you want.
# timeSteps = c("1hr" = 60*60, "2hr" = 2*60*60, "4hr" = 4*60*60) # in seconds
# # timeSteps = c("4 hr" = 4*60*60)
# delay = 2700 # in milliseconds
# # ------------------------------
#
# start.time = (min(df$`_time`))
# end.time = (max(df$`_time`))
#
# macsToLocList <- list()
#
# end.times <- rep(end.time, length(timeSteps))
#
# for(i in 1){ # didn't feel like replacing i -> 1
# timeStep <- timeSteps[i]
# # Bin populations, calculate densities at each timestep, and cache for future plotting
# time.windowStart = start.time # time.window for selection
# macsToLoc <- NULL
#
# while(end.time > time.windowStart){
#
# # Filter for time interval
# selInt = interval(time.windowStart, time.windowStart + timeStep)
# thisStep <- df %>%
# filter(`_time` %within% selInt)
#
# # For each macaddr, keep track of where it currently is
# macs <- data.frame("macaddr" = thisStep$macaddr,
# "location" = thisStep$location.y,
# "long" = thisStep$long,
# "lat" = thisStep$lat,
# "time.window" = c(time.windowStart),
# "realTime" = c(thisStep$`_time`))
# macs <- macs[order(macs$realTime), ]
# macsToLoc <- rbind(macsToLoc, macs)
#
# end.times[i] <- time.windowStart
# time.windowStart = time.windowStart + timeStep
# }
#
# # Cache these guys away for later
# macsToLocList[[i]] <- macsToLoc
# }
#
# # mac data -- to be used for point plotting
# macData <- macsToLocList[[1]]
source("functions.R") # import findIndex function
# Calculating avg time to get to certain places
#############
from <- "Perkins"
to <- "WestUnion"
uniqMacs <- macData %>% # initial filtering
filter(location %in% c(from, to))
uniqMacs <- unique(uniqMacs$macaddr)
n <- 1
breakNum <- 500 # how many macaddrs to search through
times <- vector(mode = mode(macData$realTime[[1]]), length = breakNum)
macDF <- sapply(sample(uniqMacs,breakNum), FUN = findIndex, macdf=macData, fromLoc=from, toLoc=to, betweenInte=60 * 10, distInte=1)
macDFNonNull <- macDF[-which(sapply(macDF, is.null))] # getting rid of null values
times <- sapply(macDFNonNull, FUN = function(x) { # calculating the travel times
macLocs <- x$orig[x$indexFrom:x$indexTo, ]
diff <- difftime(macLocs$startTimeReal[[length(macLocs$startTimeReal)]], macLocs$endTimeReal[[1]])
return(as.numeric(diff, units = "secs"))
})
times2 <- sapply(macDFNonNull, FUN = function(x) { # calculating the number of locations visited
diff <- x$orig[x$indexFrom:x$indexTo, ]
return(diff$id[[length(diff$startTimeReal)]] - diff$id[[1]])
})
cat("Calculated using a sample size of", length(times$t), "macaddrs.", "\n")
cat("Average time from", from, "to", to, "is", mean(times$t), "seconds.", "\n")
cat("Average number of buildings visited", mean(times$n), "\n")
# it seems to take between 3-5 minutes from Perk to WU
#############
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.