content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
source("scripts/checkplot_initials.R") source("scripts/obscp_inf.R") reps<-5e2 Bnum<-2e3 nc<-36 #scaling this back to work on amarel... suspect memory issues plan(strategy=multisession, workers=nc) map(round(10^seq(2, 3.5, 0.25)), function(size){ map(c(-1,0,1), function(ell){ map(1:100, function(tryme){ start<-Sys.time() nd<-trycheckingobs(flatten(flatten(SADs_list))[[21]], size, ell) write.csv(nd, file=paste("data/new_trycheckingobs_SAD_21", "iter_", tryme, "size", size, ".csv", sep=""), row.names=F) rm(nd) print(Sys.time()-start) }) }) })
/scripts/checkplots_for_parallel_amarel/obs_lomemSAD21.R
no_license
dushoff/diversity_metrics
R
false
false
556
r
source("scripts/checkplot_initials.R") source("scripts/obscp_inf.R") reps<-5e2 Bnum<-2e3 nc<-36 #scaling this back to work on amarel... suspect memory issues plan(strategy=multisession, workers=nc) map(round(10^seq(2, 3.5, 0.25)), function(size){ map(c(-1,0,1), function(ell){ map(1:100, function(tryme){ start<-Sys.time() nd<-trycheckingobs(flatten(flatten(SADs_list))[[21]], size, ell) write.csv(nd, file=paste("data/new_trycheckingobs_SAD_21", "iter_", tryme, "size", size, ".csv", sep=""), row.names=F) rm(nd) print(Sys.time()-start) }) }) })
library(xlsx)

##### Helper: drop rows with NA in the requested columns #####
completeFun <- function(data, desiredCols) {
  completeVec <- complete.cases(data[, desiredCols])
  return(data[completeVec, ])
}

# NOTE(review): hard-coded absolute path; consider here::here() or an
# RStudio project instead. Kept for behavior compatibility.
setwd("C:/Users/livia/Desktop/ISSUES")

### Loading the datasets and making some initial adjustments
# Assumes elavotes, lap_emd, emd.dis() and dmeans() already exist in the
# workspace (loaded by an earlier script) -- TODO confirm.
names(elavotes) <- gsub("ROES10", "ROES", names(elavotes))
lapcitizen <- completeFun(lap_emd, c("voted", "ROES1"))
elaroes1 <- completeFun(elavotes, c("votes", "seats", "ROES1"))

# Subset of voters:
lapvoter <- lapcitizen %>% filter(voted == 1)

##### VOTER - PARTY-VOTED CONGRUENCE ANALYSIS #####
# One function replacing the 15 near-identical copy-pasted per-country
# blocks of the original script. For a given country code it:
#   1. filters both datasets to that country;
#   2. weights each party by vote share: (votes / interview) * 1000, and
#      replicates its rows that many times to build a pseudo-sample;
#   3. stacks the party sample ("y") and the voter sample ("x");
#   4. renames ROES1 -> samps, coerces to numeric, drops NAs;
#   5. returns the EMD congruence measure and the difference of means.
congruence_for_country <- function(country_code) {
  ela <- elaroes1 %>% filter(cname == country_code)
  lap <- lapvoter %>% filter(cname == country_code)

  # Weight by vote shares: shares / interviews * 1000
  ela$sample <- (ela$votes / ela$interview) * 1000
  smp <- ela[rep(seq_len(nrow(ela)), ela$sample), ]
  smp$pop <- as.factor("y")
  lap$pop <- as.factor("x")

  # Adjustments for the congruence computation:
  cong <- rbind(
    smp %>% dplyr::select(ROES1, pop),
    lap %>% dplyr::select(ROES1, pop)
  )
  cong <- cong %>% dplyr::rename(samps = ROES1)
  cong$samps <- as.numeric(cong$samps)
  cong <- completeFun(cong, "samps")

  list(emd = emd.dis(cong), dmeans = dmeans(cong))
}

countries <- c("ARG", "BOL", "BRA", "CHL", "CRI", "ECU", "GTM", "HND",
               "MEX", "NIC", "PAN", "PER", "PRY", "SLV", "URY")
results <- lapply(countries, congruence_for_country)
names(results) <- countries

# Preserve the per-country globals the original script created
# (e.g. BRA_emd_votes_ROES1, BRA_dmeans) for downstream scripts.
for (cc in countries) {
  assign(paste0(cc, "_emd_votes_ROES1"), results[[cc]]$emd)
  assign(paste0(cc, "_dmeans"), results[[cc]]$dmeans)
}

# BUG FIX: the original write.xlsx() call referenced COL_emd_votes_ROES1
# and DOM_emd_votes_ROES1, which were never computed anywhere in this
# script (the call would error), and it omitted several countries that
# WERE computed. Export the EMD result for every country processed.
write.xlsx(
  lapply(results, `[[`, "emd"),
  "EMD - Voter_votes - ROES1.xlsx"
)
/PARTE 3 A- VOTER-VOTE- ROES 1.R
no_license
guilhermearbache/ISSUES
R
false
false
13,816
r
library(xlsx)

##### Helper: drop rows with NA in the requested columns #####
completeFun <- function(data, desiredCols) {
  completeVec <- complete.cases(data[, desiredCols])
  return(data[completeVec, ])
}

# NOTE(review): hard-coded absolute path; consider here::here() or an
# RStudio project instead. Kept for behavior compatibility.
setwd("C:/Users/livia/Desktop/ISSUES")

### Loading the datasets and making some initial adjustments
# Assumes elavotes, lap_emd, emd.dis() and dmeans() already exist in the
# workspace (loaded by an earlier script) -- TODO confirm.
names(elavotes) <- gsub("ROES10", "ROES", names(elavotes))
lapcitizen <- completeFun(lap_emd, c("voted", "ROES1"))
elaroes1 <- completeFun(elavotes, c("votes", "seats", "ROES1"))

# Subset of voters:
lapvoter <- lapcitizen %>% filter(voted == 1)

##### VOTER - PARTY-VOTED CONGRUENCE ANALYSIS #####
# One function replacing the 15 near-identical copy-pasted per-country
# blocks of the original script. For a given country code it:
#   1. filters both datasets to that country;
#   2. weights each party by vote share: (votes / interview) * 1000, and
#      replicates its rows that many times to build a pseudo-sample;
#   3. stacks the party sample ("y") and the voter sample ("x");
#   4. renames ROES1 -> samps, coerces to numeric, drops NAs;
#   5. returns the EMD congruence measure and the difference of means.
congruence_for_country <- function(country_code) {
  ela <- elaroes1 %>% filter(cname == country_code)
  lap <- lapvoter %>% filter(cname == country_code)

  # Weight by vote shares: shares / interviews * 1000
  ela$sample <- (ela$votes / ela$interview) * 1000
  smp <- ela[rep(seq_len(nrow(ela)), ela$sample), ]
  smp$pop <- as.factor("y")
  lap$pop <- as.factor("x")

  # Adjustments for the congruence computation:
  cong <- rbind(
    smp %>% dplyr::select(ROES1, pop),
    lap %>% dplyr::select(ROES1, pop)
  )
  cong <- cong %>% dplyr::rename(samps = ROES1)
  cong$samps <- as.numeric(cong$samps)
  cong <- completeFun(cong, "samps")

  list(emd = emd.dis(cong), dmeans = dmeans(cong))
}

countries <- c("ARG", "BOL", "BRA", "CHL", "CRI", "ECU", "GTM", "HND",
               "MEX", "NIC", "PAN", "PER", "PRY", "SLV", "URY")
results <- lapply(countries, congruence_for_country)
names(results) <- countries

# Preserve the per-country globals the original script created
# (e.g. BRA_emd_votes_ROES1, BRA_dmeans) for downstream scripts.
for (cc in countries) {
  assign(paste0(cc, "_emd_votes_ROES1"), results[[cc]]$emd)
  assign(paste0(cc, "_dmeans"), results[[cc]]$dmeans)
}

# BUG FIX: the original write.xlsx() call referenced COL_emd_votes_ROES1
# and DOM_emd_votes_ROES1, which were never computed anywhere in this
# script (the call would error), and it omitted several countries that
# WERE computed. Export the EMD result for every country processed.
write.xlsx(
  lapply(results, `[[`, "emd"),
  "EMD - Voter_votes - ROES1.xlsx"
)
## These functions "wrap" a matrix and cache its inverse so the
## (potentially expensive) solve() call runs at most once per matrix.
## makeCacheMatrix() builds the wrapper; cacheSolve() computes the
## inverse or returns the cached value when available.

## Construct a cache-aware wrapper around matrix `x`.
## Returns a list of accessor functions:
##   set(y)        replace the matrix (and invalidate the cached inverse)
##   get()         return the current matrix
##   setinv(inv)   store a computed inverse
##   getinv()      return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL # changing the matrix invalidates any cached inverse
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}

## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
## object), using the cached value when present. The matrix is assumed
## to be invertible via solve(); extra arguments in `...` are forwarded
## to solve() on a cache miss.
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Cache miss: compute, store, and return the inverse.
  # (Fixed: use `<-`, not `=`, for assignment per R style.)
  inv <- solve(x$get(), ...)
  x$setinv(inv)
  inv
}
/cachematrix.R
no_license
dossett/ProgrammingAssignment2
R
false
false
1,231
r
## These functions "wrap" a matrix and cache its inverse so the
## (potentially expensive) solve() call runs at most once per matrix.
## makeCacheMatrix() builds the wrapper; cacheSolve() computes the
## inverse or returns the cached value when available.

## Construct a cache-aware wrapper around matrix `x`.
## Returns a list of accessor functions:
##   set(y)        replace the matrix (and invalidate the cached inverse)
##   get()         return the current matrix
##   setinv(inv)   store a computed inverse
##   getinv()      return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL # changing the matrix invalidates any cached inverse
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}

## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
## object), using the cached value when present. The matrix is assumed
## to be invertible via solve(); extra arguments in `...` are forwarded
## to solve() on a cache miss.
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Cache miss: compute, store, and return the inverse.
  # (Fixed: use `<-`, not `=`, for assignment per R style.)
  inv <- solve(x$get(), ...)
  x$setinv(inv)
  inv
}
# Tidytuesday 2019 week 51: population-to-adoptable-dog ratio per US state.
library(tidyverse)
library(ggtext)
library(ggpmthemes)

theme_set(theme_light_modified(base_family = "Alef"))

url <- pins::pin("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-12-17/dog_descriptions.csv")
dog_descriptions <- readr::read_csv(url)

dog_descriptions %>%
  count(contact_state)

# Keep only rows whose contact state is one of the 50 US states.
df <- dog_descriptions %>%
  filter(contact_state %in% state.abb)

df %>%
  count(contact_state, sort = TRUE)

# Latest total population per state (all ages).
us_population <- read_csv("https://raw.githubusercontent.com/jakevdp/PythonDataScienceHandbook/master/notebooks/data/state-population.csv") %>%
  janitor::clean_names() %>%
  filter(ages == "total" & year == max(year))

# Dogs per state joined to population; ratio = people per adoptable dog.
# States are ordered by the ratio for the plot.
df_viz <- df %>%
  count(contact_state, sort = TRUE) %>%
  left_join(us_population, by = c("contact_state" = "state_region")) %>%
  mutate(human_dog_ratio = population / n) %>%
  mutate(state = state.name[match(contact_state, state.abb)]) %>%
  mutate(state = fct_reorder(state, human_dog_ratio))

subtitle <- "***Adopt not shop*** consists in finding a new dog at a local shelter<br>or rescue organization rather than a pet store or breeder<br>(*pudding.cool*). In the USA, the ratios of human population<br>to adoptable dogs vary greatly among states."

df_viz %>%
  ggplot(aes(y = human_dog_ratio, x = state, fill = human_dog_ratio)) +
  geom_col() +
  coord_flip() +
  # FIX: expand_scale() is deprecated since ggplot2 3.3.0; expansion()
  # is the drop-in replacement with identical semantics.
  scale_y_continuous(expand = expansion(mult = c(0, 0.02))) +
  xlab(NULL) +
  ylab("Population-to-dog ratio") +
  paletteer::scale_fill_paletteer_c("ggthemes::Orange") +
  labs(
    title = "Adoptable dogs in the USA",
    subtitle = subtitle,
    caption = "Tidytuesday week #51 | Data: https://pudding.cool/2019/10/shelters/ | @philmassicotte"
  ) +
  theme(
    legend.position = "none",
    text = element_text(color = "white"),
    plot.background = element_rect(fill = "#3c3c3c"),
    panel.background = element_rect(fill = "#3c3c3c"),
    axis.text = element_text(color = "white"),
    panel.grid = element_blank(),
    panel.border = element_blank(),
    axis.ticks = element_blank(),
    panel.grid.major.x = element_line(color = "gray50", size = 0.1),
    plot.title = element_text(hjust = 0.5),
    plot.subtitle = element_markdown(hjust = 0.5, family = "Antic"),
    plot.caption = element_text(color = "gray60", size = 10)
  )

ggsave(
  here::here("graphs", "tidytuesday_2019_week51.png"),
  type = "cairo",
  dpi = 600,
  width = 7,
  height = 8
)
/R/tidytuesday_2019_week51.R
no_license
PMassicotte/tidytuesday
R
false
false
2,400
r
# Tidytuesday 2019, week 51: human-population-to-adoptable-dog ratio by
# US state, plotted as an ordered horizontal bar chart.
library(tidyverse)
library(ggtext)
library(ggpmthemes)

theme_set(theme_light_modified(base_family = "Alef"))

# Adoptable-dog descriptions; pins::pin() caches the download locally.
url <- pins::pin("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-12-17/dog_descriptions.csv")

dog_descriptions <- readr::read_csv(url)

# Quick look at the raw per-state counts.
dog_descriptions %>%
  count(contact_state)

# Keep only the 50 US states (drops territories and foreign codes).
df <- dog_descriptions %>%
  filter(contact_state %in% state.abb)

df %>%
  count(contact_state, sort = TRUE)

# Most recent total population for each state.
us_population <- read_csv("https://raw.githubusercontent.com/jakevdp/PythonDataScienceHandbook/master/notebooks/data/state-population.csv") %>%
  janitor::clean_names() %>%
  filter(ages == "total" & year == max(year))

# Join dog counts to population, compute the population-to-dog ratio
# and order the states by that ratio for plotting.
df_viz <- df %>%
  count(contact_state, sort = TRUE) %>%
  left_join(us_population, by = c("contact_state" = "state_region")) %>%
  mutate(human_dog_ratio = population / n) %>%
  mutate(state = state.name[match(contact_state, state.abb)]) %>%
  mutate(state = fct_reorder(state, human_dog_ratio))

subtitle <- "***Adopt not shop*** consists in finding a new dog at a local shelter<br>or rescue organization rather than a pet store or breeder<br>(*pudding.cool*). In the USA, the ratios of human population<br>to adoptable dogs vary greatly among states."

df_viz %>%
  ggplot(aes(y = human_dog_ratio, x = state, fill = human_dog_ratio)) +
  geom_col() +
  coord_flip() +
  # expansion() replaces expand_scale(), deprecated since ggplot2 3.3.
  scale_y_continuous(expand = expansion(mult = c(0, 0.02))) +
  xlab(NULL) +
  ylab("Population-to-dog ratio") +
  paletteer::scale_fill_paletteer_c("ggthemes::Orange") +
  labs(
    title = "Adoptable dogs in the USA",
    subtitle = subtitle,
    caption = "Tidytuesday week #51 | Data: https://pudding.cool/2019/10/shelters/ | @philmassicotte"
  ) +
  theme(
    legend.position = "none",
    text = element_text(color = "white"),
    plot.background = element_rect(fill = "#3c3c3c"),
    panel.background = element_rect(fill = "#3c3c3c"),
    axis.text = element_text(color = "white"),
    panel.grid = element_blank(),
    panel.border = element_blank(),
    axis.ticks = element_blank(),
    panel.grid.major.x = element_line(color = "gray50", size = 0.1),
    plot.title = element_text(hjust = 0.5),
    plot.subtitle = element_markdown(hjust = 0.5, family = "Antic"),
    plot.caption = element_text(color = "gray60", size = 10)
  )

ggsave(
  here::here("graphs", "tidytuesday_2019_week51.png"),
  type = "cairo",
  dpi = 600,
  width = 7,
  height = 8
)
## required packages
library(plyr)
library(tidyverse)
library(readxl)
library(doBy)

# All date-time handling is done in UTC
Sys.setenv(TZ = "UTC")

################################################################
## Helper functions to extract a file's base name and extension

# Return the file name without its extension (the part before the
# first "."); with full.names = TRUE the directory path is kept.
file_name <- function(file, full.names = FALSE){
  # file <- "a.txt.txt"
  stopifnot(is.character(file))
  file_dir <- dirname(file)
  file_nm <- file %>%
    basename() %>%
    stringr::str_split(., pattern = "\\.") %>%
    unlist() %>%
    dplyr::first(.)
  if (full.names) file_nm <- file.path(file_dir, file_nm)
  return(file_nm)
}

# Return the file extension (the part after the last ".");
# with dot = TRUE the extension is prefixed with an escaped dot
# ("\\.ext") so it can be used directly as a regex pattern.
file_ext <- function(file, dot = FALSE){
  stopifnot(is.character(file))
  ext <- file %>%
    basename() %>%
    stringr::str_split(., "\\.") %>%
    unlist() %>%
    dplyr::last(.)
  if (dot) ext <- paste0("\\.", ext)
  return(ext)
}

#' Extract spatial coordinates (lon, lat, alt) from data imported with
#' read_xfile()
#'
#' @param xlsdf data frame with station metadata; column 1 holds field
#'   labels and column 2 the values. Rows are used positionally:
#'   row 1 = altitude, row 2 = latitude, row 3 = longitude
#'   (degrees and minutes, with hemisphere letters N/S/E/W).
#'
#' @return a one-row data frame with columns \code{lon}, \code{lat}
#'   (signed decimal degrees) and \code{alt}.
#'
#' @examples
#'
xy_coords_file <- function(xlsdf){

  ## test
  # xlsdf <- read_xfile(xfiles[30])
  # xlsdf <- metadata %>% data.frame()

  ## adjust column names
  ## replace points and lower-case the field labels
  xlsdf[, 1] <- xlsdf[, 1] %>%
    stringr::str_replace_all("\\.", "") %>%
    tolower()
  #xlsdf[, 1] <- tolower(xlsdf[, 1])

  ## value column: strip points and the metre unit ("m")
  xlsdf[, 2] <- xlsdf[, 2] %>%
    stringr::str_replace_all("\\.", "") %>%
    stringr::str_replace_all("m", "")

  ## lon sign: negative for the western hemisphere ("W")
  if (length(grep("W", xlsdf[, 2])) > 0) {
    lon_sign <- -1
  } else {
    lon_sign <- 1
  }
  xlsdf[, 2] <- gsub("'W|'E", "", xlsdf[, 2])

  ## lat sign: negative for the southern hemisphere ("S")
  if (length(grep("S", xlsdf[, 2])) > 0) {
    lat_sign <- -1
  } else {
    lat_sign <- 1
  }
  xlsdf[, 2] <- gsub("'N|'S", "", xlsdf[, 2])

  ## remove degree string: "°" becomes "_" so degrees/minutes can be split
  xlsdf[, 2] <- gsub("°", "_", xlsdf[, 2])

  ## replace "," by "." (decimal separator) in the altitude value
  alt <- as.numeric(gsub(",", ".", xlsdf[1, 2]))

  ## convert degrees + minutes to signed decimal degrees
  lon <- unlist(strsplit(xlsdf[3, 2], "_"))
  lon <- (as.numeric(lon[1]) + as.numeric(lon[2]) / 60) * lon_sign

  ## times -1 because "S"
  lat <- unlist(strsplit(xlsdf[2, 2], "_"))
  lat <- (as.numeric(lat[1]) + as.numeric(lat[2]) / 60) * lat_sign

  ## output data frame with coords from aws (Automatic Weather Station)
  outdf <- data.frame(lon = lon
                      ,lat = lat
                      ,alt = alt
                      ,stringsAsFactors = FALSE)
  return(outdf)
}# end xy_coords_file

# Placeholder -- not yet implemented.
file_name_type <- function(filename) {

}

#############################################################
#############################################################
## Function to clean column names from an Excel file:
## drops digits, normalizes separators and whitespace, and returns
## one dot-separated string.
str_clean <- function(x) {
  #if( !require(stringr) ) stop('Required package stringr could not be loaded!')
  x %>%
    stringr::str_replace_all("[0-9]", "") %>%   # drop digits
    stringr::str_replace_all("\\.", "_") %>%    # dots -> underscores
    stringr::str_replace_all(" _", "") %>%      # drop dangling separators
    stringr::str_replace_all("_$", "") %>%      # drop trailing underscore
    paste(collapse = " ") %>%                   # collapse into one string
    stringr::str_trim() %>%
    stringr::str_replace_all(" ", "_") %>%
    stringr::str_replace_all("_", "\\.")        # underscores back to dots
}# end str_clean

#####################################################################
## Read Excel file from automatic weather station (aws)
#####################################################################
# file.name:  path to one INMET AWS Excel file
# verbose:    print the file name while processing
# na.strings: string used in the file to represent missing data
# kjm2.wm2:   factor applied to convert radiation from kJ m-2 to W m-2
read_aws_inmet_file <- function(file.name
                                , verbose = TRUE
                                , na.strings = "NULL"
                                , kjm2.wm2 = (1000/10^6)/0.0864*24) {
  # TEST:
  # file.name = xfiles_l[37]
  # file.name = files[1]
  # verbose = TRUE; na.strings = "NULL"; kjm2.wm2 = (1000/10^6)/0.0864*24
  #Sys.setenv(TZ = "UTC")

  if (verbose) cat( basename(file.name), "\n")

  #.................................................
  # import excel file
  #.................................................
  aws_l <- read_xfile(file.xls = file.name
                      ,na.strings = na.strings # string used to represent missing data
  )
  aws_data <- aws_l[["data"]]

  # clean data
  aws_data <- aws_data %>%
    clean_data()

  # add site and date columns
  site_id <- info_file_str(file.name = file.name)[ ,"id"]
  aws_data <- aws_data %>%
    dplyr::mutate(site = site_id)
  rm(site_id)
  #glimpse(aws_data)

  aws_data <- aws_data %>%
    tidy_data()

  # conversion rg from kjm2 to wm2
  if ("rg" %in% names(aws_data)) {
    # radiation unit conversion
    aws_data <- aws_data %>%
      dplyr::mutate(rg = rg * kjm2.wm2)
  }

  # add metadata
  aws_data <- aws_data %>%
    data.frame(aws_l[["meta"]]) %>%
    dplyr::tbl_df(.) %>%
    dplyr::select(-id) %>%
    # because some files (empty xls) are without values
    # and tidy_data() fill values with NA
    dplyr::distinct(.)

  rm(aws_l)
  gc()
  return(aws_data)
}

#####################################################################
## Read both Excel pair files from automatic weather station
#####################################################################
# Given one file of an AWS pair, finds the sibling file with the same
# station id in the same directory, reads both and joins them by
# station, date and station metadata.
read_aws_inmet <- function(file.name
                           , verbose = TRUE
                           , na.strings = "NULL"
                           , metadata = TRUE
                           , ...){
  # file.name <- xfiles_l[17]
  # file.name <- xfiles_l[35]

  str_search <- info_file_str(file.name = file.name)$id

  if (verbose) {
    cat("\n")
    cat(str_search, "\n")
  }

  # list files with same aws code
  files <- list.files(path = dirname(file.name)
                      ,pattern = str_search
                      ,full.names = TRUE)
  nfiles <- length(files)

  if (nfiles == 0 | nfiles > 2) {
    stop("There are "
         ,nfiles
         ," files matching the pattern: "
         , substr(basename(file.name), 1, 8)
         , "\n"
         ,"We expected two files per station .")
  }

  if (nfiles == 1) warning("Only one file was found with the pattern "
                           , str_search
                           , "\n"
                           ,"Only data from file "
                           , files
                           , " will be processed."
                           , "\n")

  if (verbose) {
    cat("----------------------------------------", "\n")
  }

  # given one file name, the paired file found above is also processed
  data2 <- plyr::llply(files
                       ,read_aws_inmet_file
                       ,verbose = verbose
  )
  gc()

  # join datasets read
  # NOTE(review): when nfiles == 1 the join below indexes data2[[2]],
  # which will fail despite the warning above -- confirm intended behavior
  data_j <- dplyr::full_join(x = data2[[1]]
                             ,y = data2[[2]]
                             ,by = c("site", "date", "lon", "lat", "alt", "name", "state")
  )
  data_j <- data_j %>%
    dplyr::arrange(date) %>%
    dplyr::select(site, lon:state, date, tair:rhmin, prec:ws)

  # optionally drop the station metadata columns
  if (!metadata) {
    data_j <- data_j %>%
      dplyr::select(-one_of("lon", "lat", "alt", "name", "state"))
  }

  rm(data2, str_search)
  return(data_j)
}
# x <- read_aws_inmet(file.name = xfiles[457])
/functions-testing/read_aws_inmet.R
permissive
lhmet/rinmetxls
R
false
false
6,864
r
## required packages
library(plyr)
library(tidyverse)
library(readxl)
library(doBy)

# All date-time handling is done in UTC
Sys.setenv(TZ = "UTC")

################################################################
## Helper functions to extract a file's base name and extension

# Return the file name without its extension (the part before the
# first "."); with full.names = TRUE the directory path is kept.
file_name <- function(file, full.names = FALSE){
  # file <- "a.txt.txt"
  stopifnot(is.character(file))
  file_dir <- dirname(file)
  file_nm <- file %>%
    basename() %>%
    stringr::str_split(., pattern = "\\.") %>%
    unlist() %>%
    dplyr::first(.)
  if (full.names) file_nm <- file.path(file_dir, file_nm)
  return(file_nm)
}

# Return the file extension (the part after the last ".");
# with dot = TRUE the extension is prefixed with an escaped dot
# ("\\.ext") so it can be used directly as a regex pattern.
file_ext <- function(file, dot = FALSE){
  stopifnot(is.character(file))
  ext <- file %>%
    basename() %>%
    stringr::str_split(., "\\.") %>%
    unlist() %>%
    dplyr::last(.)
  if (dot) ext <- paste0("\\.", ext)
  return(ext)
}

#' Extract spatial coordinates (lon, lat, alt) from data imported with
#' read_xfile()
#'
#' @param xlsdf data frame with station metadata; column 1 holds field
#'   labels and column 2 the values. Rows are used positionally:
#'   row 1 = altitude, row 2 = latitude, row 3 = longitude
#'   (degrees and minutes, with hemisphere letters N/S/E/W).
#'
#' @return a one-row data frame with columns \code{lon}, \code{lat}
#'   (signed decimal degrees) and \code{alt}.
#'
#' @examples
#'
xy_coords_file <- function(xlsdf){

  ## test
  # xlsdf <- read_xfile(xfiles[30])
  # xlsdf <- metadata %>% data.frame()

  ## adjust column names
  ## replace points and lower-case the field labels
  xlsdf[, 1] <- xlsdf[, 1] %>%
    stringr::str_replace_all("\\.", "") %>%
    tolower()
  #xlsdf[, 1] <- tolower(xlsdf[, 1])

  ## value column: strip points and the metre unit ("m")
  xlsdf[, 2] <- xlsdf[, 2] %>%
    stringr::str_replace_all("\\.", "") %>%
    stringr::str_replace_all("m", "")

  ## lon sign: negative for the western hemisphere ("W")
  if (length(grep("W", xlsdf[, 2])) > 0) {
    lon_sign <- -1
  } else {
    lon_sign <- 1
  }
  xlsdf[, 2] <- gsub("'W|'E", "", xlsdf[, 2])

  ## lat sign: negative for the southern hemisphere ("S")
  if (length(grep("S", xlsdf[, 2])) > 0) {
    lat_sign <- -1
  } else {
    lat_sign <- 1
  }
  xlsdf[, 2] <- gsub("'N|'S", "", xlsdf[, 2])

  ## remove degree string: "°" becomes "_" so degrees/minutes can be split
  xlsdf[, 2] <- gsub("°", "_", xlsdf[, 2])

  ## replace "," by "." (decimal separator) in the altitude value
  alt <- as.numeric(gsub(",", ".", xlsdf[1, 2]))

  ## convert degrees + minutes to signed decimal degrees
  lon <- unlist(strsplit(xlsdf[3, 2], "_"))
  lon <- (as.numeric(lon[1]) + as.numeric(lon[2]) / 60) * lon_sign

  ## times -1 because "S"
  lat <- unlist(strsplit(xlsdf[2, 2], "_"))
  lat <- (as.numeric(lat[1]) + as.numeric(lat[2]) / 60) * lat_sign

  ## output data frame with coords from aws (Automatic Weather Station)
  outdf <- data.frame(lon = lon
                      ,lat = lat
                      ,alt = alt
                      ,stringsAsFactors = FALSE)
  return(outdf)
}# end xy_coords_file

# Placeholder -- not yet implemented.
file_name_type <- function(filename) {

}

#############################################################
#############################################################
## Function to clean column names from an Excel file:
## drops digits, normalizes separators and whitespace, and returns
## one dot-separated string.
str_clean <- function(x) {
  #if( !require(stringr) ) stop('Required package stringr could not be loaded!')
  x %>%
    stringr::str_replace_all("[0-9]", "") %>%   # drop digits
    stringr::str_replace_all("\\.", "_") %>%    # dots -> underscores
    stringr::str_replace_all(" _", "") %>%      # drop dangling separators
    stringr::str_replace_all("_$", "") %>%      # drop trailing underscore
    paste(collapse = " ") %>%                   # collapse into one string
    stringr::str_trim() %>%
    stringr::str_replace_all(" ", "_") %>%
    stringr::str_replace_all("_", "\\.")        # underscores back to dots
}# end str_clean

#####################################################################
## Read Excel file from automatic weather station (aws)
#####################################################################
# file.name:  path to one INMET AWS Excel file
# verbose:    print the file name while processing
# na.strings: string used in the file to represent missing data
# kjm2.wm2:   factor applied to convert radiation from kJ m-2 to W m-2
read_aws_inmet_file <- function(file.name
                                , verbose = TRUE
                                , na.strings = "NULL"
                                , kjm2.wm2 = (1000/10^6)/0.0864*24) {
  # TEST:
  # file.name = xfiles_l[37]
  # file.name = files[1]
  # verbose = TRUE; na.strings = "NULL"; kjm2.wm2 = (1000/10^6)/0.0864*24
  #Sys.setenv(TZ = "UTC")

  if (verbose) cat( basename(file.name), "\n")

  #.................................................
  # import excel file
  #.................................................
  aws_l <- read_xfile(file.xls = file.name
                      ,na.strings = na.strings # string used to represent missing data
  )
  aws_data <- aws_l[["data"]]

  # clean data
  aws_data <- aws_data %>%
    clean_data()

  # add site and date columns
  site_id <- info_file_str(file.name = file.name)[ ,"id"]
  aws_data <- aws_data %>%
    dplyr::mutate(site = site_id)
  rm(site_id)
  #glimpse(aws_data)

  aws_data <- aws_data %>%
    tidy_data()

  # conversion rg from kjm2 to wm2
  if ("rg" %in% names(aws_data)) {
    # radiation unit conversion
    aws_data <- aws_data %>%
      dplyr::mutate(rg = rg * kjm2.wm2)
  }

  # add metadata
  aws_data <- aws_data %>%
    data.frame(aws_l[["meta"]]) %>%
    dplyr::tbl_df(.) %>%
    dplyr::select(-id) %>%
    # because some files (empty xls) are without values
    # and tidy_data() fill values with NA
    dplyr::distinct(.)

  rm(aws_l)
  gc()
  return(aws_data)
}

#####################################################################
## Read both Excel pair files from automatic weather station
#####################################################################
# Given one file of an AWS pair, finds the sibling file with the same
# station id in the same directory, reads both and joins them by
# station, date and station metadata.
read_aws_inmet <- function(file.name
                           , verbose = TRUE
                           , na.strings = "NULL"
                           , metadata = TRUE
                           , ...){
  # file.name <- xfiles_l[17]
  # file.name <- xfiles_l[35]

  str_search <- info_file_str(file.name = file.name)$id

  if (verbose) {
    cat("\n")
    cat(str_search, "\n")
  }

  # list files with same aws code
  files <- list.files(path = dirname(file.name)
                      ,pattern = str_search
                      ,full.names = TRUE)
  nfiles <- length(files)

  if (nfiles == 0 | nfiles > 2) {
    stop("There are "
         ,nfiles
         ," files matching the pattern: "
         , substr(basename(file.name), 1, 8)
         , "\n"
         ,"We expected two files per station .")
  }

  if (nfiles == 1) warning("Only one file was found with the pattern "
                           , str_search
                           , "\n"
                           ,"Only data from file "
                           , files
                           , " will be processed."
                           , "\n")

  if (verbose) {
    cat("----------------------------------------", "\n")
  }

  # given one file name, the paired file found above is also processed
  data2 <- plyr::llply(files
                       ,read_aws_inmet_file
                       ,verbose = verbose
  )
  gc()

  # join datasets read
  # NOTE(review): when nfiles == 1 the join below indexes data2[[2]],
  # which will fail despite the warning above -- confirm intended behavior
  data_j <- dplyr::full_join(x = data2[[1]]
                             ,y = data2[[2]]
                             ,by = c("site", "date", "lon", "lat", "alt", "name", "state")
  )
  data_j <- data_j %>%
    dplyr::arrange(date) %>%
    dplyr::select(site, lon:state, date, tair:rhmin, prec:ws)

  # optionally drop the station metadata columns
  if (!metadata) {
    data_j <- data_j %>%
      dplyr::select(-one_of("lon", "lat", "alt", "name", "state"))
  }

  rm(data2, str_search)
  return(data_j)
}
# x <- read_aws_inmet(file.name = xfiles[457])
#' Add edges and attributes to graph from a table #' @description Add edges and their attributes to an #' existing graph object from data in a CSV file or a #' data frame. #' @param graph a graph object of class #' \code{dgr_graph}. #' @param table either a path to a CSV file, or, a data #' frame object. #' @param from_col the name of the table column from #' which edges originate. #' @param to_col the name of the table column to #' which edges terminate. #' @param from_to_map a single character value for #' the mapping of the \code{from} and \code{to} columns #' in the external table (supplied as \code{from_col} #' and \code{to_col}, respectively) to a column in the #' graph's internal node data frame (ndf). #' @param rel_col an option to apply a column of data #' in the table as \code{rel} attribute values. #' @param set_rel an optional string to apply a #' \code{rel} attribute to all edges created from the #' table records. #' @param drop_cols an optional column selection #' statement for dropping columns from the external #' table before inclusion as attributes in the graph's #' internal edge data frame. Several columns can be #' dropped by name using the syntax #' \code{col_1 & col_2 & ...}. Columns can also be #' dropped using a numeric column range with \code{:} #' (e.g., \code{5:8}), or, by using the \code{:} #' between column names to specify the range (e.g., #' \code{col_5_name:col_8_name}). #' @return a graph object of class \code{dgr_graph}. 
#' @examples #' # Create an empty graph and then #' # add nodes to it from the #' # `currencies` dataset available #' # in the package #' graph <- #' create_graph() %>% #' add_nodes_from_table( #' table = currencies) #' #' # Now we want to add edges to the #' # graph using an included dataset, #' # `usd_exchange_rates`, which has #' # exchange rates between USD and #' # many other currencies; the key #' # here is that the data in the #' # `from` and `to` columns in the #' # external table maps to graph #' # node data available in the #' # `iso_4217_code` column of the #' # graph's internal node data frame #' graph_1 <- #' graph %>% #' add_edges_from_table( #' table = usd_exchange_rates, #' from_col = from_currency, #' to_col = to_currency, #' from_to_map = iso_4217_code) #' #' # View part of the graph's #' # internal edge data frame #' graph_1 %>% #' get_edge_df() %>% #' head() #' #> id from to rel cost_unit #' #> 1 1 148 1 <NA> 0.272300 #' #> 2 2 148 2 <NA> 0.015210 #' #> 3 3 148 3 <NA> 0.008055 #' #> 4 4 148 4 <NA> 0.002107 #' #> 5 5 148 5 <NA> 0.565000 #' #> 6 6 148 6 <NA> 0.006058 #' #' # If you would like to assign #' # any of the table's columns as the #' # `rel` attribute, this can done #' # with the `rel_col` argument; to #' # set a static `rel` attribute for #' # all edges created, use `set_rel` #' graph_2 <- #' graph %>% #' add_edges_from_table( #' table = usd_exchange_rates, #' from_col = from_currency, #' to_col = to_currency, #' from_to_map = iso_4217_code, #' set_rel = "from_usd") #' #' # View part of the graph's internal #' # edge data frame (edf) #' graph_2 %>% #' get_edge_df() %>% #' head() #' #> id from to rel cost_unit #' #> 1 1 148 1 from_usd 0.272300 #' #> 2 2 148 2 from_usd 0.015210 #' #> 3 3 148 3 from_usd 0.008055 #' #> 4 4 148 4 from_usd 0.002107 #' #> 5 5 148 5 from_usd 0.565000 #' #> 6 6 148 6 from_usd 0.006058 #' @importFrom utils read.csv #' @importFrom stats setNames #' @importFrom tibble as_tibble #' @importFrom dplyr left_join select 
#'   rename mutate bind_cols everything
#' @importFrom rlang enquo UQ
#' @export add_edges_from_table
add_edges_from_table <- function(graph,
                                 table,
                                 from_col,
                                 to_col,
                                 from_to_map,
                                 rel_col = NULL,
                                 set_rel = NULL,
                                 drop_cols = NULL) {

  # Get the time of function start
  time_function_start <- Sys.time()

  # Capture the bare column names supplied by the caller as strings
  from_col <- rlang::enquo(from_col)
  from_col <- (rlang::UQ(from_col) %>% paste())[2]

  to_col <- rlang::enquo(to_col)
  to_col <- (rlang::UQ(to_col) %>% paste())[2]

  from_to_map <- rlang::enquo(from_to_map)
  from_to_map <- (rlang::UQ(from_to_map) %>% paste())[2]

  drop_cols <- rlang::enquo(drop_cols)
  drop_cols <- (rlang::UQ(drop_cols) %>% paste())[2]

  if (drop_cols == "NULL") {
    drop_cols <- NULL
  }

  # Validation: Graph object is valid
  if (graph_object_valid(graph) == FALSE) {
    stop("The graph object is not valid.")
  }

  # Validation: Graph contains nodes
  if (graph_contains_nodes(graph) == FALSE) {
    stop("The graph contains no nodes, so, edges cannot be added.")
  }

  # Create bindings for specific variables (quiets R CMD check NOTEs
  # about NSE column references)
  rel <- id <- from <- to <- NULL

  # Determine whether the table is a file connection
  # to a CSV file or a data frame
  if (inherits(table, "character")) {

    # Load in CSV file
    csv <- utils::read.csv(table, stringsAsFactors = FALSE)

  } else if (inherits(table, "data.frame")) {

    # Rename `table` object as `csv`
    csv <- table
  }

  # Verify that value for `from_col` is in the table
  if (!(from_col %in% colnames(csv))) {
    stop("The value specified in `from_col` is not in the table.")
  }

  # Verify that value for `to_col` is in the table
  if (!(to_col %in% colnames(csv))) {
    stop("The value specified in `to_col` is not in the table.")
  }

  # Verify that value for `from_to_map` is in the
  # graph's ndf
  if (!(from_to_map %in% colnames(get_node_df(graph)))) {
    stop("The value specified in `from_to_map` is not in the graph.")
  }

  # Optionally set the `rel` attribute from a
  # specified column in the CSV.
  # (A stray statement that renamed by the NULL `label_col` binding --
  # always a no-op -- has been removed here.)
  if (!is.null(rel_col)) {

    if (any(colnames(csv) == rel_col)) {

      colnames(csv)[which(colnames(csv) == rel_col)] <- "rel"

      csv <- mutate(csv, rel = as.character(rel))
    }
  }

  # Extract the ndf from the graph
  ndf <- graph$nodes_df

  # Keep the table's attribute columns (everything except the
  # `from`/`to` mapping columns)
  csv_data_excluding_from_to <-
    csv %>%
    dplyr::select(setdiff(colnames(csv), c(from_col, to_col)))

  # Get the `from` col: map the table's `from` values to node `id`s
  col_from <-
    tibble::as_tibble(csv) %>%
    dplyr::select(rlang::UQ(from_col)) %>%
    dplyr::left_join(
      ndf %>% select(id, rlang::UQ(from_to_map)),
      by = stats::setNames(from_to_map, from_col)) %>%
    dplyr::select(id) %>%
    dplyr::rename(from = id) %>%
    dplyr::mutate(from = as.integer(from))

  # Get the `to` col: map the table's `to` values to node `id`s
  col_to <-
    tibble::as_tibble(csv) %>%
    dplyr::select(rlang::UQ(to_col)) %>%
    dplyr::left_join(
      ndf %>% select(id, rlang::UQ(from_to_map)),
      by = stats::setNames(from_to_map, to_col)) %>%
    dplyr::select(id) %>%
    dplyr::rename(to = id) %>%
    dplyr::mutate(to = as.integer(to))

  # Combine the `from` and `to` columns together along
  # with the additional attribute columns from the CSV
  edf <-
    col_from %>%
    dplyr::bind_cols(col_to) %>%
    dplyr::bind_cols(csv_data_excluding_from_to)

  # Add in a `rel` column (filled with NAs) if it's not
  # already in the table
  if (!("rel" %in% colnames(edf))) {
    edf <-
      edf %>%
      dplyr::mutate(rel = as.character(NA))
  }

  # Use the `select()` function to arrange the
  # column rows and then convert to a data frame
  edf <-
    edf %>%
    dplyr::select(from, to, rel, dplyr::everything()) %>%
    as.data.frame(stringsAsFactors = FALSE)

  # Remove any rows where there is an NA in either
  # `from` or `to` (i.e., table values with no matching node)
  edf <- edf[which(!is.na(edf$from) & !is.na(edf$to)), ]
  rownames(edf) <- NULL

  # Add in an `id` column, continuing from the graph's last edge id
  edf <-
    dplyr::bind_cols(
      data.frame(id = as.integer(1:nrow(edf)) + graph$last_edge),
      edf)

  # Optionally set the `rel` attribute with a single
  # value repeated down (scalar condition, so `&&`)
  if (is.null(rel_col) && !is.null(set_rel)) {
    edf <-
      edf %>%
      dplyr::mutate(rel = as.character(set_rel))
  }

  # If values for `drop_cols` provided, filter the CSV
  # columns by those named columns
  if (!is.null(drop_cols)) {

    col_selection <- get_col_selection(col_selection_stmt = drop_cols)

    if (col_selection[["selection_type"]] == "column_range") {

      col_index_1 <- which(colnames(csv) == col_selection[["column_selection"]][1])
      col_index_2 <- which(colnames(csv) == col_selection[["column_selection"]][2])

      col_indices <- col_index_1:col_index_2 %>% sort()

      columns_retained <- base::setdiff(colnames(csv), colnames(csv)[col_indices])

    } else if (col_selection[["selection_type"]] == "column_index_range") {

      col_indices <- col_selection[["column_selection"]] %>% sort()

      columns_retained <- base::setdiff(colnames(csv), colnames(csv)[col_indices])

    } else if (col_selection[["selection_type"]] %in%
               c("single_column_name", "column_names")) {

      columns_retained <- base::setdiff(colnames(csv), col_selection[["column_selection"]])

    } else if (length(col_selection) == 0) {

      columns_retained <- colnames(csv)
    }

    edf <- edf[, c("id", columns_retained)]
  }

  # Get the number of edges in the graph
  edges_graph_1 <- graph %>% count_edges()

  # Add the edf to the graph object
  if (is.null(graph$edges_df)) {
    graph$edges_df <- edf
  } else {
    graph$edges_df <- dplyr::bind_rows(graph$edges_df, edf)
  }

  # Get the updated number of edges in the graph
  edges_graph_2 <- graph %>% count_edges()

  # Get the number of edges added to
  # the graph
  edges_added <- edges_graph_2 - edges_graph_1

  # Update the `last_edge` value in the graph
  graph$last_edge <- nrow(graph$edges_df)

  # Record this modification in the graph's action log
  graph$graph_log <-
    add_action_to_log(
      graph_log = graph$graph_log,
      version_id = nrow(graph$graph_log) + 1,
      function_used = "add_edges_from_table",
      time_modified = time_function_start,
      duration = graph_function_duration(time_function_start),
      nodes = nrow(graph$nodes_df),
      edges = nrow(graph$edges_df),
      d_e = edges_added)

  # Perform graph actions, if any are available
  if (nrow(graph$graph_actions) > 0) {
    graph <-
      graph %>%
      trigger_graph_actions()
  }

  # Write graph backup if the option is set
  if (graph$graph_info$write_backups) {
    save_graph_as_rds(graph = graph)
  }

  graph
}
/R/add_edges_from_table.R
permissive
andorfor/DiagrammeR
R
false
false
10,411
r
#' Add edges and attributes to graph from a table #' @description Add edges and their attributes to an #' existing graph object from data in a CSV file or a #' data frame. #' @param graph a graph object of class #' \code{dgr_graph}. #' @param table either a path to a CSV file, or, a data #' frame object. #' @param from_col the name of the table column from #' which edges originate. #' @param to_col the name of the table column to #' which edges terminate. #' @param from_to_map a single character value for #' the mapping of the \code{from} and \code{to} columns #' in the external table (supplied as \code{from_col} #' and \code{to_col}, respectively) to a column in the #' graph's internal node data frame (ndf). #' @param rel_col an option to apply a column of data #' in the table as \code{rel} attribute values. #' @param set_rel an optional string to apply a #' \code{rel} attribute to all edges created from the #' table records. #' @param drop_cols an optional column selection #' statement for dropping columns from the external #' table before inclusion as attributes in the graph's #' internal edge data frame. Several columns can be #' dropped by name using the syntax #' \code{col_1 & col_2 & ...}. Columns can also be #' dropped using a numeric column range with \code{:} #' (e.g., \code{5:8}), or, by using the \code{:} #' between column names to specify the range (e.g., #' \code{col_5_name:col_8_name}). #' @return a graph object of class \code{dgr_graph}. 
#' @examples #' # Create an empty graph and then #' # add nodes to it from the #' # `currencies` dataset available #' # in the package #' graph <- #' create_graph() %>% #' add_nodes_from_table( #' table = currencies) #' #' # Now we want to add edges to the #' # graph using an included dataset, #' # `usd_exchange_rates`, which has #' # exchange rates between USD and #' # many other currencies; the key #' # here is that the data in the #' # `from` and `to` columns in the #' # external table maps to graph #' # node data available in the #' # `iso_4217_code` column of the #' # graph's internal node data frame #' graph_1 <- #' graph %>% #' add_edges_from_table( #' table = usd_exchange_rates, #' from_col = from_currency, #' to_col = to_currency, #' from_to_map = iso_4217_code) #' #' # View part of the graph's #' # internal edge data frame #' graph_1 %>% #' get_edge_df() %>% #' head() #' #> id from to rel cost_unit #' #> 1 1 148 1 <NA> 0.272300 #' #> 2 2 148 2 <NA> 0.015210 #' #> 3 3 148 3 <NA> 0.008055 #' #> 4 4 148 4 <NA> 0.002107 #' #> 5 5 148 5 <NA> 0.565000 #' #> 6 6 148 6 <NA> 0.006058 #' #' # If you would like to assign #' # any of the table's columns as the #' # `rel` attribute, this can done #' # with the `rel_col` argument; to #' # set a static `rel` attribute for #' # all edges created, use `set_rel` #' graph_2 <- #' graph %>% #' add_edges_from_table( #' table = usd_exchange_rates, #' from_col = from_currency, #' to_col = to_currency, #' from_to_map = iso_4217_code, #' set_rel = "from_usd") #' #' # View part of the graph's internal #' # edge data frame (edf) #' graph_2 %>% #' get_edge_df() %>% #' head() #' #> id from to rel cost_unit #' #> 1 1 148 1 from_usd 0.272300 #' #> 2 2 148 2 from_usd 0.015210 #' #> 3 3 148 3 from_usd 0.008055 #' #> 4 4 148 4 from_usd 0.002107 #' #> 5 5 148 5 from_usd 0.565000 #' #> 6 6 148 6 from_usd 0.006058 #' @importFrom utils read.csv #' @importFrom stats setNames #' @importFrom tibble as_tibble #' @importFrom dplyr left_join select 
#'   rename mutate bind_cols everything
#' @importFrom rlang enquo UQ
#' @export add_edges_from_table
add_edges_from_table <- function(graph,
                                 table,
                                 from_col,
                                 to_col,
                                 from_to_map,
                                 rel_col = NULL,
                                 set_rel = NULL,
                                 drop_cols = NULL) {

  # Get the time of function start
  time_function_start <- Sys.time()

  # Capture the bare column names supplied by the caller as strings
  from_col <- rlang::enquo(from_col)
  from_col <- (rlang::UQ(from_col) %>% paste())[2]

  to_col <- rlang::enquo(to_col)
  to_col <- (rlang::UQ(to_col) %>% paste())[2]

  from_to_map <- rlang::enquo(from_to_map)
  from_to_map <- (rlang::UQ(from_to_map) %>% paste())[2]

  drop_cols <- rlang::enquo(drop_cols)
  drop_cols <- (rlang::UQ(drop_cols) %>% paste())[2]

  if (drop_cols == "NULL") {
    drop_cols <- NULL
  }

  # Validation: Graph object is valid
  if (graph_object_valid(graph) == FALSE) {
    stop("The graph object is not valid.")
  }

  # Validation: Graph contains nodes
  if (graph_contains_nodes(graph) == FALSE) {
    stop("The graph contains no nodes, so, edges cannot be added.")
  }

  # Create bindings for specific variables (quiets R CMD check NOTEs
  # about NSE column references)
  rel <- id <- from <- to <- NULL

  # Determine whether the table is a file connection
  # to a CSV file or a data frame
  if (inherits(table, "character")) {

    # Load in CSV file
    csv <- utils::read.csv(table, stringsAsFactors = FALSE)

  } else if (inherits(table, "data.frame")) {

    # Rename `table` object as `csv`
    csv <- table
  }

  # Verify that value for `from_col` is in the table
  if (!(from_col %in% colnames(csv))) {
    stop("The value specified in `from_col` is not in the table.")
  }

  # Verify that value for `to_col` is in the table
  if (!(to_col %in% colnames(csv))) {
    stop("The value specified in `to_col` is not in the table.")
  }

  # Verify that value for `from_to_map` is in the
  # graph's ndf
  if (!(from_to_map %in% colnames(get_node_df(graph)))) {
    stop("The value specified in `from_to_map` is not in the graph.")
  }

  # Optionally set the `rel` attribute from a
  # specified column in the CSV.
  # (A stray statement that renamed by the NULL `label_col` binding --
  # always a no-op -- has been removed here.)
  if (!is.null(rel_col)) {

    if (any(colnames(csv) == rel_col)) {

      colnames(csv)[which(colnames(csv) == rel_col)] <- "rel"

      csv <- mutate(csv, rel = as.character(rel))
    }
  }

  # Extract the ndf from the graph
  ndf <- graph$nodes_df

  # Keep the table's attribute columns (everything except the
  # `from`/`to` mapping columns)
  csv_data_excluding_from_to <-
    csv %>%
    dplyr::select(setdiff(colnames(csv), c(from_col, to_col)))

  # Get the `from` col: map the table's `from` values to node `id`s
  col_from <-
    tibble::as_tibble(csv) %>%
    dplyr::select(rlang::UQ(from_col)) %>%
    dplyr::left_join(
      ndf %>% select(id, rlang::UQ(from_to_map)),
      by = stats::setNames(from_to_map, from_col)) %>%
    dplyr::select(id) %>%
    dplyr::rename(from = id) %>%
    dplyr::mutate(from = as.integer(from))

  # Get the `to` col: map the table's `to` values to node `id`s
  col_to <-
    tibble::as_tibble(csv) %>%
    dplyr::select(rlang::UQ(to_col)) %>%
    dplyr::left_join(
      ndf %>% select(id, rlang::UQ(from_to_map)),
      by = stats::setNames(from_to_map, to_col)) %>%
    dplyr::select(id) %>%
    dplyr::rename(to = id) %>%
    dplyr::mutate(to = as.integer(to))

  # Combine the `from` and `to` columns together along
  # with the additional attribute columns from the CSV
  edf <-
    col_from %>%
    dplyr::bind_cols(col_to) %>%
    dplyr::bind_cols(csv_data_excluding_from_to)

  # Add in a `rel` column (filled with NAs) if it's not
  # already in the table
  if (!("rel" %in% colnames(edf))) {
    edf <-
      edf %>%
      dplyr::mutate(rel = as.character(NA))
  }

  # Use the `select()` function to arrange the
  # column rows and then convert to a data frame
  edf <-
    edf %>%
    dplyr::select(from, to, rel, dplyr::everything()) %>%
    as.data.frame(stringsAsFactors = FALSE)

  # Remove any rows where there is an NA in either
  # `from` or `to` (i.e., table values with no matching node)
  edf <- edf[which(!is.na(edf$from) & !is.na(edf$to)), ]
  rownames(edf) <- NULL

  # Add in an `id` column, continuing from the graph's last edge id
  edf <-
    dplyr::bind_cols(
      data.frame(id = as.integer(1:nrow(edf)) + graph$last_edge),
      edf)

  # Optionally set the `rel` attribute with a single
  # value repeated down (scalar condition, so `&&`)
  if (is.null(rel_col) && !is.null(set_rel)) {
    edf <-
      edf %>%
      dplyr::mutate(rel = as.character(set_rel))
  }

  # If values for `drop_cols` provided, filter the CSV
  # columns by those named columns
  if (!is.null(drop_cols)) {

    col_selection <- get_col_selection(col_selection_stmt = drop_cols)

    if (col_selection[["selection_type"]] == "column_range") {

      col_index_1 <- which(colnames(csv) == col_selection[["column_selection"]][1])
      col_index_2 <- which(colnames(csv) == col_selection[["column_selection"]][2])

      col_indices <- col_index_1:col_index_2 %>% sort()

      columns_retained <- base::setdiff(colnames(csv), colnames(csv)[col_indices])

    } else if (col_selection[["selection_type"]] == "column_index_range") {

      col_indices <- col_selection[["column_selection"]] %>% sort()

      columns_retained <- base::setdiff(colnames(csv), colnames(csv)[col_indices])

    } else if (col_selection[["selection_type"]] %in%
               c("single_column_name", "column_names")) {

      columns_retained <- base::setdiff(colnames(csv), col_selection[["column_selection"]])

    } else if (length(col_selection) == 0) {

      columns_retained <- colnames(csv)
    }

    edf <- edf[, c("id", columns_retained)]
  }

  # Get the number of edges in the graph
  edges_graph_1 <- graph %>% count_edges()

  # Add the edf to the graph object
  if (is.null(graph$edges_df)) {
    graph$edges_df <- edf
  } else {
    graph$edges_df <- dplyr::bind_rows(graph$edges_df, edf)
  }

  # Get the updated number of edges in the graph
  edges_graph_2 <- graph %>% count_edges()

  # Get the number of edges added to
  # the graph
  edges_added <- edges_graph_2 - edges_graph_1

  # Update the `last_edge` value in the graph
  graph$last_edge <- nrow(graph$edges_df)

  # Record this modification in the graph's action log
  graph$graph_log <-
    add_action_to_log(
      graph_log = graph$graph_log,
      version_id = nrow(graph$graph_log) + 1,
      function_used = "add_edges_from_table",
      time_modified = time_function_start,
      duration = graph_function_duration(time_function_start),
      nodes = nrow(graph$nodes_df),
      edges = nrow(graph$edges_df),
      d_e = edges_added)

  # Perform graph actions, if any are available
  if (nrow(graph$graph_actions) > 0) {
    graph <-
      graph %>%
      trigger_graph_actions()
  }

  # Write graph backup if the option is set
  if (graph$graph_info$write_backups) {
    save_graph_as_rds(graph = graph)
  }

  graph
}
\name{TWCR.get.obs} \alias{TWCR.get.obs} \title{TWCR get observations} \usage{ TWCR.get.obs(year, month, day, hour, version = 2, range = 0.5) } \arguments{ \item{range}{Date range (in days) to retrieve observations from - period is specified time +- range days (default is 0.5 - giving 1 day's obs).} } \value{ A data frame - one row for each observation. } \description{ Retrieves observations from the obs. feedback (prepbufr) files Gets all obs in +-range days around specified date } \details{ Specification of obs format is at http://rda.ucar.edu/datasets/ds131.1/docs/ISPD_quick_assimilated_ascii_format.pdf File access only - the observations feedback files are not online. } \seealso{ \code{TWCR.get.obs.1file} get the observations for a specific analysis run. }
/GSDF.TWCR/man/TWCR.get.obs.Rd
permissive
jacobvanetten/GSDF
R
false
false
802
rd
\name{TWCR.get.obs} \alias{TWCR.get.obs} \title{TWCR get observations} \usage{ TWCR.get.obs(year, month, day, hour, version = 2, range = 0.5) } \arguments{ \item{range}{Date range (in days) to retrieve observations from - period is specified time +- range days (default is 0.5 - giving 1 day's obs).} } \value{ A data frame - one row for each observation. } \description{ Retrieves observations from the obs. feedback (prepbufr) files Gets all obs in +-range days around specified date } \details{ Specification of obs format is at http://rda.ucar.edu/datasets/ds131.1/docs/ISPD_quick_assimilated_ascii_format.pdf File access only - the observations feedback files are not online. } \seealso{ \code{TWCR.get.obs.1file} get the observations for a specific analysis run. }
# Run the regional revenue-forecast pipeline for one work item and persist
# the result to the database.
#
# x          : character vector — x[1] = region, x[2] = phantom (product),
#              x[3] = "1" to restrict to simple-moving-average models only.
# connection : open RPostgreSQL connection used for both read and write.
# Remaining arguments (ilevel, iYYYY, ifreq, fcperiod, sendfcserie, fcrun,
# todate) are passed through to the query / writer unchanged.
f_mat_regi <- function(x, connection, ilevel, iYYYY, ifreq, fcperiod, sendfcserie, fcrun, todate) {
  print(x)
  phantom <- x[2]
  iorg_level <- 'topdown'
  region <- x[1]
  # x comes in as character; "1" == 1 is TRUE via coercion
  sma_only <- (x[3] == 1)
  print(region)
  print(phantom)
  print(iorg_level)

  # FIX: use a list rather than the atomic value 0 — `$<-` on an atomic
  # vector only works by coercing it to a list with a warning.
  status_message <- list(status = 'Initialized', message = 'Initialized')

  # Parameterized query: $1 = cutoff date (YYYY-MM-DD), $2 = country/region
  iquery <- "SELECT to_date(month,'YYYYMM') requested_deliv_date_to, revenue litre from revenue_bw_v where month <= to_char(to_date($1,'YYYY-MM-DD'),'YYYYMM') and country = $2 order by 1 asc "

  fcaccuracy <- extTryCatch(fcstMat_region(connection, phantom, iorg_level, region, iquery,
                                           FALSE, sma_only, iYYYY, ifreq, status_message, todate))

  write_fcobject_todb(connection, fcaccuracy, ilevel, phantom, region, iYYYY,
                      fcperiod, sendfcserie, fcrun, sma_only)
}

# Fetch the monthly revenue series for a region, build a ts object and
# compute forecast-accuracy results via fcstgetAccuracy().
# Returns the accuracy object augmented with `totalvolume` and `ts`.
fcstMat_region <- function(connection, Phantom, org_level, region, query,
                           intermittent, sma_only, DateMask, yrfreq, status, todate) {

  df_postgres <- RPostgreSQL::dbGetQuery(connection, query, c(todate, region))
  print(df_postgres)

  # Monthly series assumed to start January 2015 (column 2 = revenue)
  myts <- ts(df_postgres[, 2], start = c(2015, 1), frequency = yrfreq)

  returnobject <- fcstgetAccuracy(myts, intermittent, status, yrfreq, sma_only)
  returnobject$totalvolume <- sum(myts)
  returnobject$ts <- myts

  # NOTE(review): R's copy-on-modify semantics mean these writes only change
  # the local copy of `status`; the caller's object is unaffected — confirm
  # whether the status is meant to be returned instead.
  status$status <- "Completed"
  status$message <- "Completed"

  return(returnobject)
}
/R/revenue.R
no_license
Magi1414/aXialyzefcstcontrol
R
false
false
1,904
r
# Run the regional revenue-forecast pipeline for one work item and persist
# the result to the database.
#
# x          : character vector — x[1] = region, x[2] = phantom (product),
#              x[3] = "1" to restrict to simple-moving-average models only.
# connection : open RPostgreSQL connection used for both read and write.
# Remaining arguments (ilevel, iYYYY, ifreq, fcperiod, sendfcserie, fcrun,
# todate) are passed through to the query / writer unchanged.
f_mat_regi <- function(x, connection, ilevel, iYYYY, ifreq, fcperiod, sendfcserie, fcrun, todate) {
  print(x)
  phantom <- x[2]
  iorg_level <- 'topdown'
  region <- x[1]
  # x comes in as character; "1" == 1 is TRUE via coercion
  sma_only <- (x[3] == 1)
  print(region)
  print(phantom)
  print(iorg_level)

  # FIX: use a list rather than the atomic value 0 — `$<-` on an atomic
  # vector only works by coercing it to a list with a warning.
  status_message <- list(status = 'Initialized', message = 'Initialized')

  # Parameterized query: $1 = cutoff date (YYYY-MM-DD), $2 = country/region
  iquery <- "SELECT to_date(month,'YYYYMM') requested_deliv_date_to, revenue litre from revenue_bw_v where month <= to_char(to_date($1,'YYYY-MM-DD'),'YYYYMM') and country = $2 order by 1 asc "

  fcaccuracy <- extTryCatch(fcstMat_region(connection, phantom, iorg_level, region, iquery,
                                           FALSE, sma_only, iYYYY, ifreq, status_message, todate))

  write_fcobject_todb(connection, fcaccuracy, ilevel, phantom, region, iYYYY,
                      fcperiod, sendfcserie, fcrun, sma_only)
}

# Fetch the monthly revenue series for a region, build a ts object and
# compute forecast-accuracy results via fcstgetAccuracy().
# Returns the accuracy object augmented with `totalvolume` and `ts`.
fcstMat_region <- function(connection, Phantom, org_level, region, query,
                           intermittent, sma_only, DateMask, yrfreq, status, todate) {

  df_postgres <- RPostgreSQL::dbGetQuery(connection, query, c(todate, region))
  print(df_postgres)

  # Monthly series assumed to start January 2015 (column 2 = revenue)
  myts <- ts(df_postgres[, 2], start = c(2015, 1), frequency = yrfreq)

  returnobject <- fcstgetAccuracy(myts, intermittent, status, yrfreq, sma_only)
  returnobject$totalvolume <- sum(myts)
  returnobject$ts <- myts

  # NOTE(review): R's copy-on-modify semantics mean these writes only change
  # the local copy of `status`; the caller's object is unaffected — confirm
  # whether the status is meant to be returned instead.
  status$status <- "Completed"
  status$message <- "Completed"

  return(returnobject)
}
# Pairs-trading exploration script.
# Dependencies actually used: quantmod (getSymbols/Ad/Vo), TTR (EMA, runMean,
# runSD), tseries (adf.test).
# NOTE(review): `textxy()` in the plotting sections comes from the `calibrate`
# package, which is never loaded here — confirm it is attached elsewhere.
library(moments)
library(boot)
library(logspline)
library(quantmod)
require(ggplot2)
library(quantmod)
library(TTR)
library(PerformanceAnalytics)
library(tseries)

#
# Pairs Trading
# (1) random walk: regress BBY's adjusted price on its own 1-day lag
getSymbols('BBY', from = "2012-01-02")
BBY_price <- Ad(BBY)
# FIX: the lag must be taken on BBY_price; the original referenced an
# undefined object `apple_price` and errored out.
lag_BBY_price <- lag(BBY_price, k = 1)
fit <- lm(BBY_price ~ lag_BBY_price)
fit
resid(fit)

# (2) pair trading functions

# Rolling 180-day correlation between two tickers' adjusted prices, plotted
# as an EMA-smoothed series.
# FIX: `to` now defaults to today — the script calls this function both with
# and without `to`, and the original signature required it.
mycor = function(s1, s2, from, to = Sys.Date()) {
  ap1 = Ad(getSymbols(s1, auto.assign = FALSE, from = from, to = to))
  ap2 = Ad(getSymbols(s2, auto.assign = FALSE, from = from, to = to))
  n = length(ap2) ## this is how many days we want to work with
  ### some math...we want to start at day 180 and take the
  ### correlation from day 1 to day 180
  ### we then continue and stop at day n-179
  ### store the results in vals
  nn = n - 179
  vals = 1:nn
  for (i in 180:n) {
    p1 = ap1[(i - 179):i]
    p2 = ap2[(i - 179):i]
    vals[i - 179] = cor(p1, p2)
  }
  plot(EMA(vals), type = "l", main = 'Correlation', col = 'Brown')
}

# Mean volume and mean adjusted price for a pair of tickers.
volume_price <- function(stock, stock1, from) {
  s <- getSymbols(stock, auto.assign = FALSE, from = from)
  s1 <- getSymbols(stock1, auto.assign = FALSE, from = from)
  stock <- c(stock, stock1)
  meanvolume <- c(mean(Vo(s)), mean(Vo(s1)))
  meanprice <- c(mean(Ad(s)), mean(Ad(s1)))
  return(list(stock_name = stock, mean_volume = meanvolume, mean_price = meanprice))
}

# Pair select: rolling 180-day cointegration check.  For each window the
# hedge ratio is fit by a no-intercept regression; the spread is ADF-tested
# and vals stores 1 - p-value (higher = stronger stationarity evidence).
# FIX: `to` was previously accepted but ignored by getSymbols; it is now
# honoured, defaulting to today so calls that omit it keep working.
mycoin = function(s1, s2, from, to = Sys.Date()) {
  ap1 = Ad(getSymbols(s1, auto.assign = FALSE, from = from, to = to))
  ap2 = Ad(getSymbols(s2, auto.assign = FALSE, from = from, to = to))
  n = length(ap2)
  nn = n - 179
  vals = 1:nn
  for (i in 180:n) {
    p1 = ap1[(i - 179):i]
    p2 = ap2[(i - 179):i]
    fit = lm(p1 ~ -1 + p2)
    beta = coef(fit)[1]
    sprd = p1 - beta * p2
    sprd = as.numeric(sprd)
    vals[i - 179] = 1 - adf.test(sprd, alternative = "stationary", k = 0)$p.value
  }
  plot(EMA(vals), type = "l", main = 'Cointegration', col = 'Blue')
}

# Summary stats for a candidate pair over the most recent 180 days:
# cointegration p-value, correlation, and mean daily volume of each leg.
# FIX: callers pass `to = to`, but the original signature had no `to`
# argument, so those calls raised "unused argument" errors.
myvals = function(s1, s2, from, to = Sys.Date()) {
  ap1 = Ad(getSymbols(s1, auto.assign = FALSE, from = from, to = to))
  ap2 = Ad(getSymbols(s2, auto.assign = FALSE, from = from, to = to))
  n = length(ap2)
  nn = n - 179
  vals = 1:nn
  i = n
  p1 = ap1[(i - 179):i]
  p2 = ap2[(i - 179):i]
  fit = lm(p1 ~ -1 + p2)
  beta = coef(fit)[1]
  sprd = p1 - beta * p2
  sprd = as.numeric(sprd)
  # cat("Cointegration p=value = ", adf.test(sprd,alternative="stationary",k=0)$p.value, "\n")
  # cat("Correlation = ", cor(p1,p2), "\n")
  coin <- adf.test(sprd, alternative = "stationary", k = 0)$p.value
  corre <- cor(p1, p2)
  out <- list(
    co_intigration = as.numeric(coin),
    correlation_coeff = as.numeric(corre),
    volume_1 = mean(Vo(getSymbols(s1, auto.assign = FALSE, from = from, to = to))),
    volume_2 = mean(Vo(getSymbols(s2, auto.assign = FALSE, from = from, to = to)))
  )
  return(out)
}

# Backtest a simple mean-reversion pairs strategy on two tickers.
# method: "diff" (difference of per-stock z-scores), "ratio" (z-score of the
# price ratio) or "log ratio" (z-score of the log ratio; this branch
# deliberately overrides ma.days to a slower 130-day window).
# A $10,000 long / $10,000 short position opens when the signal crosses
# +/- threshold and closes when it reverts through zero.  Returns a list of
# trade statistics; closeout = TRUE force-closes any open position at the end.
pairs.trade <- function(stock1, stock2, from = "2015-01-01", to = Sys.Date(),
                        ma.days = 14, method = "diff", threshold = 2, closeout = T) {
  require(quantmod)
  x1 <- getSymbols(stock1, auto.assign = F, from = from, to = to)
  x1 <- as.numeric(Ad(x1))
  x2 <- getSymbols(stock2, auto.assign = F, from = from, to = to)
  x2 <- as.numeric(Ad(x2))
  if (method == "diff") {
    x1.norm <- (x1 - runMean(x1, n = ma.days)) / runSD(x1, ma.days)
    x2.norm <- (x2 - runMean(x2, n = ma.days)) / runSD(x2, ma.days)
    out.ts <- x1.norm - x2.norm
  } else if (method == "ratio") {
    ts <- x1 / x2
    out.ts <- (ts - runMean(ts, n = ma.days)) / runSD(ts, ma.days)
  } else if (method == "log ratio") {
    ma.days <- 130  # intentional: slower window for the log-ratio signal
    ts <- log(x1 / x2)
    out.ts <- (ts - runMean(ts, n = ma.days)) / runSD(ts, ma.days)
  }
  numdays <- length(out.ts)
  # initialize quantities
  x1.traded = x2.traded = 0
  current = "neither"
  profit = 0
  maxprofit = minprofit = numtrades = winners = 0
  mytrade = c()
  for (i in ma.days:numdays) {
    if (out.ts[i] < -threshold & current == "neither") {
      # signal low: long stock1 / short stock2
      x1.traded = (10000 / x1[i])
      x2.traded = (-10000 / x2[i])
      current = "x2"
      numtrades = numtrades + 1
      ## print(paste("Short", stock2, "at", x2[i], "and Long", stock1, "at", x1[i]))
    }
    if (out.ts[i] > threshold & current == "neither") {
      # signal high: short stock1 / long stock2
      x1.traded = (-10000 / x1[i])
      x2.traded = (10000 / x2[i])
      current = "x1"
      numtrades = numtrades + 1
      ## print(paste("Short", stock1, "at", x1[i], "and Long", stock2, "at", x2[i]))
    }
    if ((out.ts[i] > 0 & current == "x2") | (out.ts[i] < 0 & current == "x1")) {
      # signal reverted through zero: close out the open position
      profit.temp = x1.traded * x1[i] + x2.traded * x2[i]
      profit = profit + profit.temp
      mytrade <- c(mytrade, profit.temp)
      winners = winners + (profit.temp > 0)
      maxprofit = max(maxprofit, profit.temp)
      minprofit = min(minprofit, profit.temp)
      x1.traded = 0
      x2.traded = 0
      current = "neither"
    }
  }
  # Note: this optional closing out is outside the for loop!
  if (x1.traded != 0 & closeout == T) {
    profit.temp = x1.traded * x1[i] + x2.traded * x2[i]
    profit = profit + profit.temp
    mytrade <- c(mytrade, profit.temp)
    winners = winners + (profit.temp > 0)
    maxprofit = max(maxprofit, profit.temp)
    minprofit = min(minprofit, profit.temp)
    x1.traded = 0
    x2.traded = 0
    current = "neither"
  }
  # Counting issue: an unclosed position should not count as a trade
  if (x1.traded != 0 & closeout == F) {
    numtrades = numtrades - 1
  }
  # tabulate results
  results = list(Winners = winners,
                 number.of.trades = numtrades,
                 winning.percentage = 100 * winners / numtrades,
                 max.profit = maxprofit,
                 min.profit = minprofit,
                 avg.wining = mean(mytrade[mytrade > 0]),
                 avg.losing = mean(mytrade[mytrade < 0]),
                 profit = profit)
  return(results)
}

# ---- candidate screening over the small-cap universe ----
# NOTE(review): `samll_cap_com_lis_hist` (sic) must already exist in the
# workspace; it is not created in this script.
sample(as.character(samll_cap_com_lis_hist$Symbol), 10)
pair_test <- combn(as.character(samll_cap_com_lis_hist$Symbol), 2)
pair_test[1, ]
value <- list()
for (i in seq_len(ncol(pair_test))) {
  get_pair <- myvals(pair_test[1, i], pair_test[2, i], from = '2013-09-01')
  # FIX: name the volume columns `volume_1`/`volume_2` to match the subset()
  # filter below; the original built `vol_1`/`vol_2`, so the filter could not
  # find the columns it referenced.
  my_list <- list(co_intigration = get_pair$co_intigration,
                  correlation_coeff = get_pair$correlation_coeff,
                  stock_1 = pair_test[1, i],
                  stock_2 = pair_test[2, i],
                  volume_1 = get_pair$volume_1,
                  volume_2 = get_pair$volume_2)
  get_pair <- data.frame(my_list)
  value <- rbind(value, get_pair)
}
value_sub <- subset(value, subset = co_intigration < 0.1 &
                      correlation_coeff > 0.6 &
                      volume_1 > 1000000 &
                      volume_2 > 1000000)

# ---- Training window (2014-06 to 2015-06) ----
# Candidate notes: #6 no  #13 no  #8 yes  #4 yes
# FIX: the original left `i<-` dangling, which chained into the next
# assignment and silently made `i` a ticker string; choose the candidate
# index explicitly (4 was flagged as a working pair above).
i <- 4
pair_stk_1 <- as.character(value_sub$stock_1)[i]
pair_stk_2 <- as.character(value_sub$stock_2)[i]
from = "2014-06-01"
to = "2015-06-01"
pairs.trade(pair_stk_1, pair_stk_2, from = from, to = to)
par(mfrow = c(2, 1))
mycoin(pair_stk_1, pair_stk_2, from = from)
mycor(pair_stk_1, pair_stk_2, from = from)
par(mfrow = c(1, 1))
# stock_a<-'SINA'
# stock_b<-'BABA'
stock_a <- 'AMZN'
stock_b <- 'T'
stock_a <- pair_stk_1
stock_b <- pair_stk_2
par(mfrow = c(1, 2))
mycor(s1 = stock_a, s2 = stock_b, from = from, to = to)
mycoin(s1 = stock_a, s2 = stock_b, from = from, to = to)
par(mfrow = c(1, 1))
# v_p<-volume_price(stock=stock_a,stock1=stock_b,from = from )
# v_p<-data.frame(v_p)
# v_p
coin_corr <- myvals(stock_a, stock_b, from = from, to = to)
coin_corr <- data.frame(coin_corr)
coin_corr
diff <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'diff')
ratio <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'ratio')
log_ratio <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'log ratio')
Item <- c('Winners', 'number.of.trades', 'winning.percentage',
          'max.profit', 'min.profit',
          'avg.wining', 'avg.losing', 'profit')
diff <- round(as.numeric(diff), 3)
ratio <- round(as.numeric(ratio), 3)
log_ratio <- round(as.numeric(log_ratio), 3)
data_compare <- cbind(Item, diff, ratio, log_ratio)
data_compare <- data.frame(data_compare)
data_compare
par(mfrow = c(1, 1))
plot(diff, ratio, col = 'Brown', main = "Diff vs. Ratio")
textxy(diff, ratio, Item)
abline(a = 0, b = 1, col = 'Green')
abline(h = 0, v = 0)
# FIX: removed a stray double comma (empty argument) from this call.
plot(diff, ratio, ylim = c(-5000, 5000), xlim = c(-5000, 5000),
     col = 'Brown', main = "Diff vs. Ratio zoom")
textxy(diff, ratio, Item)
abline(a = 0, b = 1, col = 'Green')
abline(h = 0, v = 0)
par(mfrow = c(1, 1))

# ---- Testing window (2015-06 to 2016-06), candidates 8 and 4 ----
i <- 8
pair_stk_1 <- as.character(value_sub$stock_1)[i]
pair_stk_2 <- as.character(value_sub$stock_2)[i]
from = "2015-06-01"
to = "2016-06-01"
pairs.trade(pair_stk_1, pair_stk_2, from = from, to = to)
par(mfrow = c(2, 1))
mycoin(pair_stk_1, pair_stk_2, from = from)
mycor(pair_stk_1, pair_stk_2, from = from)
par(mfrow = c(1, 1))
# stock_a<-'SINA'
# stock_b<-'BABA'
stock_a <- 'AMZN'
stock_b <- 'T'
stock_a <- pair_stk_1
stock_b <- pair_stk_2
par(mfrow = c(1, 2))
mycor(s1 = stock_a, s2 = stock_b, from = from, to = to)
mycoin(s1 = stock_a, s2 = stock_b, from = from, to = to)
par(mfrow = c(1, 1))
# v_p<-volume_price(stock=stock_a,stock1=stock_b,from = from )
# v_p<-data.frame(v_p)
# v_p
coin_corr <- myvals(stock_a, stock_b, from = from, to = to)
coin_corr <- data.frame(coin_corr)
coin_corr
diff <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'diff')
ratio <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'ratio')
log_ratio <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'log ratio')
Item <- c('Winners', 'number.of.trades', 'winning.percentage',
          'max.profit', 'min.profit',
          'avg.wining', 'avg.losing', 'profit')
diff <- round(as.numeric(diff), 3)
ratio <- round(as.numeric(ratio), 3)
log_ratio <- round(as.numeric(log_ratio), 3)
data_compare <- cbind(Item, diff, ratio, log_ratio)
data_compare <- data.frame(data_compare)
data_compare
/Final_Project_Pairs_Trading_Tool.R
no_license
Wiley508/Stat107_FinalProject
R
false
false
9,874
r
# Pairs-trading exploration script.
# Dependencies actually used: quantmod (getSymbols/Ad/Vo), TTR (EMA, runMean,
# runSD), tseries (adf.test).
# NOTE(review): `textxy()` in the plotting sections comes from the `calibrate`
# package, which is never loaded here — confirm it is attached elsewhere.
library(moments)
library(boot)
library(logspline)
library(quantmod)
require(ggplot2)
library(quantmod)
library(TTR)
library(PerformanceAnalytics)
library(tseries)

#
# Pairs Trading
# (1) random walk: regress BBY's adjusted price on its own 1-day lag
getSymbols('BBY', from = "2012-01-02")
BBY_price <- Ad(BBY)
# FIX: the lag must be taken on BBY_price; the original referenced an
# undefined object `apple_price` and errored out.
lag_BBY_price <- lag(BBY_price, k = 1)
fit <- lm(BBY_price ~ lag_BBY_price)
fit
resid(fit)

# (2) pair trading functions

# Rolling 180-day correlation between two tickers' adjusted prices, plotted
# as an EMA-smoothed series.
# FIX: `to` now defaults to today — the script calls this function both with
# and without `to`, and the original signature required it.
mycor = function(s1, s2, from, to = Sys.Date()) {
  ap1 = Ad(getSymbols(s1, auto.assign = FALSE, from = from, to = to))
  ap2 = Ad(getSymbols(s2, auto.assign = FALSE, from = from, to = to))
  n = length(ap2) ## this is how many days we want to work with
  ### some math...we want to start at day 180 and take the
  ### correlation from day 1 to day 180
  ### we then continue and stop at day n-179
  ### store the results in vals
  nn = n - 179
  vals = 1:nn
  for (i in 180:n) {
    p1 = ap1[(i - 179):i]
    p2 = ap2[(i - 179):i]
    vals[i - 179] = cor(p1, p2)
  }
  plot(EMA(vals), type = "l", main = 'Correlation', col = 'Brown')
}

# Mean volume and mean adjusted price for a pair of tickers.
volume_price <- function(stock, stock1, from) {
  s <- getSymbols(stock, auto.assign = FALSE, from = from)
  s1 <- getSymbols(stock1, auto.assign = FALSE, from = from)
  stock <- c(stock, stock1)
  meanvolume <- c(mean(Vo(s)), mean(Vo(s1)))
  meanprice <- c(mean(Ad(s)), mean(Ad(s1)))
  return(list(stock_name = stock, mean_volume = meanvolume, mean_price = meanprice))
}

# Pair select: rolling 180-day cointegration check.  For each window the
# hedge ratio is fit by a no-intercept regression; the spread is ADF-tested
# and vals stores 1 - p-value (higher = stronger stationarity evidence).
# FIX: `to` was previously accepted but ignored by getSymbols; it is now
# honoured, defaulting to today so calls that omit it keep working.
mycoin = function(s1, s2, from, to = Sys.Date()) {
  ap1 = Ad(getSymbols(s1, auto.assign = FALSE, from = from, to = to))
  ap2 = Ad(getSymbols(s2, auto.assign = FALSE, from = from, to = to))
  n = length(ap2)
  nn = n - 179
  vals = 1:nn
  for (i in 180:n) {
    p1 = ap1[(i - 179):i]
    p2 = ap2[(i - 179):i]
    fit = lm(p1 ~ -1 + p2)
    beta = coef(fit)[1]
    sprd = p1 - beta * p2
    sprd = as.numeric(sprd)
    vals[i - 179] = 1 - adf.test(sprd, alternative = "stationary", k = 0)$p.value
  }
  plot(EMA(vals), type = "l", main = 'Cointegration', col = 'Blue')
}

# Summary stats for a candidate pair over the most recent 180 days:
# cointegration p-value, correlation, and mean daily volume of each leg.
# FIX: callers pass `to = to`, but the original signature had no `to`
# argument, so those calls raised "unused argument" errors.
myvals = function(s1, s2, from, to = Sys.Date()) {
  ap1 = Ad(getSymbols(s1, auto.assign = FALSE, from = from, to = to))
  ap2 = Ad(getSymbols(s2, auto.assign = FALSE, from = from, to = to))
  n = length(ap2)
  nn = n - 179
  vals = 1:nn
  i = n
  p1 = ap1[(i - 179):i]
  p2 = ap2[(i - 179):i]
  fit = lm(p1 ~ -1 + p2)
  beta = coef(fit)[1]
  sprd = p1 - beta * p2
  sprd = as.numeric(sprd)
  # cat("Cointegration p=value = ", adf.test(sprd,alternative="stationary",k=0)$p.value, "\n")
  # cat("Correlation = ", cor(p1,p2), "\n")
  coin <- adf.test(sprd, alternative = "stationary", k = 0)$p.value
  corre <- cor(p1, p2)
  out <- list(
    co_intigration = as.numeric(coin),
    correlation_coeff = as.numeric(corre),
    volume_1 = mean(Vo(getSymbols(s1, auto.assign = FALSE, from = from, to = to))),
    volume_2 = mean(Vo(getSymbols(s2, auto.assign = FALSE, from = from, to = to)))
  )
  return(out)
}

# Backtest a simple mean-reversion pairs strategy on two tickers.
# method: "diff" (difference of per-stock z-scores), "ratio" (z-score of the
# price ratio) or "log ratio" (z-score of the log ratio; this branch
# deliberately overrides ma.days to a slower 130-day window).
# A $10,000 long / $10,000 short position opens when the signal crosses
# +/- threshold and closes when it reverts through zero.  Returns a list of
# trade statistics; closeout = TRUE force-closes any open position at the end.
pairs.trade <- function(stock1, stock2, from = "2015-01-01", to = Sys.Date(),
                        ma.days = 14, method = "diff", threshold = 2, closeout = T) {
  require(quantmod)
  x1 <- getSymbols(stock1, auto.assign = F, from = from, to = to)
  x1 <- as.numeric(Ad(x1))
  x2 <- getSymbols(stock2, auto.assign = F, from = from, to = to)
  x2 <- as.numeric(Ad(x2))
  if (method == "diff") {
    x1.norm <- (x1 - runMean(x1, n = ma.days)) / runSD(x1, ma.days)
    x2.norm <- (x2 - runMean(x2, n = ma.days)) / runSD(x2, ma.days)
    out.ts <- x1.norm - x2.norm
  } else if (method == "ratio") {
    ts <- x1 / x2
    out.ts <- (ts - runMean(ts, n = ma.days)) / runSD(ts, ma.days)
  } else if (method == "log ratio") {
    ma.days <- 130  # intentional: slower window for the log-ratio signal
    ts <- log(x1 / x2)
    out.ts <- (ts - runMean(ts, n = ma.days)) / runSD(ts, ma.days)
  }
  numdays <- length(out.ts)
  # initialize quantities
  x1.traded = x2.traded = 0
  current = "neither"
  profit = 0
  maxprofit = minprofit = numtrades = winners = 0
  mytrade = c()
  for (i in ma.days:numdays) {
    if (out.ts[i] < -threshold & current == "neither") {
      # signal low: long stock1 / short stock2
      x1.traded = (10000 / x1[i])
      x2.traded = (-10000 / x2[i])
      current = "x2"
      numtrades = numtrades + 1
      ## print(paste("Short", stock2, "at", x2[i], "and Long", stock1, "at", x1[i]))
    }
    if (out.ts[i] > threshold & current == "neither") {
      # signal high: short stock1 / long stock2
      x1.traded = (-10000 / x1[i])
      x2.traded = (10000 / x2[i])
      current = "x1"
      numtrades = numtrades + 1
      ## print(paste("Short", stock1, "at", x1[i], "and Long", stock2, "at", x2[i]))
    }
    if ((out.ts[i] > 0 & current == "x2") | (out.ts[i] < 0 & current == "x1")) {
      # signal reverted through zero: close out the open position
      profit.temp = x1.traded * x1[i] + x2.traded * x2[i]
      profit = profit + profit.temp
      mytrade <- c(mytrade, profit.temp)
      winners = winners + (profit.temp > 0)
      maxprofit = max(maxprofit, profit.temp)
      minprofit = min(minprofit, profit.temp)
      x1.traded = 0
      x2.traded = 0
      current = "neither"
    }
  }
  # Note: this optional closing out is outside the for loop!
  if (x1.traded != 0 & closeout == T) {
    profit.temp = x1.traded * x1[i] + x2.traded * x2[i]
    profit = profit + profit.temp
    mytrade <- c(mytrade, profit.temp)
    winners = winners + (profit.temp > 0)
    maxprofit = max(maxprofit, profit.temp)
    minprofit = min(minprofit, profit.temp)
    x1.traded = 0
    x2.traded = 0
    current = "neither"
  }
  # Counting issue: an unclosed position should not count as a trade
  if (x1.traded != 0 & closeout == F) {
    numtrades = numtrades - 1
  }
  # tabulate results
  results = list(Winners = winners,
                 number.of.trades = numtrades,
                 winning.percentage = 100 * winners / numtrades,
                 max.profit = maxprofit,
                 min.profit = minprofit,
                 avg.wining = mean(mytrade[mytrade > 0]),
                 avg.losing = mean(mytrade[mytrade < 0]),
                 profit = profit)
  return(results)
}

# ---- candidate screening over the small-cap universe ----
# NOTE(review): `samll_cap_com_lis_hist` (sic) must already exist in the
# workspace; it is not created in this script.
sample(as.character(samll_cap_com_lis_hist$Symbol), 10)
pair_test <- combn(as.character(samll_cap_com_lis_hist$Symbol), 2)
pair_test[1, ]
value <- list()
for (i in seq_len(ncol(pair_test))) {
  get_pair <- myvals(pair_test[1, i], pair_test[2, i], from = '2013-09-01')
  # FIX: name the volume columns `volume_1`/`volume_2` to match the subset()
  # filter below; the original built `vol_1`/`vol_2`, so the filter could not
  # find the columns it referenced.
  my_list <- list(co_intigration = get_pair$co_intigration,
                  correlation_coeff = get_pair$correlation_coeff,
                  stock_1 = pair_test[1, i],
                  stock_2 = pair_test[2, i],
                  volume_1 = get_pair$volume_1,
                  volume_2 = get_pair$volume_2)
  get_pair <- data.frame(my_list)
  value <- rbind(value, get_pair)
}
value_sub <- subset(value, subset = co_intigration < 0.1 &
                      correlation_coeff > 0.6 &
                      volume_1 > 1000000 &
                      volume_2 > 1000000)

# ---- Training window (2014-06 to 2015-06) ----
# Candidate notes: #6 no  #13 no  #8 yes  #4 yes
# FIX: the original left `i<-` dangling, which chained into the next
# assignment and silently made `i` a ticker string; choose the candidate
# index explicitly (4 was flagged as a working pair above).
i <- 4
pair_stk_1 <- as.character(value_sub$stock_1)[i]
pair_stk_2 <- as.character(value_sub$stock_2)[i]
from = "2014-06-01"
to = "2015-06-01"
pairs.trade(pair_stk_1, pair_stk_2, from = from, to = to)
par(mfrow = c(2, 1))
mycoin(pair_stk_1, pair_stk_2, from = from)
mycor(pair_stk_1, pair_stk_2, from = from)
par(mfrow = c(1, 1))
# stock_a<-'SINA'
# stock_b<-'BABA'
stock_a <- 'AMZN'
stock_b <- 'T'
stock_a <- pair_stk_1
stock_b <- pair_stk_2
par(mfrow = c(1, 2))
mycor(s1 = stock_a, s2 = stock_b, from = from, to = to)
mycoin(s1 = stock_a, s2 = stock_b, from = from, to = to)
par(mfrow = c(1, 1))
# v_p<-volume_price(stock=stock_a,stock1=stock_b,from = from )
# v_p<-data.frame(v_p)
# v_p
coin_corr <- myvals(stock_a, stock_b, from = from, to = to)
coin_corr <- data.frame(coin_corr)
coin_corr
diff <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'diff')
ratio <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'ratio')
log_ratio <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'log ratio')
Item <- c('Winners', 'number.of.trades', 'winning.percentage',
          'max.profit', 'min.profit',
          'avg.wining', 'avg.losing', 'profit')
diff <- round(as.numeric(diff), 3)
ratio <- round(as.numeric(ratio), 3)
log_ratio <- round(as.numeric(log_ratio), 3)
data_compare <- cbind(Item, diff, ratio, log_ratio)
data_compare <- data.frame(data_compare)
data_compare
par(mfrow = c(1, 1))
plot(diff, ratio, col = 'Brown', main = "Diff vs. Ratio")
textxy(diff, ratio, Item)
abline(a = 0, b = 1, col = 'Green')
abline(h = 0, v = 0)
# FIX: removed a stray double comma (empty argument) from this call.
plot(diff, ratio, ylim = c(-5000, 5000), xlim = c(-5000, 5000),
     col = 'Brown', main = "Diff vs. Ratio zoom")
textxy(diff, ratio, Item)
abline(a = 0, b = 1, col = 'Green')
abline(h = 0, v = 0)
par(mfrow = c(1, 1))

# ---- Testing window (2015-06 to 2016-06), candidates 8 and 4 ----
i <- 8
pair_stk_1 <- as.character(value_sub$stock_1)[i]
pair_stk_2 <- as.character(value_sub$stock_2)[i]
from = "2015-06-01"
to = "2016-06-01"
pairs.trade(pair_stk_1, pair_stk_2, from = from, to = to)
par(mfrow = c(2, 1))
mycoin(pair_stk_1, pair_stk_2, from = from)
mycor(pair_stk_1, pair_stk_2, from = from)
par(mfrow = c(1, 1))
# stock_a<-'SINA'
# stock_b<-'BABA'
stock_a <- 'AMZN'
stock_b <- 'T'
stock_a <- pair_stk_1
stock_b <- pair_stk_2
par(mfrow = c(1, 2))
mycor(s1 = stock_a, s2 = stock_b, from = from, to = to)
mycoin(s1 = stock_a, s2 = stock_b, from = from, to = to)
par(mfrow = c(1, 1))
# v_p<-volume_price(stock=stock_a,stock1=stock_b,from = from )
# v_p<-data.frame(v_p)
# v_p
coin_corr <- myvals(stock_a, stock_b, from = from, to = to)
coin_corr <- data.frame(coin_corr)
coin_corr
diff <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'diff')
ratio <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'ratio')
log_ratio <- pairs.trade(stock_a, stock_b, from = from, to = to, method = 'log ratio')
Item <- c('Winners', 'number.of.trades', 'winning.percentage',
          'max.profit', 'min.profit',
          'avg.wining', 'avg.losing', 'profit')
diff <- round(as.numeric(diff), 3)
ratio <- round(as.numeric(ratio), 3)
log_ratio <- round(as.numeric(log_ratio), 3)
data_compare <- cbind(Item, diff, ratio, log_ratio)
data_compare <- data.frame(data_compare)
data_compare
# Grow the 2011 PUF (tax public use file) to 2017 levels, run it through the
# Tax-Calculator CLI, and compare weighted totals against targets.
# NOTE(review): exploratory, order-dependent script; it relies on objects
# created elsewhere in the session (`grow_factors`, `get_puf_xagg()`,
# `globals`, `change_case()`, `impose_variable_rules()`,
# `prime_spouse_splits()`, attached tidyverse).

# Keep only per-variable growth factors (drop the "odd" aggregate rows and
# rows with no factor) as a named vector keyed by PUF variable name.
grow_factors_vec <- grow_factors %>%
  filter(!str_detect(pufvname, "odd"), !is.na(grow_factor))
growvals <- grow_factors_vec$grow_factor
names(growvals) <- grow_factors_vec$pufvname
growvals

puf <- get_puf_xagg()
# Which PUF columns have no growth factor?
setdiff(names(puf), names(growvals))
grownames <- intersect(names(puf), names(growvals))
growf <- growvals[grownames] # just those ones we can grow

puf2017 <- puf %>%
  select(RECID, wt, MARS, XTOT, E01500, E02100, grownames)
p2 <- puf2017
# Scale each growable column by its factor.
p2[, grownames] <- t(t(p2[, grownames]) * growf) # double transpose is fast (https://stackoverflow.com/questions/3643555/multiply-rows-of-matrix-by-vector)
puf2017[1:10, 1:10]
p2[1:10, 1:10]
# 2017 weights: scale 2011 weights by the return-count ("N1_odd") factor.
puf2017 <- p2 %>%
  mutate(wt_2017=wt * grow_factors$grow_factor[grow_factors$pufvname=="N1_odd"])

# Spot-check the implied growth for one variable.
var <- "E00600"
tmp <- puf %>%
  select(RECID, value=var) %>%
  left_join(puf2017 %>% select(RECID, value=var), by="RECID") %>%
  mutate(grow=value.y / value.x)
tmp
# note that E01000 minimum needs to be limited to negative 3000

# Stack puf (2011) and grown puf2017 (using 2017 weights) for comparison.
comp <- bind_rows(puf %>%
                    select(intersect(names(puf), names(puf2017))) %>%
                    mutate(ftype="puf"),
                  puf2017 %>%
                    mutate(wt=wt_2017) %>%
                    select(intersect(names(puf), names(puf2017))) %>%
                    mutate(ftype="puf2017"))
ns(comp)
# Weighted totals ($ millions) by file type and variable.
tab <- comp %>%
  select(-MARS, -RECID) %>%
  pivot_longer(-c(ftype, wt)) %>%
  group_by(ftype, name) %>%
  summarise(value=sum(value * wt) / 1e6)
tab %>%
  pivot_wider(names_from=ftype) %>%
  mutate(diff=puf2017 - puf,
         pdiff=diff / puf * 100,
         apdiff=abs(pdiff)) %>%
  arrange(-apdiff)

#****************************************************************************************************
# prepare puf for Tax-Calculator and run the tc CLI ####
#****************************************************************************************************
#.. define the Windows command to call the tc CLI ####
# This is an excerpt from a function I wrote
# Build a Windows system command that will call the Tax-Calculator CLI. See:
# https://pslmodels.github.io/Tax-Calculator/
# CAUTION: must use full dir names, not relative to working directory
# CAUTION: any directory names that have spaces in them must be shQuoted
# CAUTION: when I updated Anaconda most recently, I had to add
# C:\Users\donbo\Anaconda3\Library\bin to the system path for Tax-Calculator to work with the system(cmd) approach
# CAUTION: 2013 is the FIRST possible tax year that Tax-Calculator will do
# Here is the tc CLI usage:
#   tc INPUT TAXYEAR [--help]
#      [--baseline BASELINE] [--reform REFORM] [--assump ASSUMP]
#      [--exact] [--tables] [--graphs]
#      [--dump] [--dvars DVARS] [--sqldb] [--outdir OUTDIR]
#      [--test] [--version]
globals

# Put the file into Tax-Calculator's expected shape and write it to disk.
puf2017tc <- puf2017 %>%
  setNames(change_case(names(.))) %>% # Tax-Calculator expects mostly lower-case names
  do(impose_variable_rules(.)) %>% # not needed for synpuf5 and later
  do(prime_spouse_splits(.))
system.time(puf2017tc %>% write_csv(paste0(globals$statedir, "puf2017tc.csv")))
# tmp <- puf2017tc %>% select(RECID, e01500, e01700)

dvars <- c("c00100", "c62100", "c09600", "c05800", "taxbc")
dvars_path <- paste0(globals$statedir, "dumpvars.txt") # "D:/tax_data/tc_testfiles/dumpvars.txt"
cat(dvars, file=dvars_path, sep=" ") # write the dvars file

cmd1 <- "C:/ProgramData/Anaconda3/Scripts/tc"
# NOTE(review): the second `args` assignment overwrites the first, so the
# `--dvars` variant is effectively dead code; the full `--dump` run is what
# executes — confirm which of the two was intended.
args <- c(shQuote(paste0(globals$statedir, "puf2017tc.csv")), 2017,
          "--dump", "--dvars", shQuote(dvars_path),
          "--outdir", shQuote(globals$statedir))
args <- c(shQuote(paste0(globals$statedir, "puf2017tc.csv")), 2017,
          "--dump",
          "--outdir", shQuote(globals$statedir))
cmd1
args

#.. run the command ----
a <- proc.time()
system2(cmd1, args) # CAUTION: this will overwrite any existing output file that was based on the same input filename!
b <- proc.time()
b - a # it can easily take 5-10 minutes depending on the size of the input file

# system.time(tmp <- read_csv(paste0(globals$statedir, "puf2017tc-17-#-#-#.csv")))
# system.time(tmp %>% write_csv(paste0(globals$statedir, "test.csv")))

# add the calculated variables to puf2017
tcout <- read_csv(paste0(globals$statedir, "puf2017tc-17-#-#-#.csv"))
sum(tcout$c00100) # 86,486,641,590
sum(tcout$c00100) # 86,486,641,590

# Compare an input variable across puf, grown puf2017, and the tc output
# (PUF columns are upper-case; Tax-Calculator output is lower-case).
f <- function(var){
  c(sum(puf[[str_to_upper(var)]]),
    sum(puf2017[[str_to_upper(var)]]),
    sum(tcout[[var]]))
}
f("e00200")

puf2017_full <- puf2017 %>%
  left_join(tcout, by="RECID")
glimpse(puf2017_full)

# Sanity checks: unweighted counts (millions) and weighted totals ($ billions)
# under 2011 weights, then under 2017 weights.
sum(puf$wt) / 1e6
sum(puf$wt * puf$E00100) / 1e9
sum(puf$wt * puf$E00200) / 1e9
sum(puf$wt * puf$E05800) / 1e9
sum(puf2017$wt * puf2017$E00200) / 1e9

sum(puf2017_full$wt) / 1e6
sum(puf2017_full$wt * puf2017_full$c00100) / 1e9
sum(puf2017_full$wt * puf2017_full$E00200) / 1e9
sum(puf2017_full$wt * puf2017_full$taxbc) / 1e9

sum(puf2017_full$wt_2017) / 1e6
sum(puf2017_full$wt_2017 * puf2017_full$c00100) / 1e9
sum(puf2017_full$wt_2017 * puf2017_full$E00200) / 1e9
sum(puf2017_full$wt_2017 * puf2017_full$taxbc) / 1e9

# 2011, 2017 per HT2; per puf, puf2017 w/puf wts, puf2017 w/2017wts
# returns 146,455,970, 152,455,900; 145.162, 145.162, 151.1089
# AGI 8,378,794,024, 10,991,386,516 31.2%; 8293, 8774.591, 9134.064 10.1% maybe negative numbers are the problem???
# AGI taxcalc with no growthfactors applied, but only our growth variables - is this the problem
# or is it growth of negative numbers?? 7428.786 2017 agi concept 2011 income levels
# vs puf 2011 agi concept 2011 income levels 8293 so I am missing some key variables??
# wages 6,072,880,934, 7,557,396,023 24.4% gf18.7%; 6044.771, 7177.065, 7471.091 23.6%
# taxbc 1,125,358,637, 1,662,439,370 47.7%; 1099.319, 1279.374, 1331.787 21.1%
/r/temp_next_step.r
no_license
donboyd5/make_state_puf
R
false
false
5,655
r
# Grow the 2011 PUF (tax public use file) to 2017 levels, run it through the
# Tax-Calculator CLI, and compare weighted totals against targets.
# NOTE(review): exploratory, order-dependent script; it relies on objects
# created elsewhere in the session (`grow_factors`, `get_puf_xagg()`,
# `globals`, `change_case()`, `impose_variable_rules()`,
# `prime_spouse_splits()`, attached tidyverse).

# Keep only per-variable growth factors (drop the "odd" aggregate rows and
# rows with no factor) as a named vector keyed by PUF variable name.
grow_factors_vec <- grow_factors %>%
  filter(!str_detect(pufvname, "odd"), !is.na(grow_factor))
growvals <- grow_factors_vec$grow_factor
names(growvals) <- grow_factors_vec$pufvname
growvals

puf <- get_puf_xagg()
# Which PUF columns have no growth factor?
setdiff(names(puf), names(growvals))
grownames <- intersect(names(puf), names(growvals))
growf <- growvals[grownames] # just those ones we can grow

puf2017 <- puf %>%
  select(RECID, wt, MARS, XTOT, E01500, E02100, grownames)
p2 <- puf2017
# Scale each growable column by its factor.
p2[, grownames] <- t(t(p2[, grownames]) * growf) # double transpose is fast (https://stackoverflow.com/questions/3643555/multiply-rows-of-matrix-by-vector)
puf2017[1:10, 1:10]
p2[1:10, 1:10]
# 2017 weights: scale 2011 weights by the return-count ("N1_odd") factor.
puf2017 <- p2 %>%
  mutate(wt_2017=wt * grow_factors$grow_factor[grow_factors$pufvname=="N1_odd"])

# Spot-check the implied growth for one variable.
var <- "E00600"
tmp <- puf %>%
  select(RECID, value=var) %>%
  left_join(puf2017 %>% select(RECID, value=var), by="RECID") %>%
  mutate(grow=value.y / value.x)
tmp
# note that E01000 minimum needs to be limited to negative 3000

# Stack puf (2011) and grown puf2017 (using 2017 weights) for comparison.
comp <- bind_rows(puf %>%
                    select(intersect(names(puf), names(puf2017))) %>%
                    mutate(ftype="puf"),
                  puf2017 %>%
                    mutate(wt=wt_2017) %>%
                    select(intersect(names(puf), names(puf2017))) %>%
                    mutate(ftype="puf2017"))
ns(comp)
# Weighted totals ($ millions) by file type and variable.
tab <- comp %>%
  select(-MARS, -RECID) %>%
  pivot_longer(-c(ftype, wt)) %>%
  group_by(ftype, name) %>%
  summarise(value=sum(value * wt) / 1e6)
tab %>%
  pivot_wider(names_from=ftype) %>%
  mutate(diff=puf2017 - puf,
         pdiff=diff / puf * 100,
         apdiff=abs(pdiff)) %>%
  arrange(-apdiff)

#****************************************************************************************************
# prepare puf for Tax-Calculator and run the tc CLI ####
#****************************************************************************************************
#.. define the Windows command to call the tc CLI ####
# This is an excerpt from a function I wrote
# Build a Windows system command that will call the Tax-Calculator CLI. See:
# https://pslmodels.github.io/Tax-Calculator/
# CAUTION: must use full dir names, not relative to working directory
# CAUTION: any directory names that have spaces in them must be shQuoted
# CAUTION: when I updated Anaconda most recently, I had to add
# C:\Users\donbo\Anaconda3\Library\bin to the system path for Tax-Calculator to work with the system(cmd) approach
# CAUTION: 2013 is the FIRST possible tax year that Tax-Calculator will do
# Here is the tc CLI usage:
#   tc INPUT TAXYEAR [--help]
#      [--baseline BASELINE] [--reform REFORM] [--assump ASSUMP]
#      [--exact] [--tables] [--graphs]
#      [--dump] [--dvars DVARS] [--sqldb] [--outdir OUTDIR]
#      [--test] [--version]
globals

# Put the file into Tax-Calculator's expected shape and write it to disk.
puf2017tc <- puf2017 %>%
  setNames(change_case(names(.))) %>% # Tax-Calculator expects mostly lower-case names
  do(impose_variable_rules(.)) %>% # not needed for synpuf5 and later
  do(prime_spouse_splits(.))
system.time(puf2017tc %>% write_csv(paste0(globals$statedir, "puf2017tc.csv")))
# tmp <- puf2017tc %>% select(RECID, e01500, e01700)

dvars <- c("c00100", "c62100", "c09600", "c05800", "taxbc")
dvars_path <- paste0(globals$statedir, "dumpvars.txt") # "D:/tax_data/tc_testfiles/dumpvars.txt"
cat(dvars, file=dvars_path, sep=" ") # write the dvars file

cmd1 <- "C:/ProgramData/Anaconda3/Scripts/tc"
# NOTE(review): the second `args` assignment overwrites the first, so the
# `--dvars` variant is effectively dead code; the full `--dump` run is what
# executes — confirm which of the two was intended.
args <- c(shQuote(paste0(globals$statedir, "puf2017tc.csv")), 2017,
          "--dump", "--dvars", shQuote(dvars_path),
          "--outdir", shQuote(globals$statedir))
args <- c(shQuote(paste0(globals$statedir, "puf2017tc.csv")), 2017,
          "--dump",
          "--outdir", shQuote(globals$statedir))
cmd1
args

#.. run the command ----
a <- proc.time()
system2(cmd1, args) # CAUTION: this will overwrite any existing output file that was based on the same input filename!
b <- proc.time()
b - a # it can easily take 5-10 minutes depending on the size of the input file

# system.time(tmp <- read_csv(paste0(globals$statedir, "puf2017tc-17-#-#-#.csv")))
# system.time(tmp %>% write_csv(paste0(globals$statedir, "test.csv")))

# add the calculated variables to puf2017
tcout <- read_csv(paste0(globals$statedir, "puf2017tc-17-#-#-#.csv"))
sum(tcout$c00100) # 86,486,641,590
sum(tcout$c00100) # 86,486,641,590

# Compare an input variable across puf, grown puf2017, and the tc output
# (PUF columns are upper-case; Tax-Calculator output is lower-case).
f <- function(var){
  c(sum(puf[[str_to_upper(var)]]),
    sum(puf2017[[str_to_upper(var)]]),
    sum(tcout[[var]]))
}
f("e00200")

puf2017_full <- puf2017 %>%
  left_join(tcout, by="RECID")
glimpse(puf2017_full)

# Sanity checks: unweighted counts (millions) and weighted totals ($ billions)
# under 2011 weights, then under 2017 weights.
sum(puf$wt) / 1e6
sum(puf$wt * puf$E00100) / 1e9
sum(puf$wt * puf$E00200) / 1e9
sum(puf$wt * puf$E05800) / 1e9
sum(puf2017$wt * puf2017$E00200) / 1e9

sum(puf2017_full$wt) / 1e6
sum(puf2017_full$wt * puf2017_full$c00100) / 1e9
sum(puf2017_full$wt * puf2017_full$E00200) / 1e9
sum(puf2017_full$wt * puf2017_full$taxbc) / 1e9

sum(puf2017_full$wt_2017) / 1e6
sum(puf2017_full$wt_2017 * puf2017_full$c00100) / 1e9
sum(puf2017_full$wt_2017 * puf2017_full$E00200) / 1e9
sum(puf2017_full$wt_2017 * puf2017_full$taxbc) / 1e9

# 2011, 2017 per HT2; per puf, puf2017 w/puf wts, puf2017 w/2017wts
# returns 146,455,970, 152,455,900; 145.162, 145.162, 151.1089
# AGI 8,378,794,024, 10,991,386,516 31.2%; 8293, 8774.591, 9134.064 10.1% maybe negative numbers are the problem???
# AGI taxcalc with no growthfactors applied, but only our growth variables - is this the problem
# or is it growth of negative numbers?? 7428.786 2017 agi concept 2011 income levels
# vs puf 2011 agi concept 2011 income levels 8293 so I am missing some key variables??
# wages 6,072,880,934, 7,557,396,023 24.4% gf18.7%; 6044.771, 7177.065, 7471.091 23.6%
# taxbc 1,125,358,637, 1,662,439,370 47.7%; 1099.319, 1279.374, 1331.787 21.1%
coefplot.default <- function(coefs, sds, CI=2, lower.conf.bounds, upper.conf.bounds, varnames=NULL, vertical=TRUE, v.axis=TRUE, h.axis=TRUE, cex.var=0.8, cex.pts=0.9, col.pts=1, pch.pts=20, var.las=2, main=NULL, xlab=NULL, ylab=NULL, mar=c(1,3,5.1,2), plot=TRUE, add=FALSE, offset=0.1, ...) { # collect informations if (is.list(coefs)){ coefs <- unlist(coefs) } n.x <- length(coefs) idx <- seq(1, n.x) #bound <- lower.bound if(!missing(lower.conf.bounds)){ if(length(coefs)!=length(lower.conf.bounds)){ stop("Number of conf.bounds does not equal to number of estimates") } } if(!missing(upper.conf.bounds)){ if(length(coefs)!=length(upper.conf.bounds)){ stop("Number of conf.bounds does not equal to number of estimates") } } if(!missing(sds)){ coefs.h <- coefs + CI*sds coefs.l <- coefs - CI*sds est1 <- cbind(coefs - sds, coefs + sds) est2 <- cbind(coefs - 2*sds, coefs + 2*sds) if(!missing(lower.conf.bounds)){ est1[,1] <- lower.conf.bounds CI <- 1 } if(!missing(upper.conf.bounds)){ est1[,2] <- upper.conf.bounds CI <- 1 } }else{ #coefs.h <- upper.conf.bounds #coefs.l <- lower.conf.bounds est1 <- cbind(coefs, coefs) if(!missing(lower.conf.bounds)){ est1[,1] <- lower.conf.bounds CI <- 1 } if(!missing(upper.conf.bounds)){ est1[,2] <- upper.conf.bounds CI <- 1 } } old.par <- par(no.readonly=TRUE) #on.exit(par(old.par)) min.mar <- par('mar') if (is.null(main)){main <- "Regression Estimates"} if (is.null(xlab)){xlab <- ""} if (is.null(ylab)){ylab <- ""} par(mar = mar) if (is.null(varnames)) { maxchar <- 0 } else{ maxchar <- max(sapply(varnames, nchar)) } # add margin to the axis k <- 1/n.x if(plot){ if (vertical){ mar[2] <- max(min.mar[2], trunc(mar[2] + maxchar/10)) + 0.1 par(mar=mar) if(!add){ plot(c(coefs.l, coefs.h), c(idx+k,idx-k), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) 
if (h.axis){ #axis(1) axis(3) } if (v.axis){ axis(2, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(v=0, lty=2) points(coefs, idx, pch=pch.pts, cex=cex.pts, col=col.pts) if (CI==2){ segments (est1[,1], idx, est1[,2], idx, lwd=2, col=col.pts) segments (est2[,1], idx, est2[,2], idx, lwd=1, col=col.pts) } else{ segments (est1[,1], idx, est1[,2], idx, lwd=1, col=col.pts) } } else{ idx <- idx + offset points(coefs, idx, pch=pch.pts, cex=cex.pts, col=col.pts) if (CI==2){ segments (est1[,1], idx, est1[,2], idx, lwd=2, col=col.pts) segments (est2[,1], idx, est2[,2], idx, lwd=1, col=col.pts) } else{ segments (est1[,1], idx, est1[,2], idx, lwd=1, col=col.pts) } } } # end of if vertical else{ # horizontal mar[1] <- max(min.mar[1], trunc(mar[1] + maxchar/10)) + 0.1 par(mar=mar) if(!add){ plot(c(idx+k,idx-k), c(coefs.l, coefs.h), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) if (v.axis){ axis(2, las=var.las) #axis(4, las=var.las) } if (h.axis){ axis(1, 1:n.x, varnames[1:n.x], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(h=0, lty=2) points(idx, coefs, pch=pch.pts, cex=cex.pts, col=col.pts) if (CI==2){ segments (idx, est1[,1], idx, est1[,2], lwd=2, col=col.pts) segments (idx, est2[,1], idx, est2[,2], lwd=1, col=col.pts) } else if (CI==1) { segments (idx, est1[,1], idx, est1[,2], lwd=1, col=col.pts) } } else{ idx <- idx + offset points(idx, coefs, pch=pch.pts, cex=cex.pts, col=col.pts) if (CI==2){ segments (idx, est1[,1], idx, est1[,2], lwd=2, col=col.pts) segments (idx, est2[,1], idx, est2[,2], lwd=1, col=col.pts) } else if (CI==1) { segments (idx, est1[,1], idx, est1[,2], lwd=1, col=col.pts) } } } } else{ if (vertical){ mar[2] <- max(min.mar[2], trunc(mar[2] + maxchar/10)) + 0.1 par(mar=mar) plot(c(coefs.l, coefs.h), c(idx+k,idx-k), type="n", axes=F, main="", xlab=xlab, ylab=ylab,...) 
# if (v.axis){ # axis(2, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, # lty=0, cex.axis=cex.var) # } } else{ # horizontal mar[1] <- max(min.mar[1], trunc(mar[1] + maxchar/10)) + 0.1 par(mar=mar) plot(c(idx+k,idx-k), c(coefs.l, coefs.h), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) #if (h.axis){ # axis(1, 1:n.x, varnames[1:n.x], las=var.las, tck=FALSE, # lty=0, cex.axis=cex.var) # } } } #on.exit(par(old.par)) } setMethod("coefplot", signature(object = "numeric"), function(object, ...) { coefplot.default(object, ...) } ) setMethod("coefplot", signature(object = "lm"), function(object, varnames=NULL, intercept=FALSE, ...) { # collect informations coefs <- summary(object)$coef[,1] sds <- summary(object)$coef[,2] ifelse (is.null(varnames), varnames <- names(coefs), varnames <- varnames) if (length(varnames)!= length(names(coefs))){ stop(message="the length of varnames does not equal the length of predictors. Note: varnames must include a name for constant/intercept") } chk.int <- attr(terms(object), "intercep") if(chk.int & intercept | !chk.int & intercept | !chk.int & !intercept){ intercept <- TRUE coefs <- coefs sds <- sds varnames <- varnames } else if(chk.int & !intercept){ coefs <- coefs[-1] sds <- sds[-1] varnames <- varnames[-1] } # plotting coefplot(coefs, sds, varnames=varnames, ...) } ) setMethod("coefplot", signature(object = "glm"), function(object, varnames=NULL, intercept=FALSE,...) { # collect informations coefs <- summary(object)$coef[,1] sds <- summary(object)$coef[,2] ifelse (is.null(varnames), varnames <- names(coefs), varnames <- varnames) if (length(varnames)!= length(names(coefs))){ stop(message="the length of varnames does not equal the length of predictors. 
Note: varnames must include a name for constant/intercept") } chk.int <- attr(terms(object), "intercep") if(chk.int & intercept | !chk.int & intercept | !chk.int & !intercept){ intercept <- TRUE coefs <- coefs sds <- sds varnames <- varnames } else if(chk.int & !intercept){ coefs <- coefs[-1] sds <- sds[-1] varnames <- varnames[-1] } # plotting coefplot(coefs, sds, varnames=varnames, ...) } ) setMethod("coefplot", signature(object = "bugs"), function(object, var.idx=NULL, varnames=NULL, CI=1, vertical=TRUE, v.axis=TRUE, h.axis=TRUE, cex.var=0.8, cex.pts=0.9, col.pts=1, pch.pts=20, var.las=2, main=NULL, xlab=NULL, ylab=NULL, plot=TRUE, add=FALSE, offset=.1, mar=c(1,3,5.1,2), ...) { if (is.null(var.idx)){ var.idx <- 1:length(object$summary[,"50%"]) } n.x <- length(var.idx) idx <- 1:n.x coefs <- object$summary[,"50%"][var.idx] if (is.null(varnames)){ varnames <- names(coefs) } if (is.null(main)){main <- "Regression Estimates"} if (is.null(xlab)){xlab <- ""} if (is.null(ylab)){ylab <- ""} min.mar <- par('mar') par(mar=mar) maxchar <- max(sapply(varnames, nchar)) k <- 1/n.x if (CI==1){ CI50.h <- object$summary[,"75%"][var.idx] CI50.l <- object$summary[,"25%"][var.idx] CI50 <- cbind(CI50.l, CI50.h) if (vertical){ mar[2] <- min(min.mar[2], trunc(mar[2] + maxchar/10)) + 0.1 par(mar=mar) if(add){ segments (CI50[,1], idx+offset, CI50[,2], idx+offset, lwd=1, col=col.pts) points(coefs, idx+offset, pch=20, cex=cex.pts, col=col.pts) } else{ plot(c(CI50[,1],CI50[,2]), c(idx+k,idx-k), type="n", axes=F, main=main, xlab=xlab, ylab=ylab, ...) 
if(plot){ if (h.axis){ axis(3) } if (v.axis){ axis(2, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(v=0, lty=2) segments (CI50[,1], idx, CI50[,2], idx, lwd=1, col=col.pts) points(coefs, idx, pch=20, cex=cex.pts, col=col.pts) } } } else { mar[1] <- min(min.mar[1], trunc(mar[1] + maxchar/10)) + 0.1 par(mar=mar) if(add){ segments (idx+offset, CI50[,1], idx+offset, CI50[,2], lwd=1, col=col.pts) points(idx+offset, coefs, pch=20, cex=cex.pts, col=col.pts) } else{ plot(c(idx+k,idx-k), c(CI50[,1],CI50[,2]), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) if(plot){ if (v.axis){ axis(2) } if (h.axis){ axis(1, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(h=0, lty=2) segments (idx, CI50[,1], idx, CI50[,2], lwd=1, col=col.pts) points(idx, coefs, pch=20, cex=cex.pts, col=col.pts) } } } } if (CI==2){ CI50.h <- object$summary[,"75%"][var.idx] CI50.l <- object$summary[,"25%"][var.idx] CI95.h <- object$summary[,"97.5%"][var.idx] CI95.l <- object$summary[,"2.5%"][var.idx] CI50 <- cbind(CI50.l, CI50.h) CI95 <- cbind(CI95.l, CI95.h) if (vertical){ mar[2] <- min(min.mar[2], trunc(mar[2] + maxchar/10)) + 0.1 par(mar=mar) if(add){ segments (CI50[,1], idx+offset, CI50[,2], idx+offset, lwd=2, col=col.pts) segments (CI95[,1], idx+offset, CI95[,2], idx+offset, lwd=1, col=col.pts) points(coefs, idx+offset, pch=20, cex=cex.pts, col=col.pts) } else{ plot(c(CI95[,1],CI95[,2]), c(idx+k,idx-k), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) 
if(plot){ if (h.axis){ axis(3) } if (v.axis){ axis(2, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(v=0, lty=2) segments (CI50[,1], idx, CI50[,2], idx, lwd=2, col=col.pts) segments (CI95[,1], idx, CI95[,2], idx, lwd=1, col=col.pts) points(coefs, idx, pch=20, cex=cex.pts, col=col.pts) } } } else { mar[1] <- min(min.mar[1], trunc(mar[1] + maxchar/10)) + 0.1 par(mar=mar) if(add){ segments (idx+offset, CI50[,1], idx+offset, CI50[,2], lwd=2, col=col.pts) segments (idx+offset, CI95[,1], idx+offset, CI95[,2], lwd=1, col=col.pts) points(idx+offset, coefs, pch=20, cex=cex.pts, col=col.pts) } else{ plot(c(idx+k,idx-k), c(CI95[,1],CI95[,2]), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) if(plot){ if (v.axis){ axis(2) } if (h.axis){ axis(1, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(h=0, lty=2) segments (idx, CI50[,1], idx, CI50[,2], lwd=2, col=col.pts) segments (idx, CI95[,1], idx, CI95[,2], lwd=1, col=col.pts) points(idx, coefs, pch=20, cex=cex.pts, col=col.pts) } } } } } ) setMethod("coefplot", signature(object = "polr"), function(object, varnames=NULL,...) { # collect informations coefs <- summary(object)$coef[,1] sds <- summary(object)$coef[,2] ifelse(is.null(varnames), varnames <- names(coefs), varnames <- varnames) # plotting coefplot(coefs, sds, varnames=varnames, ...) } )
/R/coefplot.R
no_license
suyusung/arm
R
false
false
13,862
r
coefplot.default <- function(coefs, sds, CI=2, lower.conf.bounds, upper.conf.bounds, varnames=NULL, vertical=TRUE, v.axis=TRUE, h.axis=TRUE, cex.var=0.8, cex.pts=0.9, col.pts=1, pch.pts=20, var.las=2, main=NULL, xlab=NULL, ylab=NULL, mar=c(1,3,5.1,2), plot=TRUE, add=FALSE, offset=0.1, ...) { # collect informations if (is.list(coefs)){ coefs <- unlist(coefs) } n.x <- length(coefs) idx <- seq(1, n.x) #bound <- lower.bound if(!missing(lower.conf.bounds)){ if(length(coefs)!=length(lower.conf.bounds)){ stop("Number of conf.bounds does not equal to number of estimates") } } if(!missing(upper.conf.bounds)){ if(length(coefs)!=length(upper.conf.bounds)){ stop("Number of conf.bounds does not equal to number of estimates") } } if(!missing(sds)){ coefs.h <- coefs + CI*sds coefs.l <- coefs - CI*sds est1 <- cbind(coefs - sds, coefs + sds) est2 <- cbind(coefs - 2*sds, coefs + 2*sds) if(!missing(lower.conf.bounds)){ est1[,1] <- lower.conf.bounds CI <- 1 } if(!missing(upper.conf.bounds)){ est1[,2] <- upper.conf.bounds CI <- 1 } }else{ #coefs.h <- upper.conf.bounds #coefs.l <- lower.conf.bounds est1 <- cbind(coefs, coefs) if(!missing(lower.conf.bounds)){ est1[,1] <- lower.conf.bounds CI <- 1 } if(!missing(upper.conf.bounds)){ est1[,2] <- upper.conf.bounds CI <- 1 } } old.par <- par(no.readonly=TRUE) #on.exit(par(old.par)) min.mar <- par('mar') if (is.null(main)){main <- "Regression Estimates"} if (is.null(xlab)){xlab <- ""} if (is.null(ylab)){ylab <- ""} par(mar = mar) if (is.null(varnames)) { maxchar <- 0 } else{ maxchar <- max(sapply(varnames, nchar)) } # add margin to the axis k <- 1/n.x if(plot){ if (vertical){ mar[2] <- max(min.mar[2], trunc(mar[2] + maxchar/10)) + 0.1 par(mar=mar) if(!add){ plot(c(coefs.l, coefs.h), c(idx+k,idx-k), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) 
if (h.axis){ #axis(1) axis(3) } if (v.axis){ axis(2, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(v=0, lty=2) points(coefs, idx, pch=pch.pts, cex=cex.pts, col=col.pts) if (CI==2){ segments (est1[,1], idx, est1[,2], idx, lwd=2, col=col.pts) segments (est2[,1], idx, est2[,2], idx, lwd=1, col=col.pts) } else{ segments (est1[,1], idx, est1[,2], idx, lwd=1, col=col.pts) } } else{ idx <- idx + offset points(coefs, idx, pch=pch.pts, cex=cex.pts, col=col.pts) if (CI==2){ segments (est1[,1], idx, est1[,2], idx, lwd=2, col=col.pts) segments (est2[,1], idx, est2[,2], idx, lwd=1, col=col.pts) } else{ segments (est1[,1], idx, est1[,2], idx, lwd=1, col=col.pts) } } } # end of if vertical else{ # horizontal mar[1] <- max(min.mar[1], trunc(mar[1] + maxchar/10)) + 0.1 par(mar=mar) if(!add){ plot(c(idx+k,idx-k), c(coefs.l, coefs.h), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) if (v.axis){ axis(2, las=var.las) #axis(4, las=var.las) } if (h.axis){ axis(1, 1:n.x, varnames[1:n.x], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(h=0, lty=2) points(idx, coefs, pch=pch.pts, cex=cex.pts, col=col.pts) if (CI==2){ segments (idx, est1[,1], idx, est1[,2], lwd=2, col=col.pts) segments (idx, est2[,1], idx, est2[,2], lwd=1, col=col.pts) } else if (CI==1) { segments (idx, est1[,1], idx, est1[,2], lwd=1, col=col.pts) } } else{ idx <- idx + offset points(idx, coefs, pch=pch.pts, cex=cex.pts, col=col.pts) if (CI==2){ segments (idx, est1[,1], idx, est1[,2], lwd=2, col=col.pts) segments (idx, est2[,1], idx, est2[,2], lwd=1, col=col.pts) } else if (CI==1) { segments (idx, est1[,1], idx, est1[,2], lwd=1, col=col.pts) } } } } else{ if (vertical){ mar[2] <- max(min.mar[2], trunc(mar[2] + maxchar/10)) + 0.1 par(mar=mar) plot(c(coefs.l, coefs.h), c(idx+k,idx-k), type="n", axes=F, main="", xlab=xlab, ylab=ylab,...) 
# if (v.axis){ # axis(2, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, # lty=0, cex.axis=cex.var) # } } else{ # horizontal mar[1] <- max(min.mar[1], trunc(mar[1] + maxchar/10)) + 0.1 par(mar=mar) plot(c(idx+k,idx-k), c(coefs.l, coefs.h), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) #if (h.axis){ # axis(1, 1:n.x, varnames[1:n.x], las=var.las, tck=FALSE, # lty=0, cex.axis=cex.var) # } } } #on.exit(par(old.par)) } setMethod("coefplot", signature(object = "numeric"), function(object, ...) { coefplot.default(object, ...) } ) setMethod("coefplot", signature(object = "lm"), function(object, varnames=NULL, intercept=FALSE, ...) { # collect informations coefs <- summary(object)$coef[,1] sds <- summary(object)$coef[,2] ifelse (is.null(varnames), varnames <- names(coefs), varnames <- varnames) if (length(varnames)!= length(names(coefs))){ stop(message="the length of varnames does not equal the length of predictors. Note: varnames must include a name for constant/intercept") } chk.int <- attr(terms(object), "intercep") if(chk.int & intercept | !chk.int & intercept | !chk.int & !intercept){ intercept <- TRUE coefs <- coefs sds <- sds varnames <- varnames } else if(chk.int & !intercept){ coefs <- coefs[-1] sds <- sds[-1] varnames <- varnames[-1] } # plotting coefplot(coefs, sds, varnames=varnames, ...) } ) setMethod("coefplot", signature(object = "glm"), function(object, varnames=NULL, intercept=FALSE,...) { # collect informations coefs <- summary(object)$coef[,1] sds <- summary(object)$coef[,2] ifelse (is.null(varnames), varnames <- names(coefs), varnames <- varnames) if (length(varnames)!= length(names(coefs))){ stop(message="the length of varnames does not equal the length of predictors. 
Note: varnames must include a name for constant/intercept") } chk.int <- attr(terms(object), "intercep") if(chk.int & intercept | !chk.int & intercept | !chk.int & !intercept){ intercept <- TRUE coefs <- coefs sds <- sds varnames <- varnames } else if(chk.int & !intercept){ coefs <- coefs[-1] sds <- sds[-1] varnames <- varnames[-1] } # plotting coefplot(coefs, sds, varnames=varnames, ...) } ) setMethod("coefplot", signature(object = "bugs"), function(object, var.idx=NULL, varnames=NULL, CI=1, vertical=TRUE, v.axis=TRUE, h.axis=TRUE, cex.var=0.8, cex.pts=0.9, col.pts=1, pch.pts=20, var.las=2, main=NULL, xlab=NULL, ylab=NULL, plot=TRUE, add=FALSE, offset=.1, mar=c(1,3,5.1,2), ...) { if (is.null(var.idx)){ var.idx <- 1:length(object$summary[,"50%"]) } n.x <- length(var.idx) idx <- 1:n.x coefs <- object$summary[,"50%"][var.idx] if (is.null(varnames)){ varnames <- names(coefs) } if (is.null(main)){main <- "Regression Estimates"} if (is.null(xlab)){xlab <- ""} if (is.null(ylab)){ylab <- ""} min.mar <- par('mar') par(mar=mar) maxchar <- max(sapply(varnames, nchar)) k <- 1/n.x if (CI==1){ CI50.h <- object$summary[,"75%"][var.idx] CI50.l <- object$summary[,"25%"][var.idx] CI50 <- cbind(CI50.l, CI50.h) if (vertical){ mar[2] <- min(min.mar[2], trunc(mar[2] + maxchar/10)) + 0.1 par(mar=mar) if(add){ segments (CI50[,1], idx+offset, CI50[,2], idx+offset, lwd=1, col=col.pts) points(coefs, idx+offset, pch=20, cex=cex.pts, col=col.pts) } else{ plot(c(CI50[,1],CI50[,2]), c(idx+k,idx-k), type="n", axes=F, main=main, xlab=xlab, ylab=ylab, ...) 
if(plot){ if (h.axis){ axis(3) } if (v.axis){ axis(2, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(v=0, lty=2) segments (CI50[,1], idx, CI50[,2], idx, lwd=1, col=col.pts) points(coefs, idx, pch=20, cex=cex.pts, col=col.pts) } } } else { mar[1] <- min(min.mar[1], trunc(mar[1] + maxchar/10)) + 0.1 par(mar=mar) if(add){ segments (idx+offset, CI50[,1], idx+offset, CI50[,2], lwd=1, col=col.pts) points(idx+offset, coefs, pch=20, cex=cex.pts, col=col.pts) } else{ plot(c(idx+k,idx-k), c(CI50[,1],CI50[,2]), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) if(plot){ if (v.axis){ axis(2) } if (h.axis){ axis(1, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(h=0, lty=2) segments (idx, CI50[,1], idx, CI50[,2], lwd=1, col=col.pts) points(idx, coefs, pch=20, cex=cex.pts, col=col.pts) } } } } if (CI==2){ CI50.h <- object$summary[,"75%"][var.idx] CI50.l <- object$summary[,"25%"][var.idx] CI95.h <- object$summary[,"97.5%"][var.idx] CI95.l <- object$summary[,"2.5%"][var.idx] CI50 <- cbind(CI50.l, CI50.h) CI95 <- cbind(CI95.l, CI95.h) if (vertical){ mar[2] <- min(min.mar[2], trunc(mar[2] + maxchar/10)) + 0.1 par(mar=mar) if(add){ segments (CI50[,1], idx+offset, CI50[,2], idx+offset, lwd=2, col=col.pts) segments (CI95[,1], idx+offset, CI95[,2], idx+offset, lwd=1, col=col.pts) points(coefs, idx+offset, pch=20, cex=cex.pts, col=col.pts) } else{ plot(c(CI95[,1],CI95[,2]), c(idx+k,idx-k), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) 
if(plot){ if (h.axis){ axis(3) } if (v.axis){ axis(2, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(v=0, lty=2) segments (CI50[,1], idx, CI50[,2], idx, lwd=2, col=col.pts) segments (CI95[,1], idx, CI95[,2], idx, lwd=1, col=col.pts) points(coefs, idx, pch=20, cex=cex.pts, col=col.pts) } } } else { mar[1] <- min(min.mar[1], trunc(mar[1] + maxchar/10)) + 0.1 par(mar=mar) if(add){ segments (idx+offset, CI50[,1], idx+offset, CI50[,2], lwd=2, col=col.pts) segments (idx+offset, CI95[,1], idx+offset, CI95[,2], lwd=1, col=col.pts) points(idx+offset, coefs, pch=20, cex=cex.pts, col=col.pts) } else{ plot(c(idx+k,idx-k), c(CI95[,1],CI95[,2]), type="n", axes=F, main=main, xlab=xlab, ylab=ylab,...) if(plot){ if (v.axis){ axis(2) } if (h.axis){ axis(1, n.x:1, varnames[n.x:1], las=var.las, tck=FALSE, lty=0, cex.axis=cex.var) } abline(h=0, lty=2) segments (idx, CI50[,1], idx, CI50[,2], lwd=2, col=col.pts) segments (idx, CI95[,1], idx, CI95[,2], lwd=1, col=col.pts) points(idx, coefs, pch=20, cex=cex.pts, col=col.pts) } } } } } ) setMethod("coefplot", signature(object = "polr"), function(object, varnames=NULL,...) { # collect informations coefs <- summary(object)$coef[,1] sds <- summary(object)$coef[,2] ifelse(is.null(varnames), varnames <- names(coefs), varnames <- varnames) # plotting coefplot(coefs, sds, varnames=varnames, ...) } )
# batch_maxcovr_dist_mat.R # Use max_covrage function to solve the MCLP with different facilitiy number. Write the result to a RDS file and a csv file. maxcovr_batch_dist_mat <- function(file_demand, file_facility_site, file_dist_mat, vec_n_facility, dist_service, name_case){ # Solving a list of MCLPs with different facility numbers on a case study. Using the maxcovr package. Write the maxcovr objects to a file. # Args: # file_demand: the csv file containing the data frame of demand # file_facility_site: the csv file constaining the data frame of facility sites # file_distance_mat: the csv file containing the data frame of distance matrix. The required columns include distance, name (i.e. potential facility ID), DestinationName (i.e. demand ID), demand # vec_n_facility: a vector of facility numbers # dist_service: the maximal service distance (in metres) # name_case: the name of the case # # Outputs: # a .Rdata file: containing the maxcovr objects # a .csv file: containing the testing result # Returns: # None require(dplyr) require(purrr) require(data.table) # install the maxcovr if necessary if(!require(maxcovr)){ devtools::install_github('huanfachen/maxcovr', force = TRUE) require(maxcovr) } # df_demand <- file_demand %>% data.table::fread() # df_site <- file_facility_site %>% data.table::fread() # read distance matrix file. Convert to a matrix in R. 
Remember to set the class df_dist <- file_dist_mat %>% data.table::fread(colClasses = c("name" = "character", "DestinationName" = "character")) dist_mat <- xtabs(df_dist$distance ~ df_dist$name + df_dist$DestinationName) # fake existing facility df_facility_fake <- data.frame(long = NA, lat = NA, weight = NA) # proposed facility df_site <- data.frame(long = NA, lat = NA, id = rownames(dist_mat)) # coerce the same order of demand id in the columns of distance matrix and df_demand df_demand <- df_dist %>% select(id = DestinationName, weight = demand) %>% distinct(id, .keep_all = TRUE) %>% arrange(factor(id, levels = colnames(dist_mat))) %>% mutate(long = NA, lat = NA) # assert: data frames must contain two columns: long, lat if(!all(c("long","lat") %in% names(df_demand))){ stop("Error: Demand data must contain long and lat") } if(!all(c("long","lat") %in% names(df_site))){ stop("Error: Facility site data must contain long and lat") } # run multiple MCLPs list_mclp_res <- purrr::map(vec_n_facility, ~ solve_mclp_record_time(existing_facility = df_facility_fake, proposed_facility = df_site, user = df_demand, distance_cutoff = dist_service, d_proposed_user = dist_mat, n_added = .x) ) names(list_mclp_res) <- vec_n_facility # save results: a .rdata file, a csv file # filename: MCLP_case_YYMMDD_HHMM.rds/csv time_stamp <- format(Sys.time(), "%Y%m%d_%H%M") file_prefix = paste0("MCLP","_", name_case, "_", time_stamp) file_rds = paste0(file_prefix, ".rds") file_csv = paste0(file_prefix, ".csv") save(list_mclp_res, file = file_rds) df_res <- purrr::map_dfr(list_mclp_res, magrittr::extract, c("n_facility", "demand_coverage", "comp_sec")) df_res %>% as.data.frame() %>% write.csv(file_csv) print(paste0("Saving results to RDS file: ", file_rds)) print(paste0("Saving results to CSV file: ", file_csv)) } solve_mclp_record_time <- function(...){ time_comp <- system.time( mclp_sol <- maxcovr::max_coverage_weighted(...) 
) # record the system computation time, in seconds mclp_sol$comp_sec <- time_comp[2] mclp_sol$demand_coverage <- percent_weight_coverage(mclp_sol) mclp_sol$n_facility <- mclp_sol$model_coverage[[1]]$n_added mclp_sol } # compute the percent of demand weight covered # Compute the percent of covered weights percent_weight_coverage <- function(mc_result){ mc_result$user_affected[[1]]$weight %>% sum() / (mc_result$augmented_users[[1]]$weight %>% sum()) * 100.0 }
/Maxcovr/batch_maxcovr_dist_mat.R
permissive
huanfachen/Open_source_location_cover_models
R
false
false
4,630
r
# batch_maxcovr_dist_mat.R # Use max_covrage function to solve the MCLP with different facilitiy number. Write the result to a RDS file and a csv file. maxcovr_batch_dist_mat <- function(file_demand, file_facility_site, file_dist_mat, vec_n_facility, dist_service, name_case){ # Solving a list of MCLPs with different facility numbers on a case study. Using the maxcovr package. Write the maxcovr objects to a file. # Args: # file_demand: the csv file containing the data frame of demand # file_facility_site: the csv file constaining the data frame of facility sites # file_distance_mat: the csv file containing the data frame of distance matrix. The required columns include distance, name (i.e. potential facility ID), DestinationName (i.e. demand ID), demand # vec_n_facility: a vector of facility numbers # dist_service: the maximal service distance (in metres) # name_case: the name of the case # # Outputs: # a .Rdata file: containing the maxcovr objects # a .csv file: containing the testing result # Returns: # None require(dplyr) require(purrr) require(data.table) # install the maxcovr if necessary if(!require(maxcovr)){ devtools::install_github('huanfachen/maxcovr', force = TRUE) require(maxcovr) } # df_demand <- file_demand %>% data.table::fread() # df_site <- file_facility_site %>% data.table::fread() # read distance matrix file. Convert to a matrix in R. 
Remember to set the class df_dist <- file_dist_mat %>% data.table::fread(colClasses = c("name" = "character", "DestinationName" = "character")) dist_mat <- xtabs(df_dist$distance ~ df_dist$name + df_dist$DestinationName) # fake existing facility df_facility_fake <- data.frame(long = NA, lat = NA, weight = NA) # proposed facility df_site <- data.frame(long = NA, lat = NA, id = rownames(dist_mat)) # coerce the same order of demand id in the columns of distance matrix and df_demand df_demand <- df_dist %>% select(id = DestinationName, weight = demand) %>% distinct(id, .keep_all = TRUE) %>% arrange(factor(id, levels = colnames(dist_mat))) %>% mutate(long = NA, lat = NA) # assert: data frames must contain two columns: long, lat if(!all(c("long","lat") %in% names(df_demand))){ stop("Error: Demand data must contain long and lat") } if(!all(c("long","lat") %in% names(df_site))){ stop("Error: Facility site data must contain long and lat") } # run multiple MCLPs list_mclp_res <- purrr::map(vec_n_facility, ~ solve_mclp_record_time(existing_facility = df_facility_fake, proposed_facility = df_site, user = df_demand, distance_cutoff = dist_service, d_proposed_user = dist_mat, n_added = .x) ) names(list_mclp_res) <- vec_n_facility # save results: a .rdata file, a csv file # filename: MCLP_case_YYMMDD_HHMM.rds/csv time_stamp <- format(Sys.time(), "%Y%m%d_%H%M") file_prefix = paste0("MCLP","_", name_case, "_", time_stamp) file_rds = paste0(file_prefix, ".rds") file_csv = paste0(file_prefix, ".csv") save(list_mclp_res, file = file_rds) df_res <- purrr::map_dfr(list_mclp_res, magrittr::extract, c("n_facility", "demand_coverage", "comp_sec")) df_res %>% as.data.frame() %>% write.csv(file_csv) print(paste0("Saving results to RDS file: ", file_rds)) print(paste0("Saving results to CSV file: ", file_csv)) } solve_mclp_record_time <- function(...){ time_comp <- system.time( mclp_sol <- maxcovr::max_coverage_weighted(...) 
) # record the system computation time, in seconds mclp_sol$comp_sec <- time_comp[2] mclp_sol$demand_coverage <- percent_weight_coverage(mclp_sol) mclp_sol$n_facility <- mclp_sol$model_coverage[[1]]$n_added mclp_sol } # compute the percent of demand weight covered # Compute the percent of covered weights percent_weight_coverage <- function(mc_result){ mc_result$user_affected[[1]]$weight %>% sum() / (mc_result$augmented_users[[1]]$weight %>% sum()) * 100.0 }
# ------------------------------------------------------------------------------ # Name of Quantlet: MMSTATsample_param # ------------------------------------------------------------------------------ # Published in: MMSTAT # ------------------------------------------------------------------------------ # Description: Shows estimated parameters for univariate data sample. # The user can interactively choose the parameter that is estimated # (mean, median, standard deviation, interquartile range) and the sample size. # Also, variables of the data sets CARS, USCRIME and BOSTONHOUSING are available. # The upper panel shows a a histogram of the parameter estimates of # all previously drawn samples. # The lower panel shows a scatterplot of the whole population (green) and # the current sample (orange). A box indicates the interquartile range and # the mean. # ------------------------------------------------------------------------------ # Keywords: plot, scatterplot, histogram, boxplot, mean, median, quantile, # visualization, data visualization, parameter, interactive, # uscrime, standard deviation, sampling, empirical, estimation, # distribution # ------------------------------------------------------------------------------ # Usage: MMSTAThelper_function # ------------------------------------------------------------------------------ # Output: Interactive shiny application # ------------------------------------------------------------------------------ # Example: Uses the variable POPULATION of the USCRIME data set. # It shows the histogram of the mean estimates in the upper panel # and the comparison of the population and the sample in the lower # panel. 
# ------------------------------------------------------------------------------ # See also: BCS_Hist1, BCS_Hist2, MSRsca_bmw_vw, BCS_Boxplot, # MMSTATtime_series_1, MMSTATlinreg, MMSTATconfmean, # MMSTATconfi_sigma, MMSTATassociation, MMSTAThelper_function # ------------------------------------------------------------------------------ # Author : Sigbert Klinke # ------------------------------------------------------------------------------ # Code Editor: Yafei Xu # ------------------------------------------------------------------------------ # Datafiles: CARS.rds, USCRIME.rds, BOSTONHOUSING.rds # ------------------------------------------------------------------------------ # please use "Esc" key to jump out of the Shiny app rm(list = ls(all = TRUE)) graphics.off() # please set working directory setwd('C:/...') # setwd('~/...') # linux/mac os # setwd('/Users/...') # windows source("MMSTAThelper_function.r") ############################### SUBROUTINES ################################## ### server ################################################################### dpc = gettext(c("MEAN", "MEDIAN", "STDDEV", "IQR"), "name") mmstat$vartype = "numvars" mmstat.ui.elem("param", "selectInput", label = gettext("Select parameter"), choices = dpc, value = "MEAN") mmstat.ui.elem("size", "sampleSize") mmstat.ui.elem("go", "drawSample") mmstat.ui.elem("speed", "speedSlider") mmstat.ui.elem("dataset", "dataSet", choices = mmstat.getDataNames("USCRIME", "CARS", "BOSTONHOUSING")) mmstat.ui.elem("variable", "variable1", vartype = "numeric") mmstat.ui.elem("cex", "fontSize") param = c() drawIqrBoxWithPoints = function(x, jitter, ylim, box.param = NULL, points.param = NULL) { if (is.list(points.param) || is.null(points.param) || points.param) { points.param$x = x points.param$y = ylim[1] + diff(ylim) * jitter suppressWarnings(do.call("points", points.param)) } if (is.list(box.param) || is.null(box.param) || box.param) { q = quantile(x, c(0.25, 0.5, 0.75), na.rm = T) box.param$xleft 
= q[1] box.param$xright = q[3] box.param$ybottom = ylim[1] box.param$ytop = ylim[2] suppressWarnings(do.call("rect", box.param)) box.param$x = c(q[2], q[2]) box.param$y = ylim if (!is.null(box.param$border)) box.param$col = box.param$border suppressWarnings(do.call("lines", box.param)) } } server = shinyServer(function(input, output, session) { output$paramUI = renderUI({ mmstat.ui.call("param") }) output$goUI = renderUI({ mmstat.ui.call("go") }) output$speedUI = renderUI({ mmstat.ui.call("speed") }) output$datasetUI = renderUI({ mmstat.ui.call("dataset") }) output$cexUI = renderUI({ mmstat.ui.call("cex") }) output$variableUI = renderUI({ inp = mmstat.getValues(NULL, dataset = input$dataset) mmstat.ui.call("variable", choices = mmstat.getVarNames(inp$dataset, "numeric")) }) output$sizeUI = renderUI({ var = getVar() mmstat.ui.call("size", ticks = var$ticks, max = length(var$ticks)) }) getVar = reactive({ inp = mmstat.getValues(NULL, dataset = input$dataset, variable = input$variable) var = mmstat.getVar(inp$dataset, inp$variable) var$ticks = mmstat.ticks(var$n, nmin = 30) dec = mmstat.dec(c(var$mean, var$median)) var$decimal = dec$decimal var[["pos"]] = 2 * (var$mean < var$median) param <<- c() var }) getSize = reactive({ var = getVar() inp = mmstat.getValues(NULL, param = input$param, size = input$size) if (inp$param == "MEAN") param <<- var$mean if (inp$param == "MEDIAN") param <<- var$median if (inp$param == "STDDEV") param <<- var$sd if (inp$param == "IQR") param <<- var$iqr var$ticks[inp$size] }) drawSample = reactive({ input$go inp = mmstat.getValues(NULL, speed = input$speed, param = input$param) if (inp$speed > 0) invalidateLater(500/inp$speed, session) var = getVar() repeat { # ensure at least two samples size = getSize() index = sample(var$n, size = size, replace = T) sample = var$values[index] if (inp$param == "MEAN") param <<- c(param, mean(sample)) if (inp$param == "MEDIAN") param <<- c(param, median(sample)) if (inp$param == "STDDEV") param <<- 
c(param, sd(sample)) if (inp$param == "IQR") param <<- c(param, IQR(sample)) if (length(param) > 2) break } index }) output$samplePlot = renderPlot({ mmstat.log(sprintf("samplePlot %s", input$param)) var = getVar() inp = mmstat.getValues(NULL, param = input$param, cex = input$cex) drawSample() par(mar = c(5, 0, 2, 0)) hist(param, breaks = "Scott", freq = F, axes = F, xlab = var$xlab, ylab = "", main = sprintf(gettext("Histogram and %s based on %.0f samples of size n=%.0f"), inp$param, length(param), getSize()), cex.axis = inp$cex, cex.lab = inp$cex, cex.main = 1.2 * inp$cex, cex.sub = inp$cex) rug(param) usr = par("usr") mmstat.axis(1, usr[1:2], cex.axis = inp$cex) box() if (inp$param == "MEAN") { lty = "dotted" col = mmstat$col[[1]] abline(v = var$mean, lwd = 3, lty = lty, col = col) text(var$mean, 0.95 * usr[4], pos = 4, sprintf("%.*f", var$dec, var$mean), col = col, cex = inp$cex) } if (inp$param == "MEDIAN") { lty = "dashed" col = mmstat$col[[3]] abline(v = var$median, lwd = 3, lty = lty, col = col) text(var$median, 0.95 * usr[4], pos = 4, sprintf("%.*f", var$dec, var$median), col = col, cex = inp$cex) } if (inp$param == "STDDEV") { lty = "dotted" col = mmstat$col[[2]] abline(v = var$sd, lwd = 3, lty = lty, col = col) text(var$sd, 0.95 * usr[4], pos = 4, sprintf("%.*f", var$dec, var$sd), col = col, cex = inp$cex) } if (inp$param == "IQR") { lty = "dashed" col = mmstat$col[[4]] abline(v = diff(var$quart), lwd = 3, lty = lty, col = col) text(diff(var$quart), 0.95 * usr[4], pos = 4, sprintf("%.*f", var$dec, diff(var$quart)), col = col, cex = inp$cex) } }) output$outputSamplePlot = renderPlot({ var = getVar() index = drawSample() inp = mmstat.getValues(NULL, cex = input$cex, param = input$param) par(mar = c(5, 0, 2, 0)) plot(range(var$values), c(-0.05, 1), type = "n", axes = F, main = gettext("Population and sample"), xlab = var$xlab, sub = var$sub, cex.axis = inp$cex, cex.lab = inp$cex, cex.main = 1.2 * inp$cex, cex.sub = inp$cex) usr = par("usr") mmstat.axis(1, 
usr[1:2], cex.axis = inp$cex) drawIqrBoxWithPoints(var$values, var$jitter, ylim = c(0, 0.45), box.param = list(border = mmstat$col[[1]], lwd = 2), points.param = list(col = mmstat$col[[9]], pch = 19, cex = 0.5 * inp$cex)) drawIqrBoxWithPoints(var$values[index], var$jitter[index], ylim = 0.5 + c(0, 0.45 * sqrt(length(index)/var$n)), box.param = list(border = mmstat$col[[2]], lwd = 2), points.param = list(col = mmstat$col[[10]], pch = 19, cex = 0.5 * inp$cex)) box() }) output$logText = renderText({ mmstat.getLog(session) }) }) ############################### SUBROUTINES ################################## ### ui ####################################################################### ui = shinyUI(fluidPage( div(class="navbar navbar-static-top", div(class = "navbar-inner", fluidRow(column(4, div(class = "brand pull-left", gettext("Distribution of sample parameters"))), column(2, checkboxInput("showsample", gettext("Sample parameter"), TRUE)), column(2, checkboxInput("showspeed", gettext("Specify speed"), FALSE)), column(2, checkboxInput("showdata", gettext("Data choice"), FALSE)), column(2, checkboxInput("showoptions", gettext("Options"), FALSE))))), sidebarLayout( sidebarPanel( conditionalPanel( condition = 'input.showsample', uiOutput("paramUI"), br(), uiOutput("sizeUI"), br(), uiOutput("goUI") ), conditionalPanel( condition = 'input.showspeed', br(), uiOutput("speedUI") ), conditionalPanel( condition = 'input.showdata', hr(), uiOutput("datasetUI"), uiOutput("variableUI") ), conditionalPanel( condition = 'input.showoptions', hr(), uiOutput("cexUI") ) ), mainPanel(plotOutput("samplePlot"), plotOutput("outputSamplePlot", height = "200px"))), htmlOutput("logText") )) ############################### SUBROUTINES ################################## ### shinyApp ################################################################# shinyApp(ui = ui, server = server)
/MMSTATsample_param/MMSTATsample_param.r
no_license
QuantLet/MMSTAT
R
false
false
12,150
r
# ------------------------------------------------------------------------------ # Name of Quantlet: MMSTATsample_param # ------------------------------------------------------------------------------ # Published in: MMSTAT # ------------------------------------------------------------------------------ # Description: Shows estimated parameters for univariate data sample. # The user can interactively choose the parameter that is estimated # (mean, median, standard deviation, interquartile range) and the sample size. # Also, variables of the data sets CARS, USCRIME and BOSTONHOUSING are available. # The upper panel shows a a histogram of the parameter estimates of # all previously drawn samples. # The lower panel shows a scatterplot of the whole population (green) and # the current sample (orange). A box indicates the interquartile range and # the mean. # ------------------------------------------------------------------------------ # Keywords: plot, scatterplot, histogram, boxplot, mean, median, quantile, # visualization, data visualization, parameter, interactive, # uscrime, standard deviation, sampling, empirical, estimation, # distribution # ------------------------------------------------------------------------------ # Usage: MMSTAThelper_function # ------------------------------------------------------------------------------ # Output: Interactive shiny application # ------------------------------------------------------------------------------ # Example: Uses the variable POPULATION of the USCRIME data set. # It shows the histogram of the mean estimates in the upper panel # and the comparison of the population and the sample in the lower # panel. 
# ------------------------------------------------------------------------------ # See also: BCS_Hist1, BCS_Hist2, MSRsca_bmw_vw, BCS_Boxplot, # MMSTATtime_series_1, MMSTATlinreg, MMSTATconfmean, # MMSTATconfi_sigma, MMSTATassociation, MMSTAThelper_function # ------------------------------------------------------------------------------ # Author : Sigbert Klinke # ------------------------------------------------------------------------------ # Code Editor: Yafei Xu # ------------------------------------------------------------------------------ # Datafiles: CARS.rds, USCRIME.rds, BOSTONHOUSING.rds # ------------------------------------------------------------------------------ # please use "Esc" key to jump out of the Shiny app rm(list = ls(all = TRUE)) graphics.off() # please set working directory setwd('C:/...') # setwd('~/...') # linux/mac os # setwd('/Users/...') # windows source("MMSTAThelper_function.r") ############################### SUBROUTINES ################################## ### server ################################################################### dpc = gettext(c("MEAN", "MEDIAN", "STDDEV", "IQR"), "name") mmstat$vartype = "numvars" mmstat.ui.elem("param", "selectInput", label = gettext("Select parameter"), choices = dpc, value = "MEAN") mmstat.ui.elem("size", "sampleSize") mmstat.ui.elem("go", "drawSample") mmstat.ui.elem("speed", "speedSlider") mmstat.ui.elem("dataset", "dataSet", choices = mmstat.getDataNames("USCRIME", "CARS", "BOSTONHOUSING")) mmstat.ui.elem("variable", "variable1", vartype = "numeric") mmstat.ui.elem("cex", "fontSize") param = c() drawIqrBoxWithPoints = function(x, jitter, ylim, box.param = NULL, points.param = NULL) { if (is.list(points.param) || is.null(points.param) || points.param) { points.param$x = x points.param$y = ylim[1] + diff(ylim) * jitter suppressWarnings(do.call("points", points.param)) } if (is.list(box.param) || is.null(box.param) || box.param) { q = quantile(x, c(0.25, 0.5, 0.75), na.rm = T) box.param$xleft 
= q[1] box.param$xright = q[3] box.param$ybottom = ylim[1] box.param$ytop = ylim[2] suppressWarnings(do.call("rect", box.param)) box.param$x = c(q[2], q[2]) box.param$y = ylim if (!is.null(box.param$border)) box.param$col = box.param$border suppressWarnings(do.call("lines", box.param)) } } server = shinyServer(function(input, output, session) { output$paramUI = renderUI({ mmstat.ui.call("param") }) output$goUI = renderUI({ mmstat.ui.call("go") }) output$speedUI = renderUI({ mmstat.ui.call("speed") }) output$datasetUI = renderUI({ mmstat.ui.call("dataset") }) output$cexUI = renderUI({ mmstat.ui.call("cex") }) output$variableUI = renderUI({ inp = mmstat.getValues(NULL, dataset = input$dataset) mmstat.ui.call("variable", choices = mmstat.getVarNames(inp$dataset, "numeric")) }) output$sizeUI = renderUI({ var = getVar() mmstat.ui.call("size", ticks = var$ticks, max = length(var$ticks)) }) getVar = reactive({ inp = mmstat.getValues(NULL, dataset = input$dataset, variable = input$variable) var = mmstat.getVar(inp$dataset, inp$variable) var$ticks = mmstat.ticks(var$n, nmin = 30) dec = mmstat.dec(c(var$mean, var$median)) var$decimal = dec$decimal var[["pos"]] = 2 * (var$mean < var$median) param <<- c() var }) getSize = reactive({ var = getVar() inp = mmstat.getValues(NULL, param = input$param, size = input$size) if (inp$param == "MEAN") param <<- var$mean if (inp$param == "MEDIAN") param <<- var$median if (inp$param == "STDDEV") param <<- var$sd if (inp$param == "IQR") param <<- var$iqr var$ticks[inp$size] }) drawSample = reactive({ input$go inp = mmstat.getValues(NULL, speed = input$speed, param = input$param) if (inp$speed > 0) invalidateLater(500/inp$speed, session) var = getVar() repeat { # ensure at least two samples size = getSize() index = sample(var$n, size = size, replace = T) sample = var$values[index] if (inp$param == "MEAN") param <<- c(param, mean(sample)) if (inp$param == "MEDIAN") param <<- c(param, median(sample)) if (inp$param == "STDDEV") param <<- 
c(param, sd(sample)) if (inp$param == "IQR") param <<- c(param, IQR(sample)) if (length(param) > 2) break } index }) output$samplePlot = renderPlot({ mmstat.log(sprintf("samplePlot %s", input$param)) var = getVar() inp = mmstat.getValues(NULL, param = input$param, cex = input$cex) drawSample() par(mar = c(5, 0, 2, 0)) hist(param, breaks = "Scott", freq = F, axes = F, xlab = var$xlab, ylab = "", main = sprintf(gettext("Histogram and %s based on %.0f samples of size n=%.0f"), inp$param, length(param), getSize()), cex.axis = inp$cex, cex.lab = inp$cex, cex.main = 1.2 * inp$cex, cex.sub = inp$cex) rug(param) usr = par("usr") mmstat.axis(1, usr[1:2], cex.axis = inp$cex) box() if (inp$param == "MEAN") { lty = "dotted" col = mmstat$col[[1]] abline(v = var$mean, lwd = 3, lty = lty, col = col) text(var$mean, 0.95 * usr[4], pos = 4, sprintf("%.*f", var$dec, var$mean), col = col, cex = inp$cex) } if (inp$param == "MEDIAN") { lty = "dashed" col = mmstat$col[[3]] abline(v = var$median, lwd = 3, lty = lty, col = col) text(var$median, 0.95 * usr[4], pos = 4, sprintf("%.*f", var$dec, var$median), col = col, cex = inp$cex) } if (inp$param == "STDDEV") { lty = "dotted" col = mmstat$col[[2]] abline(v = var$sd, lwd = 3, lty = lty, col = col) text(var$sd, 0.95 * usr[4], pos = 4, sprintf("%.*f", var$dec, var$sd), col = col, cex = inp$cex) } if (inp$param == "IQR") { lty = "dashed" col = mmstat$col[[4]] abline(v = diff(var$quart), lwd = 3, lty = lty, col = col) text(diff(var$quart), 0.95 * usr[4], pos = 4, sprintf("%.*f", var$dec, diff(var$quart)), col = col, cex = inp$cex) } }) output$outputSamplePlot = renderPlot({ var = getVar() index = drawSample() inp = mmstat.getValues(NULL, cex = input$cex, param = input$param) par(mar = c(5, 0, 2, 0)) plot(range(var$values), c(-0.05, 1), type = "n", axes = F, main = gettext("Population and sample"), xlab = var$xlab, sub = var$sub, cex.axis = inp$cex, cex.lab = inp$cex, cex.main = 1.2 * inp$cex, cex.sub = inp$cex) usr = par("usr") mmstat.axis(1, 
usr[1:2], cex.axis = inp$cex) drawIqrBoxWithPoints(var$values, var$jitter, ylim = c(0, 0.45), box.param = list(border = mmstat$col[[1]], lwd = 2), points.param = list(col = mmstat$col[[9]], pch = 19, cex = 0.5 * inp$cex)) drawIqrBoxWithPoints(var$values[index], var$jitter[index], ylim = 0.5 + c(0, 0.45 * sqrt(length(index)/var$n)), box.param = list(border = mmstat$col[[2]], lwd = 2), points.param = list(col = mmstat$col[[10]], pch = 19, cex = 0.5 * inp$cex)) box() }) output$logText = renderText({ mmstat.getLog(session) }) }) ############################### SUBROUTINES ################################## ### ui ####################################################################### ui = shinyUI(fluidPage( div(class="navbar navbar-static-top", div(class = "navbar-inner", fluidRow(column(4, div(class = "brand pull-left", gettext("Distribution of sample parameters"))), column(2, checkboxInput("showsample", gettext("Sample parameter"), TRUE)), column(2, checkboxInput("showspeed", gettext("Specify speed"), FALSE)), column(2, checkboxInput("showdata", gettext("Data choice"), FALSE)), column(2, checkboxInput("showoptions", gettext("Options"), FALSE))))), sidebarLayout( sidebarPanel( conditionalPanel( condition = 'input.showsample', uiOutput("paramUI"), br(), uiOutput("sizeUI"), br(), uiOutput("goUI") ), conditionalPanel( condition = 'input.showspeed', br(), uiOutput("speedUI") ), conditionalPanel( condition = 'input.showdata', hr(), uiOutput("datasetUI"), uiOutput("variableUI") ), conditionalPanel( condition = 'input.showoptions', hr(), uiOutput("cexUI") ) ), mainPanel(plotOutput("samplePlot"), plotOutput("outputSamplePlot", height = "200px"))), htmlOutput("logText") )) ############################### SUBROUTINES ################################## ### shinyApp ################################################################# shinyApp(ui = ui, server = server)
#File created -> ind_asean, chn_asean, # ind_sgp, chn_sgp, ind_sgp_table, chn_sgp_table library(readr) ind_asean <- read.csv("D:/CEEW/Data/Data_Summary/IND_ASEAN.csv") chn_asean <- read.csv("D:/CEEW/Data/Data_Summary/CHN_ASEAN.csv") ind_indo <- subset(ind_asean, partner_code == "IDN") chn_indo <- subset(chn_asean, partner_code == "IDN") ind_indo_table <- as.data.frame(table(unlist(ind_indo$hs_product_code))) chn_indo_table <- as.data.frame(table(unlist(chn_indo$hs_product_code))) #Sorting by the frequency ind_indo_table <- ind_indo_table[with(ind_indo_table, order(-Freq)),] chn_indo_table <- chn_indo_table[with(chn_indo_table, order(-Freq)),] names(ind_indo_table)[names(ind_indo_table) == "Var1"] <- "hs_product_code" names(chn_indo_table)[names(chn_indo_table) == "Var1"] <- "hs_product_code" for (i in 1:NROW(ind_indo_table$hs_product_code)){ exp <- subset(ind_indo, hs_product_code == ind_indo_table$hs_product_code[i], select = c("export_value")) imp <- subset(ind_indo, hs_product_code == ind_indo_table$hs_product_code[i], select = c("import_value")) tot_sum_ind <- sum(exp) tot_sum_ind_imp <- sum(imp) ind_indo_table$export_value[i] <- tot_sum_ind ind_indo_table$import_value[i] <- tot_sum_ind_imp print(i) } for (i in 1:NROW(chn_indo_table$hs_product_code)){ exp <- subset(chn_indo, hs_product_code == chn_indo_table$hs_product_code[i], select = c("export_value")) imp <- subset(chn_indo, hs_product_code == chn_indo_table$hs_product_code[i], select = c("import_value")) tot_sum_chn <- sum(exp) tot_sum_chn_imp <- sum(imp) chn_indo_table$export_value[i] <- tot_sum_chn chn_indo_table$import_value[i] <- tot_sum_chn_imp print(i) } write.csv(ind_indo, "D:/CEEW/Data/Data_Summary/INDO/IND_INDO.csv", row.names = FALSE) write.csv(chn_indo, "D:/CEEW/Data/Data_Summary/INDO/CHN_INDO.csv", row.names = FALSE) write.csv(ind_indo_table, "D:/CEEW/Data/Data_Summary/INDO/IND_INDO_table.csv", row.names = FALSE) write.csv(chn_indo_table, "D:/CEEW/Data/Data_Summary/INDO/CHN_INDO_table.csv", 
row.names = FALSE)
/Tasks/Task_01/INDO/uploading_data.R
permissive
ankurRangi/Data-Analysis-CEEW
R
false
false
2,118
r
#File created -> ind_asean, chn_asean, # ind_sgp, chn_sgp, ind_sgp_table, chn_sgp_table library(readr) ind_asean <- read.csv("D:/CEEW/Data/Data_Summary/IND_ASEAN.csv") chn_asean <- read.csv("D:/CEEW/Data/Data_Summary/CHN_ASEAN.csv") ind_indo <- subset(ind_asean, partner_code == "IDN") chn_indo <- subset(chn_asean, partner_code == "IDN") ind_indo_table <- as.data.frame(table(unlist(ind_indo$hs_product_code))) chn_indo_table <- as.data.frame(table(unlist(chn_indo$hs_product_code))) #Sorting by the frequency ind_indo_table <- ind_indo_table[with(ind_indo_table, order(-Freq)),] chn_indo_table <- chn_indo_table[with(chn_indo_table, order(-Freq)),] names(ind_indo_table)[names(ind_indo_table) == "Var1"] <- "hs_product_code" names(chn_indo_table)[names(chn_indo_table) == "Var1"] <- "hs_product_code" for (i in 1:NROW(ind_indo_table$hs_product_code)){ exp <- subset(ind_indo, hs_product_code == ind_indo_table$hs_product_code[i], select = c("export_value")) imp <- subset(ind_indo, hs_product_code == ind_indo_table$hs_product_code[i], select = c("import_value")) tot_sum_ind <- sum(exp) tot_sum_ind_imp <- sum(imp) ind_indo_table$export_value[i] <- tot_sum_ind ind_indo_table$import_value[i] <- tot_sum_ind_imp print(i) } for (i in 1:NROW(chn_indo_table$hs_product_code)){ exp <- subset(chn_indo, hs_product_code == chn_indo_table$hs_product_code[i], select = c("export_value")) imp <- subset(chn_indo, hs_product_code == chn_indo_table$hs_product_code[i], select = c("import_value")) tot_sum_chn <- sum(exp) tot_sum_chn_imp <- sum(imp) chn_indo_table$export_value[i] <- tot_sum_chn chn_indo_table$import_value[i] <- tot_sum_chn_imp print(i) } write.csv(ind_indo, "D:/CEEW/Data/Data_Summary/INDO/IND_INDO.csv", row.names = FALSE) write.csv(chn_indo, "D:/CEEW/Data/Data_Summary/INDO/CHN_INDO.csv", row.names = FALSE) write.csv(ind_indo_table, "D:/CEEW/Data/Data_Summary/INDO/IND_INDO_table.csv", row.names = FALSE) write.csv(chn_indo_table, "D:/CEEW/Data/Data_Summary/INDO/CHN_INDO_table.csv", 
row.names = FALSE)
#### Design Transects no clusters #### library( rgdal) library( sp) library( raster) library( MBHdesign) ######################## #read in the inclusion probs inclProbs <- raster( x="inclProbs_design1.tif") inclProbs <- setValues( inclProbs, values( inclProbs) / sum( values( inclProbs), na.rm=TRUE)) rootInclProbs <- inclProbs rootInclProbs <- setValues( rootInclProbs, sqrt( values( rootInclProbs))) zones <- readRDS( "GBZones_forDesign1.RDS") #BRUVS <- readRDS( "referenceBruvs_forDesign2.RDS") #if( class( BRUVS) != "SpatialPointsDataFrame") # BRUVS <- SpatialPointsDataFrame( coords=BRUVS[,c("longitude","latitude")], data=BRUVS, proj4string = CRS( proj4string( zones[[1]]))) straw.nums <- readRDS( "StrawmanNumbers_Zones.RDS") ##aggregate raster to speed up computation. May need to revisit? #rootInclProbs_agg <- aggregate( rootInclProbs, fact=20, fun=sum) #0.5km between centres #rootInclProbs_agg_100m <- aggregate( rootInclProbs, fact=4, fun=sum) #100m between centres ################################ #### choose reference sites #### This is a one-step sample and hence #### uses orignal inclProbs ################################ #flag for whether to revisit site or not #BRUVS@data$revisit <- FALSE #working out density dependent probs of inclusion #tmpBRUVS <- spTransform( BRUVS, CRS="+init=epsg:3577") #tmpDist <- as.matrix( dist( coordinates( tmpBRUVS))) #tmpDist1 <- apply( tmpDist, 1, function(x) sum( x<1000)) #BRUVS@data$sampleProbs <- 1 / sqrt( tmpDist1) #to alter the importance a little bit #BRUVS@data$sampleProbs <- BRUVS@data$sampleProbs / sum( BRUVS@data$sampleProbs, na.rm=TRUE) #BRUVS@data$sampleProbs <- BRUVS@data$sampleProbs * BRUVS@data$inclProbs #TPI inclusion probs are zero in most places #to get similar numbers of each year... 
tmp <- tapply( BRUVS@data$sampleProbs, BRUVS@data$year, sum) tmp1 <- BRUVS@data$sampleProbs for( yy in unique( BRUVS@data$year)) tmp1[BRUVS@data$year==yy] <- tmp1[BRUVS@data$year==yy] / tmp[yy] tmp1[BRUVS@data$year==2006] <- 5 * tmp1[BRUVS@data$year==2006] #so 2006 sites are 5 times more likely than they should be. tmp1 <- tmp1 / sum( tmp1) BRUVS@data$sampleProbs <- tmp1 numRef <- rep( NA, 4) names( numRef) <- c("NPZ", "HPZ", "SPZ", "MUZ") #### Set the seed for reproducability #set.seed( 727) for( zz in c( "NPZ", "HPZ", "SPZ", "MUZ")){ myZone <- zones[[zz]] if( zz == "MUS") myZone = zones$AMP - zones$IUCN2 tmpDrop <- as.vector( as.matrix( over( BRUVS, myZone))) numRef[zz] <- min( floor( straw.nums[zz]/4), sum( tmpDrop, na.rm=TRUE)) BRUVS@data[!is.na( tmpDrop), "revisit"][sample.int( sum( tmpDrop, na.rm=TRUE), numRef[zz], prob=BRUVS@data[!is.na( tmpDrop),"sampleProbs"], replace=FALSE)] <- TRUE # BRUVS@data[!is.na( tmpDrop), "revisit"][sample.int( sum( tmpDrop, na.rm=TRUE), numRef[zz], replace=FALSE)] <- TRUE } # load legacy sites legacySites <- readOGR(dsn="C:/Users/00093391/Dropbox/UWA/Research Associate/MBHpackage/Ningaloo19_Data/legacySites_2019-12-23.shp") legacySites@data$year table( legacySites@data$year) / table( BRUVS@data$year) plot( rootInclProbs_agg) plot( BRUVS, add=TRUE) points( coordinates( BRUVS)[BRUVS@data$revisit,], col='red') legacySites <- BRUVS@data[BRUVS@data$revisit,] legacySites <- SpatialPointsDataFrame( coords=legacySites[,c("longitude","latitude")], data=legacySites, proj4string=CRS(proj4string(inclProbs))) # 49 legacy sites ############################ #### Spatial sample of new sites #### from altered incl. probs. 
############################ ### Here use quasiSamp to get random points #### ## these points will be the center of buffer for transects ### #### Set the seed for reproducability set.seed( 777) #### Here I use transectSamp rather than quasiSamp---- newSitesT <- list(NPZ=NULL,HPZ=NULL,SPZ=NULL,MUZ=NULL) for( zz in c("NPZ", "HPZ", "SPZ", "MUZ")){ print( zz) #the number of samples to take (specified minus the legacy number) numby <- floor( (straw.nums[zz])) #numby <- floor( (straw.nums[zz] - numRef[zz])/4) #fullZones[[ii]] <- rownames( newSites@data)[ii] #set up spatial domain myZone <- zones[[zz]] #if( zz == "AMP"){ # myZone = zones$AMP - zones$IUCN2 #set.seed( 747) #} #tmpIP <- mask( rootInclProbs_agg_100m, myZone) tmpIP <- mask( rootInclProbs, myZone) tmpIP <- crop( tmpIP, myZone) tmpIP2 <- raster::as.matrix(tmpIP) tmpIP2 <- t(tmpIP2) tmpIPdf <- as.data.frame ( cbind (coordinates (tmpIP), as.numeric (tmpIP2))) colnames(tmpIPdf) <- c("x", "y", "incProbs") tmpIPdf <- tmpIPdf[ order(tmpIPdf$y, tmpIPdf$x),] # order ascending first by northing and then by easting potsites <- coordinates( tmpIP) potsites <- as.data.frame(potsites) potsites <- potsites[ order(potsites$y, potsites$x),] # order ascending first by northing and then by easting #take the sample of clusters based on root incl probs #newSites[[zz]] <- quasiSamp( n=numby, potential.sites=coordinates( tmpIP), inclusion.probs=values(tmpIP), nSampsToConsider=5000) newSitesT[[zz]] <- transectSamp( n=numby, potential.sites = potsites[,c("x","y")], #potential.sites= tmpdf[,c("x","y")], #inclusion.probs= incprobdf[,3], inclusion.probs= tmpIPdf[,3], control=gb.control #constrainedSet=gb.constraints.bool ) #plotting (maybe remove at a later date?) 
#tmpIPFull <- mask( rootInclProbs, myZone) #tmpIPFull <- crop( tmpIPFull, myZone) #plot( tmpIPFull) #plot( legacySites, add=TRUE, pch=1, col='red') #newsitessp <- coordinates(newSitesT[[zz]]$points) <- ~x+y #points(newsitessp, pch=20, col="black", add=T) #points( newSitesT[[zz]][,c("x","y")], pch=20, col='black') } #newSitesTpoints <- do.call( "rbind", newSitesT$NPZ$points, newSitesT$HPZ$points, newSitesT$SPZ$points, newSitesT$MUZ$points) newSitesTpoints <- rbind (newSitesT$NPZ$points, newSitesT$HPZ$points, newSitesT$SPZ$points, newSitesT$MUZ$points) newSitesTsp <- newSitesTpoints coordinates(newSitesTsp) <- ~x+y #some of the spatial balance is not great... Presumably because the balance of the reference sites is also not great... plot(gb) points(newSitesTsp, add=T) fzones <- list("NPZ1", "NPZ2", "NPZ3", "HPZ1", "HPZ2", "HPZ3", "SPZ1", "SPZ2", "SPZ3", "SPZ4", "SPZ5", "SPZ6", "MUZ1", "MUZ2", "MUZ3", "MUZ4", "MUZ5", "MUZ6") #fullSamplep <- do.call( "rbind", fullSample[19:36]) #fullSamplet <- do.call( "rbind", fullSample$transects) #fullSample2 <- bind_rows(fullSample, .id = "column_label") newSitesTpoints$cluster <- rep( do.call( "c", fzones), each=5) # 15 is number of point per transect #tpoints <- fullSample$points[,c("x","y")] write.csv(newSitesTpoints, "~/MBHdesignGB/outputs/newdesign/GB_design2_TransectsNotClus.csv") newSitesTsp <- SpatialPointsDataFrame( coords=newSitesTpoints[,c("x","y")], data=newSitesTpoints, proj4string=CRS(proj4string(inclProbs))) for( zz in c("NPZ", "HPZ", "SPZ", "MUZ")){ plot( zones[[zz]]) plot( inclProbs, add=TRUE) plot( zones[[zz]], add=TRUE) points( newSitesTsp, pch=20, col='red') #plot( legacySites, add=TRUE, pch=4, col='blue') } #fullSample$ID <- paste( fullSample$cluster, rep( paste0( "shot.",1:6), each=nrow( newSites)), sep="_") #fullSamplesp <- SpatialPointsDataFrame( coords=fullSamplep[,c("x","y")], data=fullSamplep, proj4string=CRS(proj4string(inclProbs))) ################################## #### Write the shape files writeOGR( 
newSitesTsp, dsn="~/MBHdesignGB/outputs/newdesign", layer=paste( "Design2_notClustTransects", Sys.Date(), sep="_"), driver="ESRI Shapefile", overwrite_layer=TRUE) #writeOGR( legacySites, dsn="C:/Users/21933549/Dropbox/UWA/Research Associate/Ningaloo19_Data", layer=paste( "legacySites", Sys.Date(), sep="_"), driver="ESRI Shapefile", overwrite_layer=TRUE) ### Change to lat long b <- raster("~/MBHdesignGB/SpatialData/GB_CMR_bathy.tif") b2 <- proj4string(b) newSitesLatLon <- spTransform(newSitesTsp, b2) newSitesLatLon2 <- as.data.frame(newSitesLatLon) head(newSitesLatLon2) ################################## #### Changing one version of the decimal degrees to degrees, minutes, seconds convert2dms <- function(xx, fractionsOfSeconds=1){ #this is a bit of a hack (with the signage being dropped) neg.pattern <- (xx < 0) xx[neg.pattern] <- -xx[neg.pattern] degrees <- floor( xx) #these are all positive now degree.remainder <- xx %% 1 x <- degree.remainder * 60 minutes <- floor( x) minute.remainder <- x %% 1 x <- minute.remainder * 60 seconds <- round( x, fractionsOfSeconds) * 10^fractionsOfSeconds tmp <- paste( degrees, minutes, seconds, sep=".") # tmp[neg.pattern] <- paste0("-",tmp[neg.pattern]) return( tmp) } newSitesLocations_dms <- apply( coordinates( newSitesLatLon), 2, convert2dms) newSitesLocations_dms2 <- apply( newSitesLatLon2[,c(10,11)], 2, convert2dms) #legacySitesLocations_dms <- apply( coordinates( legacySites), 2, convert2dms) colnames( newSitesLocations_dms) <- c("longitude","latitude") colnames( newSitesLocations_dms2) <- c("longitude","latitude") newSitesLocations_dms[,"longitude"] <- paste0( newSitesLocations_dms[,"longitude"], "E") newSitesLocations_dms[,"latitude"] <- paste0( newSitesLocations_dms[,"latitude"], "S") newSitesLocations_dms2[,"longitude"] <- paste0( newSitesLocations_dms2[,"longitude"], "E") newSitesLocations_dms2[,"latitude"] <- paste0( newSitesLocations_dms2[,"latitude"], "S") newsitesLatLon3 <- cbind(newSitesLatLon2,newSitesLocations_dms2) 
#legacySitesLocations_dms[,"longitude"] <- paste0( legacySitesLocations_dms[,"longitude"], "E") #legacySitesLocations_dms[,"latitude"] <- paste0( legacySitesLocations_dms[,"latitude"], "S") write.csv(newsitesLatLon3, paste(o2.dir, "Design2_notClusTrans_LatLon.csv", sep='/')) #write the locations write.table( newSitesLocations_dms, file=paste0( paste("./Designs/newSitesLocations_dms", Sys.Date(), sep="_"), ".txt"), sep=" ", row.names=FALSE, quote=FALSE) write.table( legacySitesLocations_dms, file=paste0( paste("./Designs/legacySitesLocations_dms", Sys.Date(), sep="_"), ".txt"), sep=" ", row.names=FALSE, quote=FALSE) rm( list=lso()$OTHERS) rm( getlocal, convert2dms)
/MBdesignGB-n3/outputs/newdesign/3.1.Design_notClus_transects_zone.R
no_license
anitas-giraldo/MBH-GeoBay
R
false
false
10,123
r
#### Design Transects no clusters #### library( rgdal) library( sp) library( raster) library( MBHdesign) ######################## #read in the inclusion probs inclProbs <- raster( x="inclProbs_design1.tif") inclProbs <- setValues( inclProbs, values( inclProbs) / sum( values( inclProbs), na.rm=TRUE)) rootInclProbs <- inclProbs rootInclProbs <- setValues( rootInclProbs, sqrt( values( rootInclProbs))) zones <- readRDS( "GBZones_forDesign1.RDS") #BRUVS <- readRDS( "referenceBruvs_forDesign2.RDS") #if( class( BRUVS) != "SpatialPointsDataFrame") # BRUVS <- SpatialPointsDataFrame( coords=BRUVS[,c("longitude","latitude")], data=BRUVS, proj4string = CRS( proj4string( zones[[1]]))) straw.nums <- readRDS( "StrawmanNumbers_Zones.RDS") ##aggregate raster to speed up computation. May need to revisit? #rootInclProbs_agg <- aggregate( rootInclProbs, fact=20, fun=sum) #0.5km between centres #rootInclProbs_agg_100m <- aggregate( rootInclProbs, fact=4, fun=sum) #100m between centres ################################ #### choose reference sites #### This is a one-step sample and hence #### uses orignal inclProbs ################################ #flag for whether to revisit site or not #BRUVS@data$revisit <- FALSE #working out density dependent probs of inclusion #tmpBRUVS <- spTransform( BRUVS, CRS="+init=epsg:3577") #tmpDist <- as.matrix( dist( coordinates( tmpBRUVS))) #tmpDist1 <- apply( tmpDist, 1, function(x) sum( x<1000)) #BRUVS@data$sampleProbs <- 1 / sqrt( tmpDist1) #to alter the importance a little bit #BRUVS@data$sampleProbs <- BRUVS@data$sampleProbs / sum( BRUVS@data$sampleProbs, na.rm=TRUE) #BRUVS@data$sampleProbs <- BRUVS@data$sampleProbs * BRUVS@data$inclProbs #TPI inclusion probs are zero in most places #to get similar numbers of each year... 
tmp <- tapply( BRUVS@data$sampleProbs, BRUVS@data$year, sum) tmp1 <- BRUVS@data$sampleProbs for( yy in unique( BRUVS@data$year)) tmp1[BRUVS@data$year==yy] <- tmp1[BRUVS@data$year==yy] / tmp[yy] tmp1[BRUVS@data$year==2006] <- 5 * tmp1[BRUVS@data$year==2006] #so 2006 sites are 5 times more likely than they should be. tmp1 <- tmp1 / sum( tmp1) BRUVS@data$sampleProbs <- tmp1 numRef <- rep( NA, 4) names( numRef) <- c("NPZ", "HPZ", "SPZ", "MUZ") #### Set the seed for reproducability #set.seed( 727) for( zz in c( "NPZ", "HPZ", "SPZ", "MUZ")){ myZone <- zones[[zz]] if( zz == "MUS") myZone = zones$AMP - zones$IUCN2 tmpDrop <- as.vector( as.matrix( over( BRUVS, myZone))) numRef[zz] <- min( floor( straw.nums[zz]/4), sum( tmpDrop, na.rm=TRUE)) BRUVS@data[!is.na( tmpDrop), "revisit"][sample.int( sum( tmpDrop, na.rm=TRUE), numRef[zz], prob=BRUVS@data[!is.na( tmpDrop),"sampleProbs"], replace=FALSE)] <- TRUE # BRUVS@data[!is.na( tmpDrop), "revisit"][sample.int( sum( tmpDrop, na.rm=TRUE), numRef[zz], replace=FALSE)] <- TRUE } # load legacy sites legacySites <- readOGR(dsn="C:/Users/00093391/Dropbox/UWA/Research Associate/MBHpackage/Ningaloo19_Data/legacySites_2019-12-23.shp") legacySites@data$year table( legacySites@data$year) / table( BRUVS@data$year) plot( rootInclProbs_agg) plot( BRUVS, add=TRUE) points( coordinates( BRUVS)[BRUVS@data$revisit,], col='red') legacySites <- BRUVS@data[BRUVS@data$revisit,] legacySites <- SpatialPointsDataFrame( coords=legacySites[,c("longitude","latitude")], data=legacySites, proj4string=CRS(proj4string(inclProbs))) # 49 legacy sites ############################ #### Spatial sample of new sites #### from altered incl. probs. 
############################
### Here use quasiSamp to get random points ####
## these points will be the center of buffer for transects ###

#### Set the seed for reproducability
set.seed( 777)

#### Here I use transectSamp rather than quasiSamp----
# One spatially-balanced transect sample per management zone; each element
# of newSitesT holds the transectSamp() result for that zone.
newSitesT <- list(NPZ=NULL,HPZ=NULL,SPZ=NULL,MUZ=NULL)
for( zz in c("NPZ", "HPZ", "SPZ", "MUZ")){
  print( zz)
  #the number of samples to take (specified minus the legacy number)
  numby <- floor( (straw.nums[zz]))
  #numby <- floor( (straw.nums[zz] - numRef[zz])/4)
  #fullZones[[ii]] <- rownames( newSites@data)[ii]
  #set up spatial domain
  myZone <- zones[[zz]]
  #if( zz == "AMP"){
  # myZone = zones$AMP - zones$IUCN2
  #set.seed( 747)
  #}
  # Restrict the root inclusion-probability raster to this zone.
  #tmpIP <- mask( rootInclProbs_agg_100m, myZone)
  tmpIP <- mask( rootInclProbs, myZone)
  tmpIP <- crop( tmpIP, myZone)
  # Flatten raster values to a data frame of candidate sites.  The matrix
  # transpose plus the (y, x) sort below lines cell values up with
  # coordinates(tmpIP) sorted the same way -- presumably the site ordering
  # transectSamp expects; TODO confirm against the MBHdesign docs.
  tmpIP2 <- raster::as.matrix(tmpIP)
  tmpIP2 <- t(tmpIP2)
  tmpIPdf <- as.data.frame ( cbind (coordinates (tmpIP), as.numeric (tmpIP2)))
  colnames(tmpIPdf) <- c("x", "y", "incProbs")
  tmpIPdf <- tmpIPdf[ order(tmpIPdf$y, tmpIPdf$x),] # order ascending first by northing and then by easting
  potsites <- coordinates( tmpIP)
  potsites <- as.data.frame(potsites)
  potsites <- potsites[ order(potsites$y, potsites$x),] # order ascending first by northing and then by easting
  #take the sample of clusters based on root incl probs
  #newSites[[zz]] <- quasiSamp( n=numby, potential.sites=coordinates( tmpIP), inclusion.probs=values(tmpIP), nSampsToConsider=5000)
  newSitesT[[zz]] <- transectSamp( n=numby, potential.sites = potsites[,c("x","y")],
                                   #potential.sites= tmpdf[,c("x","y")],
                                   #inclusion.probs= incprobdf[,3],
                                   inclusion.probs= tmpIPdf[,3],
                                   control=gb.control
                                   #constrainedSet=gb.constraints.bool
  )
  #plotting (maybe remove at a later date?)
  #tmpIPFull <- mask( rootInclProbs, myZone)
  #tmpIPFull <- crop( tmpIPFull, myZone)
  #plot( tmpIPFull)
  #plot( legacySites, add=TRUE, pch=1, col='red')
  #newsitessp <- coordinates(newSitesT[[zz]]$points) <- ~x+y
  #points(newsitessp, pch=20, col="black", add=T)
  #points( newSitesT[[zz]][,c("x","y")], pch=20, col='black')
}

# Stack the per-zone transect points into a single data frame.
#newSitesTpoints <- do.call( "rbind", newSitesT$NPZ$points, newSitesT$HPZ$points, newSitesT$SPZ$points, newSitesT$MUZ$points)
newSitesTpoints <- rbind (newSitesT$NPZ$points, newSitesT$HPZ$points, newSitesT$SPZ$points, newSitesT$MUZ$points)
newSitesTsp <- newSitesTpoints
coordinates(newSitesTsp) <- ~x+y
#some of the spatial balance is not great... Presumably because the balance of the reference sites is also not great...
plot(gb)
points(newSitesTsp, add=T)

# Transect labels: 3 transects per NPZ/HPZ and 6 per SPZ/MUZ (18 total).
fzones <- list("NPZ1", "NPZ2", "NPZ3", "HPZ1", "HPZ2", "HPZ3", "SPZ1", "SPZ2", "SPZ3", "SPZ4", "SPZ5", "SPZ6", "MUZ1", "MUZ2", "MUZ3", "MUZ4", "MUZ5", "MUZ6")
#fullSamplep <- do.call( "rbind", fullSample[19:36])
#fullSamplet <- do.call( "rbind", fullSample$transects)
#fullSample2 <- bind_rows(fullSample, .id = "column_label")
# NOTE(review): each=5 assigns 5 points to each transect label, but the
# trailing comment says 15 -- confirm the points-per-transect setting in
# gb.control matches.
newSitesTpoints$cluster <- rep( do.call( "c", fzones), each=5) # 15 is number of point per transect
#tpoints <- fullSample$points[,c("x","y")]
write.csv(newSitesTpoints, "~/MBHdesignGB/outputs/newdesign/GB_design2_TransectsNotClus.csv")

newSitesTsp <- SpatialPointsDataFrame( coords=newSitesTpoints[,c("x","y")], data=newSitesTpoints, proj4string=CRS(proj4string(inclProbs)))
# Per-zone visual check of the new design against the inclusion probs.
for( zz in c("NPZ", "HPZ", "SPZ", "MUZ")){
  plot( zones[[zz]])
  plot( inclProbs, add=TRUE)
  plot( zones[[zz]], add=TRUE)
  points( newSitesTsp, pch=20, col='red')
  #plot( legacySites, add=TRUE, pch=4, col='blue')
}
#fullSample$ID <- paste( fullSample$cluster, rep( paste0( "shot.",1:6), each=nrow( newSites)), sep="_")
#fullSamplesp <- SpatialPointsDataFrame( coords=fullSamplep[,c("x","y")], data=fullSamplep, proj4string=CRS(proj4string(inclProbs)))

##################################
#### Write the shape files
writeOGR( newSitesTsp, dsn="~/MBHdesignGB/outputs/newdesign", layer=paste( "Design2_notClustTransects", Sys.Date(), sep="_"), driver="ESRI Shapefile", overwrite_layer=TRUE)
#writeOGR( legacySites, dsn="C:/Users/21933549/Dropbox/UWA/Research Associate/Ningaloo19_Data", layer=paste( "legacySites", Sys.Date(), sep="_"), driver="ESRI Shapefile", overwrite_layer=TRUE)

### Change to lat long
# Reproject the design points into the bathymetry raster's CRS (lat/lon).
b <- raster("~/MBHdesignGB/SpatialData/GB_CMR_bathy.tif")
b2 <- proj4string(b)
newSitesLatLon <- spTransform(newSitesTsp, b2)
newSitesLatLon2 <- as.data.frame(newSitesLatLon)
head(newSitesLatLon2)

##################################
#### Changing one version of the decimal degrees to degrees, minutes, seconds
# Convert a vector of decimal degrees to "deg.min.sec" strings.
# NOTE(review): the sign is intentionally dropped (see comment inside), and
# fractional seconds are scaled by 10^fractionsOfSeconds so they fit after
# the final "." -- e.g. 30.5 seconds is encoded as "305".
convert2dms <- function(xx, fractionsOfSeconds=1){
  #this is a bit of a hack (with the signage being dropped)
  neg.pattern <- (xx < 0)
  xx[neg.pattern] <- -xx[neg.pattern]
  degrees <- floor( xx) #these are all positive now
  degree.remainder <- xx %% 1
  x <- degree.remainder * 60
  minutes <- floor( x)
  minute.remainder <- x %% 1
  x <- minute.remainder * 60
  seconds <- round( x, fractionsOfSeconds) * 10^fractionsOfSeconds
  tmp <- paste( degrees, minutes, seconds, sep=".")
  # tmp[neg.pattern] <- paste0("-",tmp[neg.pattern])
  return( tmp)
}

newSitesLocations_dms <- apply( coordinates( newSitesLatLon), 2, convert2dms)
# Columns 10 and 11 of the data-frame form hold the lon/lat coordinates --
# TODO confirm these column positions against newSitesLatLon2's layout.
newSitesLocations_dms2 <- apply( newSitesLatLon2[,c(10,11)], 2, convert2dms)
#legacySitesLocations_dms <- apply( coordinates( legacySites), 2, convert2dms)
colnames( newSitesLocations_dms) <- c("longitude","latitude")
colnames( newSitesLocations_dms2) <- c("longitude","latitude")
# Append hemisphere suffixes (study area is east of Greenwich, south of
# the equator; this pairs with the sign-dropping in convert2dms).
newSitesLocations_dms[,"longitude"] <- paste0( newSitesLocations_dms[,"longitude"], "E")
newSitesLocations_dms[,"latitude"] <- paste0( newSitesLocations_dms[,"latitude"], "S")
newSitesLocations_dms2[,"longitude"] <- paste0( newSitesLocations_dms2[,"longitude"], "E")
newSitesLocations_dms2[,"latitude"] <- paste0( newSitesLocations_dms2[,"latitude"], "S")
newsitesLatLon3 <- cbind(newSitesLatLon2,newSitesLocations_dms2)
#legacySitesLocations_dms[,"longitude"] <- paste0( legacySitesLocations_dms[,"longitude"], "E")
#legacySitesLocations_dms[,"latitude"] <- paste0( legacySitesLocations_dms[,"latitude"], "S")

# Write the final design (decimal degrees plus DMS strings) to CSV.
write.csv(newsitesLatLon3, paste(o2.dir, "Design2_notClusTrans_LatLon.csv", sep='/'))

#write the locations
write.table( newSitesLocations_dms, file=paste0( paste("./Designs/newSitesLocations_dms", Sys.Date(), sep="_"), ".txt"), sep=" ", row.names=FALSE, quote=FALSE)
# NOTE(review): legacySitesLocations_dms is never created in this script
# (its apply(...) call above is commented out), so this write.table will
# fail with "object not found" -- either restore that line or drop this.
write.table( legacySitesLocations_dms, file=paste0( paste("./Designs/legacySitesLocations_dms", Sys.Date(), sep="_"), ".txt"), sep=" ", row.names=FALSE, quote=FALSE)

# Tidy the workspace; lso() and getlocal are helpers defined elsewhere.
rm( list=lso()$OTHERS)
rm( getlocal, convert2dms)
# Purl-extracted R code from the bfw "fit_latent_data" vignette: simulates a
# simple mediation model (x -> m -> y), fits it with lavaan, and shows (as
# not-run, commented chunks) the equivalent Bayesian fit with bfw, before
# and after injecting two extreme outliers.  The "#>" lines record the
# expected posterior summaries and must not be regenerated by hand.

## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ----simulatedata-------------------------------------------------------------
# Number of observations
n <- 1000
# Coefficient for a path (x -> m)
a <- .75
# Coefficient for b path (m -> y)
b <- .80
# Coefficient for total effect (x -> y)
# (this masks base::c for variables, but c(...) calls still find the function)
c <- .60
# Coefficient for indirect effect (x -> m -> y)
ab <- a * b
# Coefficient for direct effect (x -> y)
cd <- c - ab

# Compute x, m, y values
set.seed(100)
x <- rnorm(n)
m <- a * x + sqrt(1 - a^2) * rnorm(n)
eps <- 1 - (cd^2 + b^2 + 2*a * cd * b)
# NOTE(review): eps is a residual *variance*; by analogy with the
# sqrt(1 - a^2) term above one would expect sqrt(eps) here.  Left as-is
# because the expected outputs below were generated with this exact code.
y <- cd * x + b * m + eps * rnorm(n)
data <- data.frame(y = y, x = x, m = m)

## ----lavaan1------------------------------------------------------------------
# lavaan model syntax: direct, indirect (a*b) and total effects.
model <- " # direct effect
 y ~ c*x
 # mediator
 m ~ a*x
 y ~ b*m
 # indirect effect (a*b)
 ab := a*b
 # total effect
 cd := c + (a*b)"
fit <- lavaan::sem(model, data = data)
lavaan::summary(fit)

## ----fitdata1, eval = FALSE----------------------------------------------------
# (not run) Equivalent Bayesian fit with bfw.
# bayesian.fit <- bfw::bfw(project.data = data,
#                          latent = "x,m,y",
#                          saved.steps = 50000,
#                          latent.names = "Independent,Mediator,Dependent",
#                          additional = "indirect <- xm * my , total <- xy + (xm * my)",
#                          additional.names = "AB,C",
#                          jags.model = "fit",
#                          silent = TRUE)
#
# round(bayesian.fit$summary.MCMC[,3:7],3)
# #>                                       Mode   ESS  HDIlo HDIhi    n
# #> beta[2,1]: Mediator vs. Independent  0.760 48988  0.720 0.799 1000
# #> beta[3,1]: Dependent vs. Independent 0.024 13042 -0.012 0.058 1000
# #> beta[3,2]: Dependent vs. Mediator    0.751 13230  0.715 0.786 1000
# #> indirect[1]: AB                      0.571 21431  0.531 0.611 1000
# #> total[1]: C                          0.591 49074  0.555 0.630 1000

## ----noise---------------------------------------------------------------------
# Two extreme outliers: huge correlated y/x values, zero on the mediator.
biased.sigma <-matrix(c(1,1,0,1,1,0,0,0,1),3,3)
set.seed(101)
noise <- MASS::mvrnorm(n=2, mu=c(200, 300, 0), Sigma=biased.sigma, empirical=FALSE)
colnames(noise) <- c("y","x","m")
biased.data <- rbind(data,noise)

## ----lavaan2-------------------------------------------------------------------
# Refit with the contaminated data: lavaan's ML estimates are distorted.
biased.fit <- lavaan::sem(model, data = biased.data)
lavaan::summary(biased.fit)

## ----fitdata2, eval = FALSE------------------------------------------------------
# (not run) Robust Bayesian fit (run.robust = TRUE) shrugs off the outliers.
# NOTE(review): project.data is `data`, not `biased.data`, even though this
# chunk is contrasted with the biased lavaan fit -- confirm against the
# vignette text whether biased.data was intended.
# biased.bfit <- bfw::bfw(project.data = data,
#                         latent = "x,m,y",
#                         saved.steps = 50000,
#                         latent.names = "Independent,Mediator,Dependent",
#                         additional = "indirect <- xm * my , total <- xy + (xm * my)",
#                         additional.names = "AB,C",
#                         jags.model = "fit",
#                         run.robust = TRUE,
#                         jags.seed = 101,
#                         silent = TRUE)
#
# round(biased.bfit$summary.MCMC[,3:7],3)
# #>                                       Mode   ESS  HDIlo HDIhi    n
# #> beta[2,1]: Mediator vs. Independent  0.763 31178  0.721 0.799 1000
# #> beta[3,1]: Dependent vs. Independent 0.022  7724 -0.014 0.057 1000
# #> beta[3,2]: Dependent vs. Mediator    0.751  7772  0.714 0.786 1000
# #> indirect[1]: AB                      0.572 12913  0.531 0.610 1000
# #> total[1]: C                          0.590 31362  0.557 0.631 1000
/inst/doc/fit_latent_data.R
permissive
cran/bfw
R
false
false
3,710
r
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----simulatedata------------------------------------------------------------- # Number of observations n <- 1000 # Coefficient for a path (x -> m) a <- .75 # Coefficient for b path (m -> y) b <- .80 # Coefficient for total effect (x -> y) c <- .60 # Coefficient for indirect effect (x -> m -> y) ab <- a * b # Coefficient for direct effect (x -> y) cd <- c - ab # Compute x, m, y values set.seed(100) x <- rnorm(n) m <- a * x + sqrt(1 - a^2) * rnorm(n) eps <- 1 - (cd^2 + b^2 + 2*a * cd * b) y <- cd * x + b * m + eps * rnorm(n) data <- data.frame(y = y, x = x, m = m) ## ----lavaan1------------------------------------------------------------------ model <- " # direct effect y ~ c*x # mediator m ~ a*x y ~ b*m # indirect effect (a*b) ab := a*b # total effect cd := c + (a*b)" fit <- lavaan::sem(model, data = data) lavaan::summary(fit) ## ----fitdata1, eval = FALSE--------------------------------------------------- # bayesian.fit <- bfw::bfw(project.data = data, # latent = "x,m,y", # saved.steps = 50000, # latent.names = "Independent,Mediator,Dependent", # additional = "indirect <- xm * my , total <- xy + (xm * my)", # additional.names = "AB,C", # jags.model = "fit", # silent = TRUE) # # round(bayesian.fit$summary.MCMC[,3:7],3) # #> Mode ESS HDIlo HDIhi n # #> beta[2,1]: Mediator vs. Independent 0.760 48988 0.720 0.799 1000 # #> beta[3,1]: Dependent vs. Independent 0.024 13042 -0.012 0.058 1000 # #> beta[3,2]: Dependent vs. 
Mediator 0.751 13230 0.715 0.786 1000 # #> indirect[1]: AB 0.571 21431 0.531 0.611 1000 # #> total[1]: C 0.591 49074 0.555 0.630 1000 ## ----noise-------------------------------------------------------------------- biased.sigma <-matrix(c(1,1,0,1,1,0,0,0,1),3,3) set.seed(101) noise <- MASS::mvrnorm(n=2, mu=c(200, 300, 0), Sigma=biased.sigma, empirical=FALSE) colnames(noise) <- c("y","x","m") biased.data <- rbind(data,noise) ## ----lavaan2------------------------------------------------------------------ biased.fit <- lavaan::sem(model, data = biased.data) lavaan::summary(biased.fit) ## ----fitdata2, eval = FALSE--------------------------------------------------- # biased.bfit <- bfw::bfw(project.data = data, # latent = "x,m,y", # saved.steps = 50000, # latent.names = "Independent,Mediator,Dependent", # additional = "indirect <- xm * my , total <- xy + (xm * my)", # additional.names = "AB,C", # jags.model = "fit", # run.robust = TRUE, # jags.seed = 101, # silent = TRUE) # # round(biased.bfit$summary.MCMC[,3:7],3) # #> Mode ESS HDIlo HDIhi n # #> beta[2,1]: Mediator vs. Independent 0.763 31178 0.721 0.799 1000 # #> beta[3,1]: Dependent vs. Independent 0.022 7724 -0.014 0.057 1000 # #> beta[3,2]: Dependent vs. Mediator 0.751 7772 0.714 0.786 1000 # #> indirect[1]: AB 0.572 12913 0.531 0.610 1000 # #> total[1]: C 0.590 31362 0.557 0.631 1000
# Auto-generated fuzz/valgrind regression input for the CNull package.
# Calls the internal C++ routine with a NULL community matrix, zero
# repetitions, and an 8x3 input matrix containing extreme doubles, then
# prints the structure of the result (the run itself is the test: it must
# not crash or trigger memory errors).
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53830784035076e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615784285-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
329
r
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53830784035076e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L))) result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist) str(result)
# Plot 2: Global Active Power over time for 1-2 Feb 2007 (plot2.png).

# Load the full dataset; "?" marks missing values in this file.
power <- read.table("household_power_consumption.txt",
                    header = TRUE, sep = ";",
                    stringsAsFactors = FALSE, na.strings = "?")

# Keep only the two target days, then convert the date column.
feb <- subset(power, Date %in% c("1/2/2007", "2/2/2007"))
feb$Date <- as.Date(feb$Date, format = "%d/%m/%Y")

# Build a full POSIXct timestamp from the date and time columns.
feb$Daytime <- as.POSIXct(paste(feb$Date, feb$Time))

# Render the line plot to a 480x480 PNG device.
png(filename = "plot2.png", width = 480, height = 480)
plot(feb$Daytime, feb$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
/plot2.r
no_license
Lam600/ExData_Plotting1
R
false
false
631
r
# Read the data data <- read.table("household_power_consumption.txt", sep =";", header = T, stringsAsFactors = F, na.strings = "?") # Subset the data and change date format subset <- data[data$Date %in% c("1/2/2007", "2/2/2007"),] subset$Date <- as.Date(subset$Date, format = "%d/%m/%Y") # Add variable with day and time subset$Daytime <- as.POSIXct(paste(subset$Date, subset$Time, sep=" ")) # Make and save the plot png(filename="plot2.png", height = 480, width = 480) with(subset, { plot(Daytime, Global_active_power, type="l", xlab = "", ylab = "Global Active Power (kilowatts)") }) dev.off()
#' @rdname HTSet-Extract
#' @aliases $
#' @title Extract or replace parts of an HTSet object
#' @description Operators "$" and [[ extract or replace a slot, while [ extracts
#' a subset of the \code{\link{HTSet-class}} object in a similar manner of
#' \code{\link{subset_features}} and \code{\link{subset_samples}}.
#' @param x \code{\link{HTSet-class}}.
#' @param name The name of the slot.
#' @export
#' @seealso \code{\link{HTSet-class}}
setMethod("$", "HTSet", function(x, name){
    slot(x, name)
})

# NOTE: a second, byte-identical setMethod("$", ...) used to follow here; it
# silently re-registered the same getter, and the roxygen block documenting
# `$<-` was attached to that duplicate instead of the real replacement
# method below.  The duplicate was removed and the docs moved.

# Supports tab-completion of slot names after `x$` at the console.
.DollarNames.HTSet = function(x, pattern) {
    grep(pattern, slotNames(x), value = TRUE)
}

#' @rdname HTSet-Extract
#' @param value A replacement value for this slot, which must match the slot
#' definition.
#' @aliases $<-
#' @export
setReplaceMethod(
    "$", "HTSet",
    function(x, name, value){
        slot(x, name) = value
        # Reject replacements that break the HTSet class invariants.
        validObject(x)
        return(x)
    }
)

#' @rdname HTSet-Extract
#' @aliases [[
#' @aliases [[,HTSet,character-method
#' @export
setMethod(
    "[[",
    signature = signature(x = "HTSet", i = "character", j = "ANY"),
    function(x, i, j, ...){
        slot(x, i)
    }
)

#' @rdname HTSet-Extract
#' @aliases [[<-
#' @aliases [[<-,HTSet,character-method
#' @export
setReplaceMethod(
    "[[",
    signature = signature(x = "HTSet", i = "character", j = "ANY"),
    function(x, i, j, ..., value){
        stopifnot(is.character(i))
        slot(x, i) = value
        validObject(x)
        return(x)
    }
)

#' @rdname HTSet-Extract
#' @aliases [,HTSet-method
#' @aliases [
#' @param i,j indices for features (i) and samples (j) to extract.
#' @param k column index for fdata
#' @param l column index for pdata
#' @param drop If TRUE the result is coerced to the lowest possible dimension
#' @param ... other arguments
#' @export
setMethod(
    "[",
    signature = signature(x = "HTSet", i = "ANY", j = "ANY"),
    function(x, i, j, k, l, ..., drop = FALSE) {
        # Subset the expression matrix by feature rows (i) and sample
        # columns (j), keeping the sample (pdata) and feature (fdata)
        # annotation tables in sync.  Missing indices propagate, so e.g.
        # x[i, ] keeps every sample.
        x@edata = x@edata[i,j,drop = drop]
        x@pdata = x@pdata[j,l,drop = drop]
        x@fdata = x@fdata[i,k,drop = drop]
        validObject(x)
        return(x)
    }
)
/R/HTSet-accessors.R
permissive
zhuchcn/HTSet
R
false
false
2,244
r
#' @rdname HTSet-Extract #' @aliases $ #' @title Extract or replace parts of an HTSet object #' @description Operators "$" and [[ extract or replace a slot, while [ extracts #' a subset of the \code{\link{HTSet-class}} object in a similar manner of #' \code{\link{subset_features}} and \code{\link{subset_samples}}. #' @param x \code{\link{HTSet-class}}. #' @param name The name of the slot. #' @export #' @seealso \code{\link{HTSet-class}} setMethod("$", "HTSet", function(x, name){ slot(x, name) }) #' @rdname HTSet-Extract #' @param value A replacement value for this slot, which must match the slot #' definition. #' @aliases $<- #' @export setMethod( "$", signature = "HTSet", definition = function(x, name){ slot(x, name) } ) .DollarNames.HTSet = function(x, pattern) { grep(pattern, slotNames(x), value = TRUE) } #' @rdname HTSet-Extract #' @aliases [[<- #' @export setReplaceMethod( "$", "HTSet", function(x, name, value){ slot(x, name) = value validObject(x) return(x) } ) #' @rdname HTSet-Extract #' @aliases [[ #' @aliases [[,HTSet,character-method #' @export setMethod( "[[", signature = signature(x = "HTSet", i = "character", j = "ANY"), function(x, i, j, ...){ slot(x, i) } ) #' @rdname HTSet-Extract #' @aliases [[<- #' @aliases [[<-,HTSet,character-method #' @export setReplaceMethod( "[[", signature = signature(x = "HTSet", i = "character", j = "ANY"), function(x, i, j, ..., value){ stopifnot(is.character(i)) slot(x, i) = value validObject(x) return(x) } ) #' @rdname HTSet-Extract #' @aliases [,HTSet-method #' @aliases [ #' @param i,j indices for features (i) and samples (j) to extract. #' @param k column index for fdata #' @param l column index for pdata #' @param drop If TRUE the result is coerced to the lowest possible dimension #' @param ... 
other arguments #' @export setMethod( "[", signature = signature(x = "HTSet", i = "ANY", j = "ANY"), function(x, i, j, k, l, ..., drop = FALSE) { x@edata = x@edata[i,j,drop = drop] x@pdata = x@pdata[j,l,drop = drop] x@fdata = x@fdata[i,k,drop = drop] validObject(x) return(x) } )
# run_analysis.R -- builds a tidy summary of the UCI HAR dataset.
#
# Fix: melt()/dcast() come from the reshape2 package, which was never
# loaded, so step 5 failed with "could not find function". Load it here.
library(reshape2)

#Reading all files (activity labels, subject ids, measurements for both splits)
testL <- read.table("./test/y_test.txt", col.names="act")
testS<- read.table("./test/subject_test.txt", col.names="subject")
testD <- read.table("./test/X_test.txt")
trainL <- read.table("./train/y_train.txt", col.names="act")
trainS <- read.table("./train/subject_train.txt", col.names="subject")
trainD <- read.table("./train/X_train.txt")
features <- read.table("features.txt", stringsAsFactors=FALSE)
activ <- read.table("activity_labels.txt", stringsAsFactors=FALSE)

#STEP1
## Merges the training and the test sets to create one data set.
testRes<-cbind(testL,testS,testD)
trainRes<-cbind(trainL,trainS,trainD)
result<-rbind(testRes,trainRes)

# STEP2
##Extracts only the measurements on the mean and standard deviation for
##each measurement.  Feature indices are offset by 2 because columns 1-2
##of `result` are the activity code and subject id.
FNS<-features[grep("mean\\(\\)|std\\(\\)", features$V2),]
FNS <- result[, c(1, 2, FNS$V1+2)]

#STEP3
##Uses descriptive activity names to name the activities in the data set
##(activity codes 1-6 index directly into the labels table).
FNS$act <- activ[FNS$act, 2]

#STEP4
##Appropriately labels the data set with descriptive variable names.
col.names<- features[grep("mean\\(\\)|std\\(\\)", features$V2),2]
col.names <- c("activity","subject",col.names)
names(FNS) <- col.names

#STEP5
##From the data set in step 4, creates a second, independent tidy data set
##with the average of each variable for each activity and each subject.
ind_tidy <- melt(FNS, id=c("subject","activity"))
ind_tidy <- dcast(ind_tidy, subject+activity ~ variable, mean)

##Write second independent data into file (TRUE spelled out instead of T;
##quote=2 quotes only the second column, as in the original submission).
write.table(format(ind_tidy, scientific=TRUE), "ind_tidy.txt", row.names=FALSE, col.names=FALSE, quote=2)
/run_analysis.R
no_license
Samariya57/Getting-and-Cleaning-Data-Course-Project
R
false
false
1,671
r
#Reading all files testL <- read.table("./test/y_test.txt", col.names="act") testS<- read.table("./test/subject_test.txt", col.names="subject") testD <- read.table("./test/X_test.txt") trainL <- read.table("./train/y_train.txt", col.names="act") trainS <- read.table("./train/subject_train.txt", col.names="subject") trainD <- read.table("./train/X_train.txt") features <- read.table("features.txt", stringsAsFactors=FALSE) activ <- read.table("activity_labels.txt", stringsAsFactors=FALSE) #STEP1 ## Merges the training and the test sets to create one data set. testRes<-cbind(testL,testS,testD) trainRes<-cbind(trainL,trainS,trainD) result<-rbind(testRes,trainRes) # STEP2 ##Extracts only the measurements on the mean and standard deviation for ##each measurement. FNS<-features[grep("mean\\(\\)|std\\(\\)", features$V2),] FNS <- result[, c(1, 2, FNS$V1+2)] #STEP3 ##Uses descriptive activity names to name the activities in the data set FNS$act <- activ[FNS$act, 2] #STEP4 ##Appropriately labels the data set with descriptive variable names. col.names<- features[grep("mean\\(\\)|std\\(\\)", features$V2),2] col.names <- c("activity","subject",col.names) names(FNS) <- col.names #STEP5 ##From the data set in step 4, creates a second, independent tidy data set ##with the average of each variable for each activity and each subject. ind_tidy <- melt(FNS, id=c("subject","activity")) ind_tidy <- dcast(ind_tidy, subject+activity ~ variable, mean) ##Write second independent data into file write.table(format(ind_tidy, scientific=T), "ind_tidy.txt", row.names=FALSE, col.names=FALSE, quote=2)
/P4/P4DavidPerez.R
no_license
davidperez98/DOSBOX
R
false
false
2,534
r
#!/usr/bin/env Rscript

# Snakemake script: scatter-plot of per-gene total variance against mean
# expression from scran's Poisson-based variance decomposition, with the
# fitted technical trend, the selected HVGs highlighted, and the top
# `params$n` genes (by biological variance component) labelled.  The plot
# is written to output$pdf and then trimmed/bordered in place.

set.seed(1701)

main <- function(input, output, params, log) {

    # Log function
    # Redirect stdout/stderr to the Snakemake log files.
    # NOTE(review): the sink()s and file connections are never closed; fine
    # for a one-shot Rscript, but would leak if main() were called twice.
    out <- file(log$out, open = "wt")

    err <- file(log$err, open = "wt")

    sink(out, type = "output")

    sink(err, type = "message")

    # Script function
    library(ggplot2)

    library(ggrepel)

    library(scales)

    library(scran)

    # input$rds[1]: per-gene variance decomposition (with mean, total, tech,
    # bio, gene.name columns); input$rds[2]: character vector of HVG names.
    dec <- readRDS(input$rds[1])

    hvg <- readRDS(input$rds[2])

    # Flag genes that were selected as highly variable.
    dec$variable <- rownames(dec) %in% hvg

    # Legend labels carry the group counts; red = variable, grey = rest.
    lab <- list(
        "TRUE" = sprintf("Variable (%s)", comma(sum(dec$variable))),
        "FALSE" = sprintf("Non-variable (%s)", comma(sum(!dec$variable)))
    )

    col <- list(
        "TRUE" = "#E15759",
        "FALSE" = "#BAB0AC"
    )

    # Label only the top params$n genes ranked by biological variance.
    dec$name <- ""

    ind <- which(dec$bio >= sort(dec$bio, decreasing = TRUE)[params$n], arr.ind = TRUE)

    dec$name[ind] <- dec$gene.name[ind]

    # Mean-variance plot: points coloured by HVG status, line = technical trend.
    plt <- ggplot(as.data.frame(dec)) +
        geom_point(aes(x = mean, y = total, colour = variable)) +
        geom_line(aes(x = mean, y = tech), colour = "#E15759") +
        scale_colour_manual(values = col, labels = lab) +
        geom_text_repel(aes(x = mean, y = total, label = name), colour = "#000000", size = 2, segment.size = 0.2) +
        labs(x = "Mean", y = "Total") +
        theme_bw() +
        theme(legend.title = element_blank(), legend.position = "top")

    ggsave(filename = output$pdf, plot = plt, width = 8, height = 6, scale = 0.8)

    # Image function
    # Post-process the saved PDF: trim surrounding whitespace, then add a
    # uniform 50-pixel white border and overwrite the file.
    library(magick)

    pdf <- image_read_pdf(output$pdf)

    pdf <- image_trim(pdf)

    pdf <- image_border(pdf, color = "#FFFFFF", geometry = "50x50")

    pdf <- image_write(pdf, path = output$pdf, format = "pdf")

}

main(snakemake@input, snakemake@output, snakemake@params, snakemake@log)
/workflow/scripts/feature-selection/plotGeneVarByPoisson.R
permissive
jma1991/DiasTailbudData
R
false
false
1,764
r
#!/usr/bin/env Rscript set.seed(1701) main <- function(input, output, params, log) { # Log function out <- file(log$out, open = "wt") err <- file(log$err, open = "wt") sink(out, type = "output") sink(err, type = "message") # Script function library(ggplot2) library(ggrepel) library(scales) library(scran) dec <- readRDS(input$rds[1]) hvg <- readRDS(input$rds[2]) dec$variable <- rownames(dec) %in% hvg lab <- list( "TRUE" = sprintf("Variable (%s)", comma(sum(dec$variable))), "FALSE" = sprintf("Non-variable (%s)", comma(sum(!dec$variable))) ) col <- list( "TRUE" = "#E15759", "FALSE" = "#BAB0AC" ) dec$name <- "" ind <- which(dec$bio >= sort(dec$bio, decreasing = TRUE)[params$n], arr.ind = TRUE) dec$name[ind] <- dec$gene.name[ind] plt <- ggplot(as.data.frame(dec)) + geom_point(aes(x = mean, y = total, colour = variable)) + geom_line(aes(x = mean, y = tech), colour = "#E15759") + scale_colour_manual(values = col, labels = lab) + geom_text_repel(aes(x = mean, y = total, label = name), colour = "#000000", size = 2, segment.size = 0.2) + labs(x = "Mean", y = "Total") + theme_bw() + theme(legend.title = element_blank(), legend.position = "top") ggsave(filename = output$pdf, plot = plt, width = 8, height = 6, scale = 0.8) # Image function library(magick) pdf <- image_read_pdf(output$pdf) pdf <- image_trim(pdf) pdf <- image_border(pdf, color = "#FFFFFF", geometry = "50x50") pdf <- image_write(pdf, path = output$pdf, format = "pdf") } main(snakemake@input, snakemake@output, snakemake@params, snakemake@log)
## compute-theta.r: estimates effective population size (Ne) of the drug-
## resistant SHIV population in each macaque via several theta estimators,
## at the timepoint where the vRNA is first (mostly) drug resistant.
## Relies on globals defined elsewhere: monknames, monkid, weekind,
## samp.loc, nuc, aa, distMat, and the helper percentageDR.wkind().

#For a given monkey, weekval (i.e., 1 = weeks 12/13, 2 = weeks 15/16, etc), mut identity and position, determine the percentage of the vRNA (including plasma) that is drug resistant
#This was to calculate the week at which most of the RNA was resistant, but I ended up hacking it a bit (see maxInds)
maxDRs <- list()
for(monk in monknames){
    toAdd <- c()
    # Scalar `|` works here (both comparisons are length 1), though `||`
    # is the conventional operator inside if().
    for(i in sort(unique(weekind[monkid == monk]))){
        if(monk == "T98133" | monk == "A99039"){
            # M184I/V macaques
            toAdd[i] <- percentageDR.wkind(monk, i, "V|I", c(184))
        }else if(monk == "A99165" | monk == "A01198"){
            # K103N macaques
            toAdd[i] <- percentageDR.wkind(monk, i, "N", c(103))
        }
    }
    # NOTE(review): assigning toAdd[i] by week index assumes each monkey's
    # weekind values are contiguous 1..k; a gap leaves NA entries, which
    # would make sum(x > .9) below NA and crash the if() -- confirm.
    maxDRs[[monk]] <- toAdd
}

#maxInds returns the first point at which the RNA is more than 90% DR OR the maximally DR if the RNA is never 90+% DR.
maxInds <- unlist(lapply(maxDRs, function(x){
    #if the vRNA is never observed to be at frequency over 90%,
    if(sum(x > .9) == 0){
        # just return the ind at which it's maximized
        return(which.max(x))
    }else{
        #otherwise, return the point at which it's first greater than .9
        return(min(which(x > .9)))
    }}
))

#For the ordered monkeys (T98, 165, 039, 198), what are the relevant DRMs
mutPos <- c(184, 103, 184, 103)
mutIdent <- c("I|V", "N", "I|V", "N")

#thetas will store the values of theta that will eventually be returned
# Rows = macaques (in monknames order); columns = the four estimators.
thetas <- matrix(data = NA, nrow = 4, ncol = 4)
colnames(thetas) <- c("Theta_nuc (DR vRNA)", "Theta_hap (DR vRNA)", "Theta_nuc (DR all)", "Theta_hap (DR all)")

#for the vRNA from each macaque
for(monk in monknames){
    #get all the indices for that monk
    monkInd <- which(monk == monknames)
    Nes <- c()
    #For both RNA and RNA+DNA
    for(rnaOrAll in c("RNA|PLASMA", "RNA|DNA|PLASMA")){
        monkRNAinds <- intersect(which(monkid == monk), grep(rnaOrAll, samp.loc))
        relTime <- maxInds[monkInd]
        #Determine ALL the relevant indices for the timepoint we found above
        relIndsAll <- intersect(monkRNAinds, which(weekind == relTime))
        #But now, we only want the drug resistant variants.
        #relIndsDR are our drug resistant, rna or rna/dna inds from a monkey at the timepoint
        relIndsDR <- relIndsAll[grep(mutIdent[monkInd], aa[relIndsAll,mutPos[monkInd]])]
        #Now, onto computing H
        #Method 1: different identities (i.e., I versus V) count as different variants
        #store the relevant position (i.e., 184 or 103) in pos
        pos <- mutPos[which(monk == monknames)]
        #Look at all the way the nucleotides encode that position
        # Codon for amino acid `pos`, offset by one codon into the alignment.
        AAencodes <- apply(nuc[relIndsDR, (3*(pos+1) -2):(3*(pos+1))], 1, paste, collapse = "")
        #Compute heterozygosity by determining the count of each variant (table) divided by the length to get frequencies, which are then squared and summed. We need to subtract from 1 to go from homozygosity to heterozygosity
        H <- 1-sum((table(AAencodes)/length(relIndsDR))^2)
        theta.est.vars <- H/(1-H)
        #Method 2: different linked variants
        #This is pretty ugly, but I'm determining which positions (135-875, because the others are sort of messy) have a minor allele at frequency greater than 10% of the sample size
        # NOTE(review): this assumes apply(..., 2, table) returns a list
        # (allele-count lengths differ across positions); if every position
        # happened to yield tables of equal length it would return a matrix
        # and the lapply/which indexing would be wrong -- confirm.
        polymorphicInds <- (135:875)[which(unlist(lapply(apply(nuc[relIndsDR, 135:875], 2, table), function(x){
            #sort all the different alleles at a position among the relevant inds and store the second most frequent one
            tmp <- sort(x, decreasing = TRUE)[2]
            #If that second most frequent allele is at frequency higher than 10% (and also exists), keep track of that index
            (!is.na(tmp) & tmp > length(relIndsDR)/10)
            # (!is.na(tmp) & tmp >= 2)
        }
        )) == TRUE)]
        #Compute H among THESE different frequencies, as before
        H <- 1-sum((table(apply(nuc[relIndsDR,polymorphicInds], 1, paste, collapse = ""))/length(relIndsDR))^2)
        theta.est.haps <- H/(1-H)
        #Store
        Nes <- c(Nes, c(theta.est.vars, theta.est.haps))
    }
    thetas[which(monk == monknames),] <- Nes
}

#Andy Leigh Brown approach
# Theta_d: mean pairwise distance among plasma sequences at the focal
# timepoint, scaled per site over positions 135:900.
ALB.ests <- c()
for(monk in monknames){
    monkRNAinds <- intersect(which(monkid == monk), grep("PLASMA", samp.loc))
    relTime <- maxInds[which(monk == monknames)]
    relIndsAll <- intersect(monkRNAinds, which(weekind == relTime))
    ALB.ests[which(monk == monknames)] <- mean(distMat[relIndsAll,relIndsAll][lower.tri(distMat[relIndsAll,relIndsAll])])/length(135:900)
}

fulltab <- cbind(ALB.ests, thetas)
rownames(fulltab) <- monknames
colnames(fulltab)[1] <- "Theta_d (Plasma)"

#Convert from thetas to Nes
# Each row (macaque) is divided by 2*mu for its DRM's mutation class.
# NOTE(review): rows 1 and 3 are the M184 macaques per mutPos above, yet
# they get different rates (transition vs transversion); confirm the
# intended row-to-mutation-rate mapping against monknames' order.
mu.transition <- 1.4*10^(-5)
mu.transversion <- 2*10^(-6)
fulltab[1,] <- fulltab[1,]/(2*mu.transition)
fulltab[2,] <- fulltab[2,]/(2*mu.transversion)
fulltab[3,] <- fulltab[3,]/(2*mu.transversion)
fulltab[4,] <- fulltab[4,]/(2*mu.transition)

#print the results
print.xtable(xtable(fulltab, digits = -2), type = "html", paste("../out/tables/theta.html", sep = ""))
/code/compute-theta.r
no_license
affeder/SHIV-structure
R
false
false
4,999
r
#For a given monkey, weekval (i.e., 1 = weeks 12/13, 2 = weeks 15/16, etc), mut identity and position, determine the percentage of the vRNA (including plasma) that is drug resistant #This was to calculate the week at which most of the RNA was resistant, but I ended up hacking it a bit (see maxInds) maxDRs <- list() for(monk in monknames){ toAdd <- c() for(i in sort(unique(weekind[monkid == monk]))){ if(monk == "T98133" | monk == "A99039"){ toAdd[i] <- percentageDR.wkind(monk, i, "V|I", c(184)) }else if(monk == "A99165" | monk == "A01198"){ toAdd[i] <- percentageDR.wkind(monk, i, "N", c(103)) } } maxDRs[[monk]] <- toAdd } #maxInds returns the first point at which the RNA is more than 90% DR OR the maximally DR if the RNA is never 90+% DR. maxInds <- unlist(lapply(maxDRs, function(x){ #if the vRNA is never observed to be at frequency over 90%, if(sum(x > .9) == 0){ # just return the ind at which it's maximized return(which.max(x)) }else{ #otherwise, return the point at which it's first greater than .9 return(min(which(x > .9))) }} )) #For the ordered monkeys (T98, 165, 039, 198), what are the relevant DRMs mutPos <- c(184, 103, 184, 103) mutIdent <- c("I|V", "N", "I|V", "N") #thetas will store the values of theta that will eventually be returned thetas <- matrix(data = NA, nrow = 4, ncol = 4) colnames(thetas) <- c("Theta_nuc (DR vRNA)", "Theta_hap (DR vRNA)", "Theta_nuc (DR all)", "Theta_hap (DR all)") #for the vRNA from each macaque for(monk in monknames){ #get all the indices for that monk monkInd <- which(monk == monknames) Nes <- c() #For both RNA and RNA+DNA for(rnaOrAll in c("RNA|PLASMA", "RNA|DNA|PLASMA")){ monkRNAinds <- intersect(which(monkid == monk), grep(rnaOrAll, samp.loc)) relTime <- maxInds[monkInd] #Determine ALL the relevant indices for the timepoint we found above relIndsAll <- intersect(monkRNAinds, which(weekind == relTime)) #But now, we only want the drug resistant variants. 
#relIndsDR are our drug resistant, rna or rna/dna inds from a monkey at the timepoint relIndsDR <- relIndsAll[grep(mutIdent[monkInd], aa[relIndsAll,mutPos[monkInd]])] #Now, onto computing H #Method 1: different identities (i.e., I versus V) count as different variants #store the relevant position (i.e., 184 or 103) in pos pos <- mutPos[which(monk == monknames)] #Look at all the way the nucleotides encode that position AAencodes <- apply(nuc[relIndsDR, (3*(pos+1) -2):(3*(pos+1))], 1, paste, collapse = "") #Compute heterozygosity by determining the count of each variant (table) divided by the length to get frequencies, which are then squared and summed. We need to subtract from 1 to go from homozygosity to heterozygosity H <- 1-sum((table(AAencodes)/length(relIndsDR))^2) theta.est.vars <- H/(1-H) #Method 2: different linked variants #This is pretty ugly, but I'm determining which positions (135-875, because the others are sort of messy) have a minor allele at frequency greater than 10% of the sample size polymorphicInds <- (135:875)[which(unlist(lapply(apply(nuc[relIndsDR, 135:875], 2, table), function(x){ #sort all the different alleles at a position among the relevant inds and store the second most frequent one tmp <- sort(x, decreasing = TRUE)[2] #If that second most frequent allele is at frequency higher than 10% (and also exists), keep track of that index (!is.na(tmp) & tmp > length(relIndsDR)/10) # (!is.na(tmp) & tmp >= 2) } )) == TRUE)] #Compute H among THESE different frequencies, as before H <- 1-sum((table(apply(nuc[relIndsDR,polymorphicInds], 1, paste, collapse = ""))/length(relIndsDR))^2) theta.est.haps <- H/(1-H) #Store Nes <- c(Nes, c(theta.est.vars, theta.est.haps)) } thetas[which(monk == monknames),] <- Nes } #Andy Leigh Brown approach ALB.ests <- c() for(monk in monknames){ monkRNAinds <- intersect(which(monkid == monk), grep("PLASMA", samp.loc)) relTime <- maxInds[which(monk == monknames)] relIndsAll <- intersect(monkRNAinds, which(weekind == 
relTime)) ALB.ests[which(monk == monknames)] <- mean(distMat[relIndsAll,relIndsAll][lower.tri(distMat[relIndsAll,relIndsAll])])/length(135:900) } fulltab <- cbind(ALB.ests, thetas) rownames(fulltab) <- monknames colnames(fulltab)[1] <- "Theta_d (Plasma)" #Convert from thetas to Nes mu.transition <- 1.4*10^(-5) mu.transversion <- 2*10^(-6) fulltab[1,] <- fulltab[1,]/(2*mu.transition) fulltab[2,] <- fulltab[2,]/(2*mu.transversion) fulltab[3,] <- fulltab[3,]/(2*mu.transversion) fulltab[4,] <- fulltab[4,]/(2*mu.transition) #print the results print.xtable(xtable(fulltab, digits = -2), type = "html", paste("../out/tables/theta.html", sep = ""))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/project.R \name{prLibrary} \alias{prLibrary} \title{Load and install libraries} \usage{ prLibrary(...) } \arguments{ \item{...}{name of the libraries to load. The names need to be quoted.} } \description{ The function tries to load all libraries passed as argument. For those that are not installed, it tries to install them and then load them. } \examples{ prLibrary("data.table", "plyr") } \seealso{ \code{\link{prSource}} }
/man/prLibrary.Rd
no_license
aivachine/project
R
false
true
507
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/project.R \name{prLibrary} \alias{prLibrary} \title{Load and install libraries} \usage{ prLibrary(...) } \arguments{ \item{...}{name of the libraries to load. The names need to be quoted.} } \description{ The function tries to load all libraries passed as argument. For those that are not installed, it tries to install them and then load them. } \examples{ prLibrary("data.table", "plyr") } \seealso{ \code{\link{prSource}} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/EValue.R \name{evalue} \alias{evalue} \title{Compute an E-value for unmeasured confounding} \usage{ evalue(est, lo = NA, hi = NA, se = NA, delta = 1, true = c(0, 1), ...) } \arguments{ \item{est}{The effect estimate that was observed but which is suspected to be biased. A number of class "estimate" (constructed with \code{\link[=RR]{RR()}}, \code{\link[=OR]{OR()}}, \code{\link[=HR]{HR()}}, \code{\link[=OLS]{OLS()}}, or \code{\link[=MD]{MD()}}; for E-values for risk differences, see \code{\link[=evalues.RD]{evalues.RD()}}).} \item{lo}{Optional. Lower bound of the confidence interval. If not an object of class "estimate", assumed to be on the same scale as \code{est}.} \item{hi}{Optional. Upper bound of the confidence interval. If not an object of class "estimate", assumed to be on the same scale as \code{est}.} \item{se}{The standard error of the point estimate, for \code{est} of class "OLS"} \item{delta}{The contrast of interest in the exposure, for \code{est} of class "OLS"} \item{true}{A number to which to shift the observed estimate to. Defaults to 1 for ratio measures (\code{\link[=RR]{RR()}}, \code{\link[=OR]{OR()}}, \code{\link[=HR]{HR()}}) and 0 for additive measures (\code{\link[=OLS]{OLS()}}, \code{\link[=MD]{MD()}}).} \item{...}{Arguments passed to other methods.} } \description{ Returns a data frame containing point estimates, the lower confidence limit, and the upper confidence limit on the risk ratio scale (possibly through an approximate conversion) as well as E-values for the point estimate and the confidence interval limit closer to the null. } \details{ An E-value for unmeasured confounding is minimum strength of association, on the risk ratio scale, that unmeasured confounder(s) would need to have with both the treatment and the outcome to fully explain away a specific treatment–outcome association, conditional on the measured covariates. 
The estimate is converted appropriately before the E-value is calculated. See \link[=convert_measures]{conversion functions} for more details. The point estimate and confidence limits after conversion are returned, as is the E-value for the point estimate and the confidence limit closest to the proposed "true" value (by default, the null value.) For an \code{\link[=OLS]{OLS()}} estimate, the E-value is for linear regression with a continuous exposure and outcome. Regarding the continuous exposure, the choice of \code{delta} defines essentially a dichotomization in the exposure between hypothetical groups of subjects with exposures equal to an arbitrary value \emph{c} versus to another hypothetical group with exposures equal to \emph{c} + \code{delta}. For example, if resulting E-value is 2, this means that unmeasured confounder(s) would need to double the probability of a subject's having exposure equal to \emph{c} + \code{delta} instead of \emph{c}, and would also need to double the probability of being high versus low on the outcome, in which the cutoff for "high" versus "low" is arbitrary subject to some distributional assumptions (Chinn, 2000). 
} \examples{ # compute E-value for leukemia example in VanderWeele and Ding (2017) evalue(RR(0.80), 0.71, 0.91) # you can also pass just the point estimate # and return just the E-value for the point estimate with summary() summary(evalue(RR(0.80))) # demonstrate symmetry of E-value # this apparently causative association has same E-value as the above summary(evalue(RR(1 / 0.80))) # E-value for a non-null true value summary(evalue(RR(2), true = 1.5)) ## Hsu and Small (2013 Biometrics) Data ## sensitivity analysis after log-linear or logistic regression head(lead) ## log linear model -- obtain the conditional risk ratio lead.loglinear = glm(lead ~ ., family = binomial(link = "log"), data = lead[,-1]) est_se = summary(lead.loglinear)$coef["smoking", c(1, 2)] est = RR(exp(est_se[1])) lowerRR = exp(est_se[1] - 1.96*est_se[2]) upperRR = exp(est_se[1] + 1.96*est_se[2]) evalue(est, lowerRR, upperRR) ## logistic regression -- obtain the conditional odds ratio lead.logistic = glm(lead ~ ., family = binomial(link = "logit"), data = lead[,-1]) est_se = summary(lead.logistic)$coef["smoking", c(1, 2)] est = OR(exp(est_se[1]), rare = FALSE) lowerOR = exp(est_se[1] - 1.96*est_se[2]) upperOR = exp(est_se[1] + 1.96*est_se[2]) evalue(est, lowerOR, upperOR) ## linear regression # standardizing conservatively by SD(Y) ols = lm(age ~ income, data = lead) est = OLS(ols$coefficients[2], sd = sd(lead$age)) # for a 1-unit increase in income evalue(est = est, se = summary(ols)$coefficients['income', 'Std. Error']) # for a 0.5-unit increase in income evalue(est = est, se = summary(ols)$coefficients['income', 'Std. Error'], delta = 0.5) # E-value for Cohen's d = 0.5 with SE = 0.25 evalue(est = MD(.5), se = .25) # compute E-value for HR = 0.56 with CI: [0.46, 0.69] # for a common outcome evalue(HR(0.56, rare = FALSE), lo = 0.46, hi = 0.69) # for a rare outcome evalue(HR(0.56, rare = TRUE), lo = 0.46, hi = 0.69) } \references{ \enumerate{ \item Ding & VanderWeele (2016). 
Sensitivity analysis without assumptions. \emph{Epidemiology.} 27(3), 368. \item VanderWeele & Ding (2017). Sensitivity analysis in observational research: Introducing the E-value. \emph{Annals of Internal Medicine.} 27(3), 368. } } \keyword{e-value}
/man/evalue.Rd
no_license
cran/EValue
R
false
true
5,471
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/EValue.R \name{evalue} \alias{evalue} \title{Compute an E-value for unmeasured confounding} \usage{ evalue(est, lo = NA, hi = NA, se = NA, delta = 1, true = c(0, 1), ...) } \arguments{ \item{est}{The effect estimate that was observed but which is suspected to be biased. A number of class "estimate" (constructed with \code{\link[=RR]{RR()}}, \code{\link[=OR]{OR()}}, \code{\link[=HR]{HR()}}, \code{\link[=OLS]{OLS()}}, or \code{\link[=MD]{MD()}}; for E-values for risk differences, see \code{\link[=evalues.RD]{evalues.RD()}}).} \item{lo}{Optional. Lower bound of the confidence interval. If not an object of class "estimate", assumed to be on the same scale as \code{est}.} \item{hi}{Optional. Upper bound of the confidence interval. If not an object of class "estimate", assumed to be on the same scale as \code{est}.} \item{se}{The standard error of the point estimate, for \code{est} of class "OLS"} \item{delta}{The contrast of interest in the exposure, for \code{est} of class "OLS"} \item{true}{A number to which to shift the observed estimate to. Defaults to 1 for ratio measures (\code{\link[=RR]{RR()}}, \code{\link[=OR]{OR()}}, \code{\link[=HR]{HR()}}) and 0 for additive measures (\code{\link[=OLS]{OLS()}}, \code{\link[=MD]{MD()}}).} \item{...}{Arguments passed to other methods.} } \description{ Returns a data frame containing point estimates, the lower confidence limit, and the upper confidence limit on the risk ratio scale (possibly through an approximate conversion) as well as E-values for the point estimate and the confidence interval limit closer to the null. } \details{ An E-value for unmeasured confounding is minimum strength of association, on the risk ratio scale, that unmeasured confounder(s) would need to have with both the treatment and the outcome to fully explain away a specific treatment–outcome association, conditional on the measured covariates. 
The estimate is converted appropriately before the E-value is calculated. See \link[=convert_measures]{conversion functions} for more details. The point estimate and confidence limits after conversion are returned, as is the E-value for the point estimate and the confidence limit closest to the proposed "true" value (by default, the null value.) For an \code{\link[=OLS]{OLS()}} estimate, the E-value is for linear regression with a continuous exposure and outcome. Regarding the continuous exposure, the choice of \code{delta} defines essentially a dichotomization in the exposure between hypothetical groups of subjects with exposures equal to an arbitrary value \emph{c} versus to another hypothetical group with exposures equal to \emph{c} + \code{delta}. For example, if resulting E-value is 2, this means that unmeasured confounder(s) would need to double the probability of a subject's having exposure equal to \emph{c} + \code{delta} instead of \emph{c}, and would also need to double the probability of being high versus low on the outcome, in which the cutoff for "high" versus "low" is arbitrary subject to some distributional assumptions (Chinn, 2000). 
} \examples{ # compute E-value for leukemia example in VanderWeele and Ding (2017) evalue(RR(0.80), 0.71, 0.91) # you can also pass just the point estimate # and return just the E-value for the point estimate with summary() summary(evalue(RR(0.80))) # demonstrate symmetry of E-value # this apparently causative association has same E-value as the above summary(evalue(RR(1 / 0.80))) # E-value for a non-null true value summary(evalue(RR(2), true = 1.5)) ## Hsu and Small (2013 Biometrics) Data ## sensitivity analysis after log-linear or logistic regression head(lead) ## log linear model -- obtain the conditional risk ratio lead.loglinear = glm(lead ~ ., family = binomial(link = "log"), data = lead[,-1]) est_se = summary(lead.loglinear)$coef["smoking", c(1, 2)] est = RR(exp(est_se[1])) lowerRR = exp(est_se[1] - 1.96*est_se[2]) upperRR = exp(est_se[1] + 1.96*est_se[2]) evalue(est, lowerRR, upperRR) ## logistic regression -- obtain the conditional odds ratio lead.logistic = glm(lead ~ ., family = binomial(link = "logit"), data = lead[,-1]) est_se = summary(lead.logistic)$coef["smoking", c(1, 2)] est = OR(exp(est_se[1]), rare = FALSE) lowerOR = exp(est_se[1] - 1.96*est_se[2]) upperOR = exp(est_se[1] + 1.96*est_se[2]) evalue(est, lowerOR, upperOR) ## linear regression # standardizing conservatively by SD(Y) ols = lm(age ~ income, data = lead) est = OLS(ols$coefficients[2], sd = sd(lead$age)) # for a 1-unit increase in income evalue(est = est, se = summary(ols)$coefficients['income', 'Std. Error']) # for a 0.5-unit increase in income evalue(est = est, se = summary(ols)$coefficients['income', 'Std. Error'], delta = 0.5) # E-value for Cohen's d = 0.5 with SE = 0.25 evalue(est = MD(.5), se = .25) # compute E-value for HR = 0.56 with CI: [0.46, 0.69] # for a common outcome evalue(HR(0.56, rare = FALSE), lo = 0.46, hi = 0.69) # for a rare outcome evalue(HR(0.56, rare = TRUE), lo = 0.46, hi = 0.69) } \references{ \enumerate{ \item Ding & VanderWeele (2016). 
Sensitivity analysis without assumptions. \emph{Epidemiology.} 27(3), 368. \item VanderWeele & Ding (2017). Sensitivity analysis in observational research: Introducing the E-value. \emph{Annals of Internal Medicine.} 27(3), 368. } } \keyword{e-value}
% Generated by roxygen2 (4.0.1): do not edit by hand \name{addTransform} \alias{addTransform} \title{Add a Transformation Function to a Distributed Data Object} \usage{ addTransform(obj, fn, name = NULL, params = NULL, packages = NULL) } \arguments{ \item{obj}{a distributed data object} \item{fn}{a function to be applied to each subset of \code{obj}} \item{name}{optional name of the transformation} \item{params}{a named list of parameters external to \code{obj} that are needed in the transformation function (most should be taken care of automatically such that this is rarely necessary to specify)} \item{packages}{a vector of R package names that contain functions used in \code{fn} (most should be taken care of automatically such that this is rarely necessary to specify)} } \description{ Add a transformation function to be applied to each subset of a distributed data object } \details{ When you add a transformation to a distributed data object, the transformation is not applied immediately, but is deferred until a function that kicks off a computation is done. These include \code{\link{divide}}, \code{\link{recombine}}, \code{\link{drJoin}}, \code{\link{drLapply}}, \code{\link{drFilter}}, \code{\link{drSample}}, \code{drSubset}. When any of these are invoked on an object with a transformation attached to it, the transformation will be applied in the map phase of computation prior to any other computation. The transformation will also be applied any time a subset of the data is requested. Thus although the data has not been physically transformed after a call of \code{addTransform}, we can think of it conceptually as already being transformed. When \code{addTransform} is called, it is tested on a subset of the data to make sure we have all of the necessary global variables and packages loaded necessary to portably perform the transformation. 
It is possible to add multiple transformations to a distributed data object, in which case they are applied in the order supplied, but only one transform should be necessary. }
/man/addTransform.Rd
permissive
linearregression/datadr
R
false
false
2,060
rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{addTransform} \alias{addTransform} \title{Add a Transformation Function to a Distributed Data Object} \usage{ addTransform(obj, fn, name = NULL, params = NULL, packages = NULL) } \arguments{ \item{obj}{a distributed data object} \item{fn}{a function to be applied to each subset of \code{obj}} \item{name}{optional name of the transformation} \item{params}{a named list of parameters external to \code{obj} that are needed in the transformation function (most should be taken care of automatically such that this is rarely necessary to specify)} \item{packages}{a vector of R package names that contain functions used in \code{fn} (most should be taken care of automatically such that this is rarely necessary to specify)} } \description{ Add a transformation function to be applied to each subset of a distributed data object } \details{ When you add a transformation to a distributed data object, the transformation is not applied immediately, but is deferred until a function that kicks off a computation is done. These include \code{\link{divide}}, \code{\link{recombine}}, \code{\link{drJoin}}, \code{\link{drLapply}}, \code{\link{drFilter}}, \code{\link{drSample}}, \code{drSubset}. When any of these are invoked on an object with a transformation attached to it, the transformation will be applied in the map phase of computation prior to any other computation. The transformation will also be applied any time a subset of the data is requested. Thus although the data has not been physically transformed after a call of \code{addTransform}, we can think of it conceptually as already being transformed. When \code{addTransform} is called, it is tested on a subset of the data to make sure we have all of the necessary global variables and packages loaded necessary to portably perform the transformation. 
It is possible to add multiple transformations to a distributed data object, in which case they are applied in the order supplied, but only one transform should be necessary. }
##These functions take a matrix, check to see if there is a cached version, ##and if not, creates a cached version of the inverse. ##The first function outputs a list of functions that can be called by the second function. makeCacheMatrix <- function(x = matrix()) { m<-NULL ##clear the cache set<-function(y) { x<<-y ##set the input matrix to the cache m<<-NULL ##clear the cache } get <- function() x ##get will display cached matrix savecache<-function(solve) m<<-solve ##inverted matrix saved to m getcache<-function() m ##getcache will display the cached inverted matrix list(set=set, get=get, savecache=savecache, getcache=getcache) ##output list of functions } ##The second function checks to see if there is a cached inverted matrix. ##If there is a cached result for that matrix, then it outputs the cached inverse. ##If not, then it computes the inverse for the matrix and stores it to cache. cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' m <- x$getcache() if(!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- solve(data, ...) x$savecache(m) m }
/cachematrix.R
no_license
gesserta/ProgrammingAssignment2
R
false
false
1,230
r
##These functions take a matrix, check to see if there is a cached version, ##and if not, creates a cached version of the inverse. ##The first function outputs a list of functions that can be called by the second function. makeCacheMatrix <- function(x = matrix()) { m<-NULL ##clear the cache set<-function(y) { x<<-y ##set the input matrix to the cache m<<-NULL ##clear the cache } get <- function() x ##get will display cached matrix savecache<-function(solve) m<<-solve ##inverted matrix saved to m getcache<-function() m ##getcache will display the cached inverted matrix list(set=set, get=get, savecache=savecache, getcache=getcache) ##output list of functions } ##The second function checks to see if there is a cached inverted matrix. ##If there is a cached result for that matrix, then it outputs the cached inverse. ##If not, then it computes the inverse for the matrix and stores it to cache. cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' m <- x$getcache() if(!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- solve(data, ...) x$savecache(m) m }
# # Fetch the project dataset # zipURL <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip' zipFile <- basename(URLdecode(zipURL)) if (!file.exists(zipFile)) { cat('Downloading', zipFile) download.file(zipURL, zipFile) } if (!all(sapply(unzip(zipFile, list=TRUE)$Name, file.exists))) { cat('Unzipping', zipFile) unzip(zipFile) }
/Project1/fetch.dataset.R
no_license
mbattersby/coursera-exdata-035
R
false
false
386
r
# # Fetch the project dataset # zipURL <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip' zipFile <- basename(URLdecode(zipURL)) if (!file.exists(zipFile)) { cat('Downloading', zipFile) download.file(zipURL, zipFile) } if (!all(sapply(unzip(zipFile, list=TRUE)$Name, file.exists))) { cat('Unzipping', zipFile) unzip(zipFile) }
#title: "BIO782P Statistics and Bioinformatics Assignment 1- Dataset 1" #Name: "Ishack M Mougamadou #Student ID: 150346599 #This analysis was run on R Studio Version 1.1.456 #Mac OS El Capitan #Intel i5 #4GB RAM #Please Note: A pull request has been submitted on GitHub ## Dataset 1: Marine microbial diversity #Imported the microbial dataset into Rstudio. marine_data = read.table("Datasets/part_1_student_1887.tdf", header = TRUE) # Imported the microbial dataset marine_data # view the data #Exploratory Data Analyses str(marine_data) # Explored the number and the type of variables (Numeric and Factor) of the dataset #Assigned the three variables to appropriate variable names. Microbial_Diversity = marine_data$UniFracInd # The microbial diversity is measured by UniFrac Latitude = marine_data$latitude # The latitude Season = marine_data$season # The Season #Summarised the microbial diversity dataset. aggregate(.~marine_data$latitude, marine_data[1], summary) # Summarised UniFrac based on the latitude aggregate(.~marine_data$season, marine_data[1], summary) # Summarised UniFrac based on the season #Created a frequency distribution graph of the UniFrac residuals. hist(Microbial_Diversity, col= "yellow", main="The Frequency Distribution of the UniFrac Residuals",xlab="UniFrac Residuals") # A frequency distribution graph of the UniFrac residuals ## Part 1: How does microbial diversity change with latitude? #Created a frequency distribution graph of the of UniFrac residuals (Microbial Diversity) based on the latitude. boxplot(Microbial_Diversity~Latitude, col=c("mistyrose","powderblue"), # Boxplot based on latitude main="The Distribution of Microbial Diversity based on the Latitude", # Title ylab= "Microbial Diversity (UniFrac)",xlab="Latitude", # X and Y label pch=4) # Change the shape of the outliers to a cross to make it for distinct #Tested for the assumption of normally distributed UniFrac residuals. #The diagnostic plots for the Microbial Diversity dataset. 
diagnostic_plot=lm(Microbial_Diversity~Latitude) # Model used for the diagnostic plots plot(diagnostic_plot) # Plotting the diagnostic plots of the model #Subsequently, due to non-normally distributed UniFrac residuals, the Man-Whitney U test (non-parametric) was used to identify if the latitude affected the microbial diversity. Please note, the function for the Wilcox and Mann-Whitney U test is the same in R Studio. wilcox.test(Microbial_Diversity~Latitude) # Mann-Whitney U test of the microbial diversity based on latitude ## Part 2: How does microbial diversity change with time of year? #Created a boxplot to see the distribution of the microbial diversity based on the two seasons. boxplot(Microbial_Diversity~Season,col=c("gold","royalblue"), # Boxplot based on seasons main="The Distribution of the Microbial Diversity based on the Seasons", # Title ylab= "Microbial Diversity (UniFrac)",xlab="Season", pch=4) # X and Y label with a cross for outliers #The Man-Whitney U test was used again to identify if the season affected microbial diversity. Please note, the function for the Wilcox and Mann-Whitney U test is the same in R Studio. wilcox.test(Microbial_Diversity~Season) # # Mann-Whitney U test of the microbial diversity based on season ## Part 3: Is there an interaction between the season, and location? #ANOVA was used to see if there was an interaction between the location and the time of year. The response variable was the Microbial Diversity (UniFrac). model1=lm(Microbial_Diversity~Latitude*Season) # Model to see if there is any interaction between the season and location based on the UniFrac (Microbial Diversity) anova(model1) # ANOVA was used to identify any significant differences in the means between the groups
/Ishack M Mougamadou 150346599 BIO782P Statistics Assignment 1/R Scripts/Assignment_1_Dataset 1.R
no_license
anhnguyendepocen/QMUL-MSc-BIO782P-stats-bioinfo
R
false
false
3,925
r
#title: "BIO782P Statistics and Bioinformatics Assignment 1- Dataset 1" #Name: "Ishack M Mougamadou #Student ID: 150346599 #This analysis was run on R Studio Version 1.1.456 #Mac OS El Capitan #Intel i5 #4GB RAM #Please Note: A pull request has been submitted on GitHub ## Dataset 1: Marine microbial diversity #Imported the microbial dataset into Rstudio. marine_data = read.table("Datasets/part_1_student_1887.tdf", header = TRUE) # Imported the microbial dataset marine_data # view the data #Exploratory Data Analyses str(marine_data) # Explored the number and the type of variables (Numeric and Factor) of the dataset #Assigned the three variables to appropriate variable names. Microbial_Diversity = marine_data$UniFracInd # The microbial diversity is measured by UniFrac Latitude = marine_data$latitude # The latitude Season = marine_data$season # The Season #Summarised the microbial diversity dataset. aggregate(.~marine_data$latitude, marine_data[1], summary) # Summarised UniFrac based on the latitude aggregate(.~marine_data$season, marine_data[1], summary) # Summarised UniFrac based on the season #Created a frequency distribution graph of the UniFrac residuals. hist(Microbial_Diversity, col= "yellow", main="The Frequency Distribution of the UniFrac Residuals",xlab="UniFrac Residuals") # A frequency distribution graph of the UniFrac residuals ## Part 1: How does microbial diversity change with latitude? #Created a frequency distribution graph of the of UniFrac residuals (Microbial Diversity) based on the latitude. boxplot(Microbial_Diversity~Latitude, col=c("mistyrose","powderblue"), # Boxplot based on latitude main="The Distribution of Microbial Diversity based on the Latitude", # Title ylab= "Microbial Diversity (UniFrac)",xlab="Latitude", # X and Y label pch=4) # Change the shape of the outliers to a cross to make it for distinct #Tested for the assumption of normally distributed UniFrac residuals. #The diagnostic plots for the Microbial Diversity dataset. 
diagnostic_plot=lm(Microbial_Diversity~Latitude) # Model used for the diagnostic plots plot(diagnostic_plot) # Plotting the diagnostic plots of the model #Subsequently, due to non-normally distributed UniFrac residuals, the Man-Whitney U test (non-parametric) was used to identify if the latitude affected the microbial diversity. Please note, the function for the Wilcox and Mann-Whitney U test is the same in R Studio. wilcox.test(Microbial_Diversity~Latitude) # Mann-Whitney U test of the microbial diversity based on latitude ## Part 2: How does microbial diversity change with time of year? #Created a boxplot to see the distribution of the microbial diversity based on the two seasons. boxplot(Microbial_Diversity~Season,col=c("gold","royalblue"), # Boxplot based on seasons main="The Distribution of the Microbial Diversity based on the Seasons", # Title ylab= "Microbial Diversity (UniFrac)",xlab="Season", pch=4) # X and Y label with a cross for outliers #The Man-Whitney U test was used again to identify if the season affected microbial diversity. Please note, the function for the Wilcox and Mann-Whitney U test is the same in R Studio. wilcox.test(Microbial_Diversity~Season) # # Mann-Whitney U test of the microbial diversity based on season ## Part 3: Is there an interaction between the season, and location? #ANOVA was used to see if there was an interaction between the location and the time of year. The response variable was the Microbial Diversity (UniFrac). model1=lm(Microbial_Diversity~Latitude*Season) # Model to see if there is any interaction between the season and location based on the UniFrac (Microbial Diversity) anova(model1) # ANOVA was used to identify any significant differences in the means between the groups
#' Fits a set of observations (random variable) to test whether is drawn from a certain distribution #' #' @param X A random sample to be fitted. #' @param n.obs A positive integer, is the length of the random sample to be generated #' @param ref Aumber of clusters to use by the kmeans function to split the distribution, if isn't a number, uses mclust classification by default. #' @param crt Criteria to be given to FDist() function #' @param plot FALSE. If TRUE, generates a plot of the density function. #' @param subplot FALSE. If TRUE, generates the plot of the mixed density function's partitions. #' @param p.val_min Minimum p.value to be given to non-reject the null hypothesis. #' #' @return A list with the density functions, a random sample, a data frame with the KS and AD p.values results, the corresponding plots an the random numbers generator functions #' @export #' #' @importFrom purrr map #' @importFrom purrr map_lgl #' @importFrom assertthat is.error #' @importFrom ADGofTest ad.test #' @importFrom MASS fitdistr #' @importFrom fitdistrplus fitdist #' @importFrom mclust Mclust #' @importFrom mclust mclustBIC #' @importFrom cowplot plot_grid #' @importFrom ggplot2 is.ggplot #' #' @examples #' #' set.seed(31109) #' X<-c(rnorm(193,189,12),rweibull(182,401,87),rgamma(190,40,19)) #' #' A_X<-FDistUlt(X,plot=TRUE,subplot=TRUE) #' #' A_X<-FDistUlt(X,plot=TRUE,subplot=TRUE,p.val_min=.005) #' #' # Functions generated #' A_X[[1]][[1]]() #' # Random sample #' A_X[[2]] #' #' #Distributions #' A_X[[3]] #' #' # Plots #' par(mfrow=c(1,2)) #' A_X[[4]][[1]] #' A_X[[4]][[2]] #' #' # More functions #' A_X[[5]][[1]]() #' #' FDistUlt<-function(X,n.obs=length(X),ref="OP",crt=1,plot=FALSE,subplot=FALSE,p.val_min=.05){ if(!is.numeric(ref)){}else{ if(ref>length(X)/3){warning("Number of clusters must be less than input length/3") return(NULL)}} desc<-function(X,fns=FALSE,ref.=ref,crt.=crt,subplot.=subplot,p.val_min.=p.val_min){ 
eval<-function(X,fns.=fns,crt.=crt,subplot.=subplot,p.val_min.=p.val_min){ FIT<-FDist(X,length(X),crit = crt,plot = subplot,p.val_min=p.val_min) FIT } div<-function(X,ref.=ref){ df<-data.frame(A=1:length(X),B=X) Enteros<-X-floor(X)==0 if(any(Enteros)){ if(all(Enteros)){ if(!is.numeric(ref)){ mod1<-mclust::Mclust(X,modelNames=c("E", "V"))$classification if(length(table(mod1))==1){ df$CL<-kmeans(df,2)$cluster }else{ df$CL<-mod1 } }else{ df$CL<-kmeans(df,ref)$cluster } }else{ df$CL<-ifelse(Enteros,1,2) } }else{ if(!is.numeric(ref)){ mod1<-mclust::Mclust(X)$classification if(length(table(mod1))==1){ df$CL<-kmeans(df,2)$cluster }else{ df$CL<-mod1 } }else{ df$CL<-kmeans(df,ref)$cluster } } CLS<-purrr::map(unique(df$CL),~df[df$CL==.x,2]) CLS return(CLS) } suppressWarnings(EV<-eval(X,fns)) if(is.null(EV)){ if(length(X)>40){ DV<-purrr::map(div(X),~desc(.x,fns)) return(DV) }else{ FN<-rnorm formals(FN)[1]<-length(X) formals(FN)[2]<-mean(X) formals(FN)[3]<-ifelse(length(X)==1,0,sd(X)) return(list(paste0("normal(",mean(X),",",ifelse(length(X)==1,0,sd(X)),")"),FN,FN(), data.frame(Dist="norm",AD_p.v=1,KS_p.v=1,estimate1=mean(X),estimate2=sd(X),estimateLL1=0,estimateLL2=1,PV_S=2) )) } }else{ return(EV) } } FCNS<-desc(X) flattenlist <- function(x){ morelists <- sapply(x, function(xprime) class(xprime)[1]=="list") out <- c(x[!morelists], unlist(x[morelists], recursive=FALSE)) if(sum(morelists)){ base::Recall(out) }else{ return(out) } } superficie<-flattenlist(FCNS) FUN<-superficie[purrr::map_lgl(superficie,~"function" %in% class(.x))] Global_FUN<-superficie[purrr::map_lgl(superficie,~"gl_fun" %in% class(.x))] Dist<-unlist(superficie[purrr::map_lgl(superficie,is.character)]) PLTS<-superficie[purrr::map_lgl(superficie,ggplot2::is.ggplot)] dfss<-superficie[purrr::map_lgl(superficie,~is.data.frame(.x))] PV<-do.call("rbind",dfss[purrr::map_lgl(dfss,~ncol(.x)==9)]) Len<-MA<-c() repp<-floor(n.obs/length(X))+1 for (OBS in 1:repp) { for (mst in 1:length(FUN)) { ljsd<-FUN[[mst]]() 
MA<-c(MA,ljsd) if(OBS==1){ Len<-c(Len,length(ljsd)/length(X)) } } } MA<-sample(MA,n.obs) pv1<-data.frame(Distribution=Dist[nchar(Dist)!=0],Dist_Prop=Len[nchar(Dist)!=0]) p.v<-try(cbind(pv1,PV)) if(assertthat::is.error(pv1)){p.v<-pv1} cp<-plt<-c() if(plot){ DF<-rbind(data.frame(A="Fit",DT=MA), data.frame(A="Real",DT=X)) plt <- ggplot2::ggplot(DF,ggplot2::aes(x=DF$DT,fill=DF$A)) + ggplot2::geom_density(alpha=0.55)+ggplot2::ggtitle("Original Dist.") plt } TPlts<-c() if(subplot){ cp<-cowplot::plot_grid(plotlist = PLTS, ncol = floor(sqrt(length(PLTS)))) } TPlts<-list(plt,cp) return(list(unlist(FUN),MA,p.v,TPlts,Global_FUN)) }
/R/FDistUlt.R
no_license
cran/FitUltD
R
false
false
5,337
r
#' Fits a set of observations (random variable) to test whether is drawn from a certain distribution #' #' @param X A random sample to be fitted. #' @param n.obs A positive integer, is the length of the random sample to be generated #' @param ref Aumber of clusters to use by the kmeans function to split the distribution, if isn't a number, uses mclust classification by default. #' @param crt Criteria to be given to FDist() function #' @param plot FALSE. If TRUE, generates a plot of the density function. #' @param subplot FALSE. If TRUE, generates the plot of the mixed density function's partitions. #' @param p.val_min Minimum p.value to be given to non-reject the null hypothesis. #' #' @return A list with the density functions, a random sample, a data frame with the KS and AD p.values results, the corresponding plots an the random numbers generator functions #' @export #' #' @importFrom purrr map #' @importFrom purrr map_lgl #' @importFrom assertthat is.error #' @importFrom ADGofTest ad.test #' @importFrom MASS fitdistr #' @importFrom fitdistrplus fitdist #' @importFrom mclust Mclust #' @importFrom mclust mclustBIC #' @importFrom cowplot plot_grid #' @importFrom ggplot2 is.ggplot #' #' @examples #' #' set.seed(31109) #' X<-c(rnorm(193,189,12),rweibull(182,401,87),rgamma(190,40,19)) #' #' A_X<-FDistUlt(X,plot=TRUE,subplot=TRUE) #' #' A_X<-FDistUlt(X,plot=TRUE,subplot=TRUE,p.val_min=.005) #' #' # Functions generated #' A_X[[1]][[1]]() #' # Random sample #' A_X[[2]] #' #' #Distributions #' A_X[[3]] #' #' # Plots #' par(mfrow=c(1,2)) #' A_X[[4]][[1]] #' A_X[[4]][[2]] #' #' # More functions #' A_X[[5]][[1]]() #' #' FDistUlt<-function(X,n.obs=length(X),ref="OP",crt=1,plot=FALSE,subplot=FALSE,p.val_min=.05){ if(!is.numeric(ref)){}else{ if(ref>length(X)/3){warning("Number of clusters must be less than input length/3") return(NULL)}} desc<-function(X,fns=FALSE,ref.=ref,crt.=crt,subplot.=subplot,p.val_min.=p.val_min){ 
eval<-function(X,fns.=fns,crt.=crt,subplot.=subplot,p.val_min.=p.val_min){ FIT<-FDist(X,length(X),crit = crt,plot = subplot,p.val_min=p.val_min) FIT } div<-function(X,ref.=ref){ df<-data.frame(A=1:length(X),B=X) Enteros<-X-floor(X)==0 if(any(Enteros)){ if(all(Enteros)){ if(!is.numeric(ref)){ mod1<-mclust::Mclust(X,modelNames=c("E", "V"))$classification if(length(table(mod1))==1){ df$CL<-kmeans(df,2)$cluster }else{ df$CL<-mod1 } }else{ df$CL<-kmeans(df,ref)$cluster } }else{ df$CL<-ifelse(Enteros,1,2) } }else{ if(!is.numeric(ref)){ mod1<-mclust::Mclust(X)$classification if(length(table(mod1))==1){ df$CL<-kmeans(df,2)$cluster }else{ df$CL<-mod1 } }else{ df$CL<-kmeans(df,ref)$cluster } } CLS<-purrr::map(unique(df$CL),~df[df$CL==.x,2]) CLS return(CLS) } suppressWarnings(EV<-eval(X,fns)) if(is.null(EV)){ if(length(X)>40){ DV<-purrr::map(div(X),~desc(.x,fns)) return(DV) }else{ FN<-rnorm formals(FN)[1]<-length(X) formals(FN)[2]<-mean(X) formals(FN)[3]<-ifelse(length(X)==1,0,sd(X)) return(list(paste0("normal(",mean(X),",",ifelse(length(X)==1,0,sd(X)),")"),FN,FN(), data.frame(Dist="norm",AD_p.v=1,KS_p.v=1,estimate1=mean(X),estimate2=sd(X),estimateLL1=0,estimateLL2=1,PV_S=2) )) } }else{ return(EV) } } FCNS<-desc(X) flattenlist <- function(x){ morelists <- sapply(x, function(xprime) class(xprime)[1]=="list") out <- c(x[!morelists], unlist(x[morelists], recursive=FALSE)) if(sum(morelists)){ base::Recall(out) }else{ return(out) } } superficie<-flattenlist(FCNS) FUN<-superficie[purrr::map_lgl(superficie,~"function" %in% class(.x))] Global_FUN<-superficie[purrr::map_lgl(superficie,~"gl_fun" %in% class(.x))] Dist<-unlist(superficie[purrr::map_lgl(superficie,is.character)]) PLTS<-superficie[purrr::map_lgl(superficie,ggplot2::is.ggplot)] dfss<-superficie[purrr::map_lgl(superficie,~is.data.frame(.x))] PV<-do.call("rbind",dfss[purrr::map_lgl(dfss,~ncol(.x)==9)]) Len<-MA<-c() repp<-floor(n.obs/length(X))+1 for (OBS in 1:repp) { for (mst in 1:length(FUN)) { ljsd<-FUN[[mst]]() 
MA<-c(MA,ljsd) if(OBS==1){ Len<-c(Len,length(ljsd)/length(X)) } } } MA<-sample(MA,n.obs) pv1<-data.frame(Distribution=Dist[nchar(Dist)!=0],Dist_Prop=Len[nchar(Dist)!=0]) p.v<-try(cbind(pv1,PV)) if(assertthat::is.error(pv1)){p.v<-pv1} cp<-plt<-c() if(plot){ DF<-rbind(data.frame(A="Fit",DT=MA), data.frame(A="Real",DT=X)) plt <- ggplot2::ggplot(DF,ggplot2::aes(x=DF$DT,fill=DF$A)) + ggplot2::geom_density(alpha=0.55)+ggplot2::ggtitle("Original Dist.") plt } TPlts<-c() if(subplot){ cp<-cowplot::plot_grid(plotlist = PLTS, ncol = floor(sqrt(length(PLTS)))) } TPlts<-list(plt,cp) return(list(unlist(FUN),MA,p.v,TPlts,Global_FUN)) }
#' Adjustment for multiple testing #' #' Adjusts the variables \code{n_status_c} (the number of women with an ascertained HIV status) and \code{totpos_c} (the total number of women with #' HIV) when multiple testing is suspected based on the presence of HIV testing coverage values greater than 100\%. #' #' This function was designed to adjust the variables \code{n_status_c} and \code{totpos_c}, using one of the three adjustment options, when multiple testing is #' suspected (i.e. coverage values greater than 100\% are present). The three options include: 1) impute; 2) remove; and, 3) set to maximum. #' #' @param data The ANC-RT dataset. The functions \link[ANCRTAdjust]{name_var} and \link[ANCRTAdjust]{data_clean} should have been run on the data to properly #' prepare the data for use here. The data set must have the following variables: #' \itemize{ #' \item \code{n_status_c}: Cleaned \code{n_status} (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{n_status_c.impute}: \code{n_status_c} adjusted using the impute adjustment option (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{n_status_c.remove}: \code{n_status_c} adjusted using the remove adjustment option (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{n_status_c.setmax}: \code{n_status_c} adjusted using the set to maximum adjustment option (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{totpos_c}: Cleaned \code{totpos} (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{totpos_c.impute}: \code{totpos_c} adjusted using the impute adjustment option (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{totpos_c.remove}: \code{totpos_c} adjusted using the remove adjustment option (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{totpos_c.setmax}: \code{totpos_c} adjusted using the set to maximum adjustment option (generated using 
the \link[ANCRTAdjust]{data_clean} function) #' \item \code{n_clients}: The number of women who attended the facility during the time period #' } #' @param adjust_option The adjustment option chosen. Possible options include: #' \itemize{ #' \item \code{impute}: When \code{n_status_c} > \code{n_clients}, replace \code{n_status_c} with its facility's mean coverage #' (only including valid coverage observations to calculate the mean) multiplied by \code{n_clients}. #' \item \code{remove}: When \code{n_status_c} > \code{n_clients}, replace \code{n_status_c} as missing. #' \item \code{setmax}: When \code{n_status_c} > \code{n_clients}, replace \code{n_status_c} with \code{n_clients}. #' \item \code{none}: When \code{n_status_c} > \code{n_clients}, no adjustment is made. #' } #' @author Mathieu Maheu-Giroux #' @author Brittany Blouin #' #' @return A data set with the variables \code{n_status_c} and \code{totpos_c}, adjusted for multiple testing using the selected adjustment option, is returned. The #' adjustment option variables (e.g. \code{n_status_c.impute}, \code{n_status_c.remove}, \code{totpos_c.impute}, etc.) are removed from the dataset. Coverage and prevalence #' (for each facility-time period) are calculated using the newly adjusted variables. 
#' #' @export mt_adjust <- function(data, adjust_option) { if (adjust_option == "impute") { data$n_status_c <- data$n_status_c.impute data$totpos_c <- data$totpos_c.impute data$n_status_c.impute <- data$n_status_c.remove <- data$n_status_c.setmax <- NULL data$totpos_c.impute <- data$totpos_c.remove <- data$totpos_c.setmax <- NULL } if (adjust_option == "remove") { data$n_status_c <- data$n_status_c.remove data$totpos_c <- data$totpos_c.remove data$n_status_c.impute <- data$n_status_c.remove <- data$n_status_c.setmax <- NULL data$totpos_c.impute <- data$totpos_c.remove <- data$totpos_c.setmax <- NULL } if (adjust_option == "setmax") { data$n_status_c <- data$n_status_c.setmax data$totpos_c <- data$totpos_c.setmax data$n_status_c.impute <- data$n_status_c.remove <- data$n_status_c.setmax <- NULL data$totpos_c.impute <- data$totpos_c.remove <- data$totpos_c.setmax <- NULL } if (adjust_option == "none") { data$n_status_c.impute <- data$n_status_c.remove <- data$n_status_c.setmax <- NULL data$totpos_c.impute <- data$totpos_c.remove <- data$totpos_c.setmax <- NULL } data$Cov <- ifelse(data$n_clients > 0 & !is.na(data$n_clients), data$n_status_c / data$n_clients, NA) data$Prv <- ifelse(data$n_status_c > 0 & !is.na(data$n_status_c), data$totpos_c / data$n_status_c, NA) return(data) }
/R/mt_adjust.R
permissive
ellessenne/ANCRTAdjust
R
false
false
4,710
r
#' Adjustment for multiple testing #' #' Adjusts the variables \code{n_status_c} (the number of women with an ascertained HIV status) and \code{totpos_c} (the total number of women with #' HIV) when multiple testing is suspected based on the presence of HIV testing coverage values greater than 100\%. #' #' This function was designed to adjust the variables \code{n_status_c} and \code{totpos_c}, using one of the three adjustment options, when multiple testing is #' suspected (i.e. coverage values greater than 100\% are present). The three options include: 1) impute; 2) remove; and, 3) set to maximum. #' #' @param data The ANC-RT dataset. The functions \link[ANCRTAdjust]{name_var} and \link[ANCRTAdjust]{data_clean} should have been run on the data to properly #' prepare the data for use here. The data set must have the following variables: #' \itemize{ #' \item \code{n_status_c}: Cleaned \code{n_status} (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{n_status_c.impute}: \code{n_status_c} adjusted using the impute adjustment option (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{n_status_c.remove}: \code{n_status_c} adjusted using the remove adjustment option (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{n_status_c.setmax}: \code{n_status_c} adjusted using the set to maximum adjustment option (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{totpos_c}: Cleaned \code{totpos} (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{totpos_c.impute}: \code{totpos_c} adjusted using the impute adjustment option (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{totpos_c.remove}: \code{totpos_c} adjusted using the remove adjustment option (generated using the \link[ANCRTAdjust]{data_clean} function) #' \item \code{totpos_c.setmax}: \code{totpos_c} adjusted using the set to maximum adjustment option (generated using 
the \link[ANCRTAdjust]{data_clean} function) #' \item \code{n_clients}: The number of women who attended the facility during the time period #' } #' @param adjust_option The adjustment option chosen. Possible options include: #' \itemize{ #' \item \code{impute}: When \code{n_status_c} > \code{n_clients}, replace \code{n_status_c} with its facility's mean coverage #' (only including valid coverage observations to calculate the mean) multiplied by \code{n_clients}. #' \item \code{remove}: When \code{n_status_c} > \code{n_clients}, replace \code{n_status_c} as missing. #' \item \code{setmax}: When \code{n_status_c} > \code{n_clients}, replace \code{n_status_c} with \code{n_clients}. #' \item \code{none}: When \code{n_status_c} > \code{n_clients}, no adjustment is made. #' } #' @author Mathieu Maheu-Giroux #' @author Brittany Blouin #' #' @return A data set with the variables \code{n_status_c} and \code{totpos_c}, adjusted for multiple testing using the selected adjustment option, is returned. The #' adjustment option variables (e.g. \code{n_status_c.impute}, \code{n_status_c.remove}, \code{totpos_c.impute}, etc.) are removed from the dataset. Coverage and prevalence #' (for each facility-time period) are calculated using the newly adjusted variables. 
#' #' @export mt_adjust <- function(data, adjust_option) { if (adjust_option == "impute") { data$n_status_c <- data$n_status_c.impute data$totpos_c <- data$totpos_c.impute data$n_status_c.impute <- data$n_status_c.remove <- data$n_status_c.setmax <- NULL data$totpos_c.impute <- data$totpos_c.remove <- data$totpos_c.setmax <- NULL } if (adjust_option == "remove") { data$n_status_c <- data$n_status_c.remove data$totpos_c <- data$totpos_c.remove data$n_status_c.impute <- data$n_status_c.remove <- data$n_status_c.setmax <- NULL data$totpos_c.impute <- data$totpos_c.remove <- data$totpos_c.setmax <- NULL } if (adjust_option == "setmax") { data$n_status_c <- data$n_status_c.setmax data$totpos_c <- data$totpos_c.setmax data$n_status_c.impute <- data$n_status_c.remove <- data$n_status_c.setmax <- NULL data$totpos_c.impute <- data$totpos_c.remove <- data$totpos_c.setmax <- NULL } if (adjust_option == "none") { data$n_status_c.impute <- data$n_status_c.remove <- data$n_status_c.setmax <- NULL data$totpos_c.impute <- data$totpos_c.remove <- data$totpos_c.setmax <- NULL } data$Cov <- ifelse(data$n_clients > 0 & !is.na(data$n_clients), data$n_status_c / data$n_clients, NA) data$Prv <- ifelse(data$n_status_c > 0 & !is.na(data$n_status_c), data$totpos_c / data$n_status_c, NA) return(data) }
player_model<-function(name) { verdi=subset(all_days_with_fantasy_evaluation,Nome==name) verdi <- verdi[with(verdi,order(verdi$Year,verdi$Day)) , ] ts<-verdi[c("Day","FantasyEvaluation","Voto")] ts$z <- c(NA,diff(ts$Voto)) mod<-auto.arima(scale(ts$z,scale=FALSE)) mod<-auto.arima(scale(verdi$Voto,scale=FALSE)) return(mod); } err<-function(mod) { return (var(mod$residuals,na.rm=TRUE)); } fore<-function(mod) { val<-forecast(mod,1)$mean[1] delta<-mean(ts$z[-1])+val fore<-tail(verdi$Voto, n=1)+delta return(fore) } trivial_predictor<-function(name){ return (mean( subset(all_days_with_fantasy_evaluation,Nome==name)$Voto ) ); } trivial_error<-function(name){ return (var( subset(all_days_with_fantasy_evaluation,Nome==name)$Voto ) ); } mine<-as.data.frame(unique(subset(players_synthesis_by_me,Owner=="ME")$Name)) mods<-apply(mine,1,player_model) res<-lapply(mods,fore) triv<-apply(mine,1,trivial_predictor) arima_err<-lapply(mods,err) triv_err<-apply(mine,1,trivial_error) mine$fore=t(as.data.frame(res)) mine$triv=as.data.frame(triv) mine$err_arima=t(as.data.frame(arima_err)) mine$triv_err=as.data.frame(triv_err) View(mine) rm(arima_err) rm(res) rm(fit) rm(mod) rm(ts)
/r/arima_loop.r
no_license
fbambusi/fantasy-football
R
false
false
1,218
r
player_model<-function(name) { verdi=subset(all_days_with_fantasy_evaluation,Nome==name) verdi <- verdi[with(verdi,order(verdi$Year,verdi$Day)) , ] ts<-verdi[c("Day","FantasyEvaluation","Voto")] ts$z <- c(NA,diff(ts$Voto)) mod<-auto.arima(scale(ts$z,scale=FALSE)) mod<-auto.arima(scale(verdi$Voto,scale=FALSE)) return(mod); } err<-function(mod) { return (var(mod$residuals,na.rm=TRUE)); } fore<-function(mod) { val<-forecast(mod,1)$mean[1] delta<-mean(ts$z[-1])+val fore<-tail(verdi$Voto, n=1)+delta return(fore) } trivial_predictor<-function(name){ return (mean( subset(all_days_with_fantasy_evaluation,Nome==name)$Voto ) ); } trivial_error<-function(name){ return (var( subset(all_days_with_fantasy_evaluation,Nome==name)$Voto ) ); } mine<-as.data.frame(unique(subset(players_synthesis_by_me,Owner=="ME")$Name)) mods<-apply(mine,1,player_model) res<-lapply(mods,fore) triv<-apply(mine,1,trivial_predictor) arima_err<-lapply(mods,err) triv_err<-apply(mine,1,trivial_error) mine$fore=t(as.data.frame(res)) mine$triv=as.data.frame(triv) mine$err_arima=t(as.data.frame(arima_err)) mine$triv_err=as.data.frame(triv_err) View(mine) rm(arima_err) rm(res) rm(fit) rm(mod) rm(ts)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/time-zones.r \name{with_tz} \alias{with_tz} \title{Get date-time in a different time zone} \usage{ with_tz(time, tzone = "") } \arguments{ \item{time}{a POSIXct, POSIXlt, Date, chron date-time object or a data.frame object. When a data.frame all POSIXt elements of a data.frame are processed with \code{with_tz()} and new data.frame is returned.} \item{tzone}{a character string containing the time zone to convert to. R must recognize the name contained in the string as a time zone on your system.} } \value{ a POSIXct object in the updated time zone } \description{ with_tz returns a date-time as it would appear in a different time zone. The actual moment of time measured does not change, just the time zone it is measured in. with_tz defaults to the Universal Coordinated time zone (UTC) when an unrecognized time zone is inputted. See \code{\link[=Sys.timezone]{Sys.timezone()}} for more information on how R recognizes time zones. } \examples{ x <- as.POSIXct("2009-08-07 00:00:01", tz = "America/New_York") with_tz(x, "GMT") } \seealso{ \code{\link[=force_tz]{force_tz()}} } \keyword{chron} \keyword{manip}
/man/with_tz.Rd
no_license
infotroph/lubridate
R
false
true
1,195
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/time-zones.r \name{with_tz} \alias{with_tz} \title{Get date-time in a different time zone} \usage{ with_tz(time, tzone = "") } \arguments{ \item{time}{a POSIXct, POSIXlt, Date, chron date-time object or a data.frame object. When a data.frame all POSIXt elements of a data.frame are processed with \code{with_tz()} and new data.frame is returned.} \item{tzone}{a character string containing the time zone to convert to. R must recognize the name contained in the string as a time zone on your system.} } \value{ a POSIXct object in the updated time zone } \description{ with_tz returns a date-time as it would appear in a different time zone. The actual moment of time measured does not change, just the time zone it is measured in. with_tz defaults to the Universal Coordinated time zone (UTC) when an unrecognized time zone is inputted. See \code{\link[=Sys.timezone]{Sys.timezone()}} for more information on how R recognizes time zones. } \examples{ x <- as.POSIXct("2009-08-07 00:00:01", tz = "America/New_York") with_tz(x, "GMT") } \seealso{ \code{\link[=force_tz]{force_tz()}} } \keyword{chron} \keyword{manip}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/covariateDistributions.R \name{binomialW} \alias{binomialW} \title{binomialW} \usage{ binomialW(n, num, p, ...) } \description{ Draw from binomial(num, p) }
/man/binomialW.Rd
no_license
jlstiles/haltmle.sim
R
false
true
235
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/covariateDistributions.R \name{binomialW} \alias{binomialW} \title{binomialW} \usage{ binomialW(n, num, p, ...) } \description{ Draw from binomial(num, p) }
library("xml2") library("dplyr") library("stringr") library("rlist") library("xts") library("quantmod") library("PortfolioAnalytics") library("DEoptim") library("ROI") require("ROI.plugin.glpk") require("ROI.plugin.quadprog") #function(finnal_fund_all){ ### Read data from last milestone setwd("C:/Users/EDZ/Desktop/理享家/Portfolio_Proj") #can be specified data = read.csv("finalfund.csv",header = TRUE, fileEncoding = "UTF-8", sep=",", na.strings = "") #data = final_fund_all rows = nrow(data) fund_num = ncol(data) - 1 #tmp = as.character(data[,1]) #data[,1] = as.Date(tmp, format="%Y/%m/%d") #####These are the code for last version ###Complete all funds for (j in 1:fund_num){ for (i in 1:rows){ if(is.na(data[i,j+1])){ if(i != rows){ data[i,j+1] = (data[i-1,j+1] + data[i+1,j+1]) /2 } else{ data[i,j+1] = data[i-1,j+1] } } } } ###These are the code for last version if(FALSE){ if(FALSE){ ###Step 1: find the fund with most dates fund_most_date = 1 MAX_date = 1 for (j in 1:fund_num){ i = 1 while(!is.na(data[i,j+1])) i = i + 1 if(i > MAX_date) { MAX_date = i fund_most_date = j } } } if(fund_most_date != fund_num){ for (j in (fund_most_date+1):fund_num){ for (i in i:rows){ if(is.na(data[i,j+1])){ if(i != rows){ #new input format do not require data moving if(FALSE) { #move the dates data[(i+1):rows,2*j] = data[i:(rows-1),2*j] data[(i+1):rows, 2*j+1] = data[i:(rows-1),2*j+1] data[i,2*j] = data[i,2*fund_most_date] data[i,2*j+1] = (data[i-1,2*j+1] + data[i+1,2*j+1]) /2 } data[i,j+1] = (data[i-1,j+1] + data[i+1,j+1]) /2 } else{ data[i,j+1] = data[i-1,j+1] } } } } } if(fund_most_date != fund_num){ for (j in (fund_most_date+1):fund_num){ for (i in i :rows){ if(is.na(data[i,2*j]) || (data[i,2*j] > data[i, 2*fund_most_date])){ if(i != rows){ #move the dates data[(i+1):rows,2*j] = data[i:(rows-1),2*j] data[(i+1):rows, 2*j+1] = data[i:(rows-1),2*j+1] data[i,2*j] = data[i,2*fund_most_date] data[i,2*j+1] = (data[i-1,2*j+1] + data[i+1,2*j+1]) /2 } else{ data[i,2*j] = data[i,2*fund_most_date] 
data[i,2*j+1] = data[i,2*fund_most_date+1] } } } } } } ### Construct the system by Marko model #Initialization rows = nrow(data) returns_tmp = c() return = data.frame() for (j in 1:fund_num){ #return<-data[,j+1] #for(i in 1:rows-1){ # return[i+1] = data[i+1,j+1] / data[i,j+1] - 1.00 #} data_tmp <- data.frame(data[,1],data[,j+1]) data_tmp <- xts(data_tmp[,2],as.Date(data_tmp[,1])) return <- periodReturn(data_tmp,period="daily",type="log") returns_tmp <- cbind(returns_tmp,return) } returns <- returns_tmp[,1:5] rownames(returns) <- data[,1] colnames(returns) <- c("fd1", "fd2", "fd3", "fd4","fd5") funds <- colnames(returns) init <- portfolio.spec(assets = funds) init <- add.constraint(portfolio=init, type="leverage",min_sum=0.99, max_sum=1.01) init <- add.constraint(portfolio=init, type="box", min=0.10, max=0.40) #print.default(init) #6.2 Maximize mean return with ROI max_ret <- add.objective(portfolio=init, type="return", name="mean") opt_maxret <- optimize.portfolio(R=returns, portfolio=max_ret, optimize_method="ROI",trace=TRUE) #print(opt_maxret) #plot(opt_maxret, risk.col="StdDev", return.col="mean", main="Maximum Return Optimization", chart.assets=TRUE, xlim = c(0,1), ylim = c(0,30)) #6.3 Minimize variance with ROI minvar <- add.objective(portfolio=init, type="risk", name="var") opt_minvar <- optimize.portfolio(R=returns, portfolio=minvar, optimize_method="ROI", trace=TRUE) print(opt_minvar) plot(opt_minvar, risk.col="StdDev", return.col="mean", main="Minimum Variance Optimization", chart.assets=TRUE, xlim = c(0,1), ylim = c(0,200)) #6.4 Maximize quadratic utility with ROI qu <- add.objective(portfolio=init, type="return", name="mean") qu <- add.objective(portfolio=qu, type="risk", name="var", risk_aversion=0.25) #can be specified opt_qu <- optimize.portfolio(R=returns, portfolio=qu, optimize_method="ROI", trace=TRUE) #print(opt_qu) #plot(opt_qu, risk.col="StdDev", return.col="mean", main="Quadratic Utility Optimization", chart.assets=TRUE, xlim = c(0,1), ylim = 
c(0,200)) #6.5 Minimize expected tail loss with ROI etl <- add.objective(portfolio=init, type="risk", name="ETL") opt_etl <- optimize.portfolio(R=returns, portfolio=etl, optimize_method="ROI", trace=TRUE) #print(opt_etl) #plot(opt_etl, risk.col="ES", return.col="mean", main="ETL Optimization", chart.assets=TRUE, xlim = c(0,1), ylim = c(0,200)) #6.6 Maximize mean return per unit ETL with random portfolios meanETL <- add.objective(portfolio = init, type = "return", name = "mean") meanETL <- add.objective(portfolio = meanETL, type="risk", name="ETL", arguments=list(p=0.95)) opt_meanETL <- optimize.portfolio(R=returns, portfolio = meanETL, optimize_method = "random", trace = TRUE, search_size = 2000) #print(opt_meanETL) stats_meanETL <-extractStats(opt_meanETL) #head(stats_meanETL) #plot(opt_meanETL, risk.col="ETL", return.col="mean", main="mean-ETL Optimization", neighbors=25) w = extractWeights(min_risk) #return w
/12_31/mark_model.r
permissive
dxcv/RoboAdvisor-1
R
false
false
6,171
r
library("xml2") library("dplyr") library("stringr") library("rlist") library("xts") library("quantmod") library("PortfolioAnalytics") library("DEoptim") library("ROI") require("ROI.plugin.glpk") require("ROI.plugin.quadprog") #function(finnal_fund_all){ ### Read data from last milestone setwd("C:/Users/EDZ/Desktop/理享家/Portfolio_Proj") #can be specified data = read.csv("finalfund.csv",header = TRUE, fileEncoding = "UTF-8", sep=",", na.strings = "") #data = final_fund_all rows = nrow(data) fund_num = ncol(data) - 1 #tmp = as.character(data[,1]) #data[,1] = as.Date(tmp, format="%Y/%m/%d") #####These are the code for last version ###Complete all funds for (j in 1:fund_num){ for (i in 1:rows){ if(is.na(data[i,j+1])){ if(i != rows){ data[i,j+1] = (data[i-1,j+1] + data[i+1,j+1]) /2 } else{ data[i,j+1] = data[i-1,j+1] } } } } ###These are the code for last version if(FALSE){ if(FALSE){ ###Step 1: find the fund with most dates fund_most_date = 1 MAX_date = 1 for (j in 1:fund_num){ i = 1 while(!is.na(data[i,j+1])) i = i + 1 if(i > MAX_date) { MAX_date = i fund_most_date = j } } } if(fund_most_date != fund_num){ for (j in (fund_most_date+1):fund_num){ for (i in i:rows){ if(is.na(data[i,j+1])){ if(i != rows){ #new input format do not require data moving if(FALSE) { #move the dates data[(i+1):rows,2*j] = data[i:(rows-1),2*j] data[(i+1):rows, 2*j+1] = data[i:(rows-1),2*j+1] data[i,2*j] = data[i,2*fund_most_date] data[i,2*j+1] = (data[i-1,2*j+1] + data[i+1,2*j+1]) /2 } data[i,j+1] = (data[i-1,j+1] + data[i+1,j+1]) /2 } else{ data[i,j+1] = data[i-1,j+1] } } } } } if(fund_most_date != fund_num){ for (j in (fund_most_date+1):fund_num){ for (i in i :rows){ if(is.na(data[i,2*j]) || (data[i,2*j] > data[i, 2*fund_most_date])){ if(i != rows){ #move the dates data[(i+1):rows,2*j] = data[i:(rows-1),2*j] data[(i+1):rows, 2*j+1] = data[i:(rows-1),2*j+1] data[i,2*j] = data[i,2*fund_most_date] data[i,2*j+1] = (data[i-1,2*j+1] + data[i+1,2*j+1]) /2 } else{ data[i,2*j] = data[i,2*fund_most_date] 
data[i,2*j+1] = data[i,2*fund_most_date+1] } } } } } } ### Construct the system by Marko model #Initialization rows = nrow(data) returns_tmp = c() return = data.frame() for (j in 1:fund_num){ #return<-data[,j+1] #for(i in 1:rows-1){ # return[i+1] = data[i+1,j+1] / data[i,j+1] - 1.00 #} data_tmp <- data.frame(data[,1],data[,j+1]) data_tmp <- xts(data_tmp[,2],as.Date(data_tmp[,1])) return <- periodReturn(data_tmp,period="daily",type="log") returns_tmp <- cbind(returns_tmp,return) } returns <- returns_tmp[,1:5] rownames(returns) <- data[,1] colnames(returns) <- c("fd1", "fd2", "fd3", "fd4","fd5") funds <- colnames(returns) init <- portfolio.spec(assets = funds) init <- add.constraint(portfolio=init, type="leverage",min_sum=0.99, max_sum=1.01) init <- add.constraint(portfolio=init, type="box", min=0.10, max=0.40) #print.default(init) #6.2 Maximize mean return with ROI max_ret <- add.objective(portfolio=init, type="return", name="mean") opt_maxret <- optimize.portfolio(R=returns, portfolio=max_ret, optimize_method="ROI",trace=TRUE) #print(opt_maxret) #plot(opt_maxret, risk.col="StdDev", return.col="mean", main="Maximum Return Optimization", chart.assets=TRUE, xlim = c(0,1), ylim = c(0,30)) #6.3 Minimize variance with ROI minvar <- add.objective(portfolio=init, type="risk", name="var") opt_minvar <- optimize.portfolio(R=returns, portfolio=minvar, optimize_method="ROI", trace=TRUE) print(opt_minvar) plot(opt_minvar, risk.col="StdDev", return.col="mean", main="Minimum Variance Optimization", chart.assets=TRUE, xlim = c(0,1), ylim = c(0,200)) #6.4 Maximize quadratic utility with ROI qu <- add.objective(portfolio=init, type="return", name="mean") qu <- add.objective(portfolio=qu, type="risk", name="var", risk_aversion=0.25) #can be specified opt_qu <- optimize.portfolio(R=returns, portfolio=qu, optimize_method="ROI", trace=TRUE) #print(opt_qu) #plot(opt_qu, risk.col="StdDev", return.col="mean", main="Quadratic Utility Optimization", chart.assets=TRUE, xlim = c(0,1), ylim = 
c(0,200)) #6.5 Minimize expected tail loss with ROI etl <- add.objective(portfolio=init, type="risk", name="ETL") opt_etl <- optimize.portfolio(R=returns, portfolio=etl, optimize_method="ROI", trace=TRUE) #print(opt_etl) #plot(opt_etl, risk.col="ES", return.col="mean", main="ETL Optimization", chart.assets=TRUE, xlim = c(0,1), ylim = c(0,200)) #6.6 Maximize mean return per unit ETL with random portfolios meanETL <- add.objective(portfolio = init, type = "return", name = "mean") meanETL <- add.objective(portfolio = meanETL, type="risk", name="ETL", arguments=list(p=0.95)) opt_meanETL <- optimize.portfolio(R=returns, portfolio = meanETL, optimize_method = "random", trace = TRUE, search_size = 2000) #print(opt_meanETL) stats_meanETL <-extractStats(opt_meanETL) #head(stats_meanETL) #plot(opt_meanETL, risk.col="ETL", return.col="mean", main="mean-ETL Optimization", neighbors=25) w = extractWeights(min_risk) #return w
#betas #given two inputs, this calculates RMSE. calc.rmse <- function(trueVal, estVal){ rmse <- (sum((trueVal - estVal)^2)/length(estVal))^.5 #calculates RMSE, works even one or both are vectors rmse } #for a given beta (just a number, i.e. to input B0 RMSE do betaToEval = 0 not betaToEval = "B0") input.rmse <- function(betaToEval, megaList, trueBetaVal, results){ #this asks for the results array to input the results, so we don't need more code to do that modelNames <- names(megaList) #get names of models run bandwidthNames <- names(megaList[[2]]) #megaList[[1]] may only have 1 bandwidth for the GGG model, as it is now model 1 should have all bandwidths though #this loops through the names of each model and bandwidth #if models are done in an unusual order this should still input the correct results for(model in modelNames){ for(bandwidth in 1:length(bandwidthNames)){ #this collects the estimated coefficient, the indexing is a bit intense but this should get the coefficient estimates for the correct model and bandwidth estCoeff <- megaList[[model]][[bandwidth]][[2]][,(betaToEval + 1)] #R indexes start at 1 so to get B0 you need to add 1 #if the names of the dimensions of megaList are not GGG, LGG, ... use the next two lines to get the model name, this will store it in the results section properly too modelName <- megaList[[model]][[bandwidth]][[1]] model <- modelName #calc rmse rmse <- calc.rmse(trueBetaVal, estCoeff) #and put it into the results matrix. 
Again, this is done by model/BW name not number for if only a subset of models are run results[bandwidth, model, paste0("B", betaToEval, "RMSE")] <- rmse #by indexing to "B0RMSE" we can add metrics fairly easily, this will still put this result in the right place } } #returns the modified results input results } #GCV input.gcv <- function(y, megaList, results){ modelNames <- names(megaList) #get names of models run numBandwidths <- length(megaList[[2]]) #megaList[[1]] may only have 1 bandwidth for the GGG model, as it is now model 1 should have all bandwidths though #this loops through the names of each model and bandwidth #if models are done in an unusual order this should still input the correct results for(model in modelNames){ for(bandwidth in 1:numBandwidths){ #this collects the estimated coefficient, the indexing is a bit intense but this should get the coefficient estimates for the correct model and bandwidth yhat <- megaList[[model]][[bandwidth]][[3]] #estimated ys levs <- megaList[[model]][[bandwidth]][[4]] #leverages #if the names of the dimensions of megaList are not GGG, LGG, ... use the next two lines to get the model name, this will store it in the results section properly too #modelName <- megaList[[model]][[bandwidth]][[1]] #model <- modelName #now to extract the number of non-stationary variables splitModel <- strsplit(model, "") #this splits the model into a list of length 3, so it needs to be indexed to 1 in the next step numNonstationary <- sum(splitModel[[1]] == "L") #calc gcv gcv <- GCV(y, yhat, levs, numNonstationary) #and put it into the results matrix. 
Again, this is done by model/BW name not number for if only a subset of models are run results[bandwidth, model, "GCV"] <- gcv #by indexing to "GCV" we can add metrics fairly easily, this will still put this result in the right place } } #returns the modified results input results } #AIC calc.aic <- function(y, yhat, lev, nonStationary){ n <- length(y) v1 = nonStationary + sum(lev) errorSD <- apply(y - yhat, 2, sd) aic <- 2*n*log(errorSD) + n*log(2*pi) + n*(n + v1)/(n-2-v1) aic } input.aic <- function(y, megaList, results){ modelNames <- names(megaList) #get names of models run numBandwidths <- length(megaList[[2]]) #megaList[[1]] may only have 1 bandwidth for the GGG model, as it is now model 1 should have all bandwidths though #this loops through the names of each model and bandwidth #if models are done in an unusual order this should still input the correct results for(model in modelNames){ for(bandwidth in 1:numBandwidths){ #this collects the estimated coefficient, the indexing is a bit intense but this should get the coefficient estimates for the correct model and bandwidth yhat <- megaList[[model]][[bandwidth]][[3]] #estimated ys levs <- megaList[[model]][[bandwidth]][[4]] #leverages #if the names of the dimensions of megaList are not GGG, LGG, ... use the next two lines to get the model name, this will store it in the results section properly too #modelName <- megaList[[model]][[bandwidth]][[1]] #model <- modelName #now to extract the number of non-stationary variables splitModel <- strsplit(model, "") #this splits the model into a list of length 3, so it needs to be indexed to 1 in the next step numNonstationary <- sum(splitModel[[1]] == "L") #calc gcv aic <- calc.aic(y, yhat, levs, numNonstationary) #and put it into the results matrix. 
Again, this is done by model/BW name not number for if only a subset of models are run results[bandwidth, model, "AIC"] <- aic #by indexing to "AIC" we can add metrics fairly easily, this will still put this result in the right place } } #returns the modified results input results } #SCV calc.scv <- function(dep.var, yhats.without) { numer = ((dep.var - yhats.without)^2) denom = rowSums(numer) stan.CV.values = colSums(numer/denom) stan.CV.values } input.scv <- function(y, megaList, results){ modelNames <- names(megaList) #get names of models run numBandwidths <- length(megaList[[2]]) #megaList[[1]] may only have 1 bandwidth for the GGG model, as it is now model 1 should have all bandwidths though n <- length(y) #this loops through the names of each model and bandwidth #if models are done in an unusual order this should still input the correct results #first, we need to loop through everything and extract the fitted values without for(model in modelNames[-1]){ #this ignores the first model, which I assusme is GGG fittedValuesWithoutMat <- matrix(NA, n, numBandwidths) #this matrix should make calculating the SCV easier; it wil be filled with the fitted values without the current observation (the row number) for(bandwidth in 1:numBandwidths){ #this collects the estimated coefficient, the indexing is a bit intense but this should get the coefficient estimates for the correct model and bandwidth yhatWithout <- megaList[[model]][[bandwidth]][[5]] #estimated ys without obs fittedValuesWithoutMat[,bandwidth] <- yhatWithout #putting them in the column } #if the names of the dimensions of megaList are not GGG, LGG, ... use the next two lines to get the model name, this will store it in the results section properly too #modelName <- megaList[[model]][[bandwidth]][[1]] #model <- modelName #calc scv scv <- calc.scv(y, fittedValuesWithoutMat) #and put it into the results matrix. 
Again, this is done by model/BW name not number for if only a subset of models are run results[, model, "SCV"] <- scv #by indexing to "scv" we can add metrics fairly easily, this will still put this result in the right place } #returns the modified results input results } calc.loocv <- function(y, yhatsWithout){ sum((y - yhatsWithout)^2) } input.loocv <- function(y, megaList, results){ modelNames <- names(megaList) #get names of models run numBandwidths <- length(megaList[[2]]) #megaList[[1]] may only have 1 bandwidth for the GGG model, as it is now model 1 should have all bandwidths though n <- length(y) #this loops through the names of each model and bandwidth #if models are done in an unusual order this should still input the correct results #first, we need to loop through everything and extract the fitted values without for(model in modelNames){ for(bandwidth in 1:numBandwidths){ #this collects the estimated coefficient, the indexing is a bit intense but this should get the coefficient estimates for the correct model and bandwidth yhatWithout <- megaList[[model]][[bandwidth]][[5]] #estimated ys without obs loocv <- calc.loocv(y, yhatWithout) #if the names of the dimensions of megaList are not GGG, LGG, ... use the next two lines to get the model name, this will store it in the results section properly too #modelName <- megaList[[model]][[bandwidth]][[1]] #model <- modelName #calc scv #and put it into the results matrix. 
Again, this is done by model/BW name not number for if only a subset of models are run results[bandwidth, model, "LOOCV"] <- loocv #by indexing to "loocv" we can add metrics fairly easily, this will still put this result in the right place } } #returns the modified results input results } #adding ranks, the metrics are ranked accross ALL models and bandwidths from lowest to highest #so if bandwidth 7, model 2, GCV Rank = 8, then model 2 using bandwidth 7 has the 8th lowest GCV score of all models and bandwidths rank.results <- function(results, metrics){ for(metric in metrics){ metricVals <- results[,,metric] #pulls out the metrics sortedMetrics <- sort(metricVals, na.last = T) #sort ranks the metrics from smallest to largest and returns them as a vector metricRank <- results[,,metric] #this coppies the NA locations which are dropped in the sort for(rank in 1:length(sortedMetrics)){ index <- which(metricVals == sortedMetrics[rank]) #pulls out the cell number in metricVals that is the rank lowest metricRank[index] <- rank #replaces that cell with its appropriate ranking } results[,,paste0(metric, " Rank")] <- metricRank #puts the new matrix into results } results #and spits out the output } resultsToKeep.gen <- function(results, trueModelNumber, metrics, metricRanks){ uberOutput <- matrix(NA, nrow = 12, ncol = 2 + length(metrics) + length(metricRanks)) #generate the output colnames(uberOutput) <- c("Model Number", "Bandwidth", metrics, metricRanks) #puts the column names in place, the last are where the rankings for each metric will be stored rownames(uberOutput) <- c("True Model AIC", "True Model GCV", "True Model SCV", "True Model B0RMSE", "True Model B1RMSE", "True Model B2RMSE", "AIC", "GCV", "SCV", "B0RMSE", "B1RMSE", "B2RMSE") #input the true data for(metric in metrics){ minMetricTrue <- min(results[,trueModelNumber, metric], na.rm = T) #find the smallest value of the metric for the true model minMetricTrueBW <- which(minMetricTrue == results, arr.ind = T)[1] 
#this picks out the bandwidth number uberOutput[paste0("True Model ", metric), "Model Number"] <- trueModelNumber #put true model into the output uberOutput[paste0("True Model ", metric), "Bandwidth"] <- minMetricTrueBW #and its bandwidth uberOutput[paste0("True Model ", metric), 3:14] <- results[minMetricTrueBW, trueModelNumber, ] #and filling in every thing else } #now the unrestricted minimization for(metric in metrics){ minMetric <- min(results[,, metric], na.rm = T) minMetricBW <- which(minMetric == results, arr.ind = T)[1] #this picks out the bandwidth number minMetricModel <- which(minMetric == results, arr.ind = T)[2] #and the model number uberOutput[metric, "Model Number"] <- minMetricModel uberOutput[metric, "Bandwidth"] <- minMetricBW #this just returns the bandwidth number (1 through 7) uberOutput[metric, 3:14] <- results[minMetricBW, minMetricModel, ] } uberOutput }
/MixedGWR/Results Input Functions.R
no_license
aswoboda/LWR-Simulations
R
false
false
11,869
r
#betas #given two inputs, this calculates RMSE. calc.rmse <- function(trueVal, estVal){ rmse <- (sum((trueVal - estVal)^2)/length(estVal))^.5 #calculates RMSE, works even one or both are vectors rmse } #for a given beta (just a number, i.e. to input B0 RMSE do betaToEval = 0 not betaToEval = "B0") input.rmse <- function(betaToEval, megaList, trueBetaVal, results){ #this asks for the results array to input the results, so we don't need more code to do that modelNames <- names(megaList) #get names of models run bandwidthNames <- names(megaList[[2]]) #megaList[[1]] may only have 1 bandwidth for the GGG model, as it is now model 1 should have all bandwidths though #this loops through the names of each model and bandwidth #if models are done in an unusual order this should still input the correct results for(model in modelNames){ for(bandwidth in 1:length(bandwidthNames)){ #this collects the estimated coefficient, the indexing is a bit intense but this should get the coefficient estimates for the correct model and bandwidth estCoeff <- megaList[[model]][[bandwidth]][[2]][,(betaToEval + 1)] #R indexes start at 1 so to get B0 you need to add 1 #if the names of the dimensions of megaList are not GGG, LGG, ... use the next two lines to get the model name, this will store it in the results section properly too modelName <- megaList[[model]][[bandwidth]][[1]] model <- modelName #calc rmse rmse <- calc.rmse(trueBetaVal, estCoeff) #and put it into the results matrix. 
Again, this is done by model/BW name not number for if only a subset of models are run results[bandwidth, model, paste0("B", betaToEval, "RMSE")] <- rmse #by indexing to "B0RMSE" we can add metrics fairly easily, this will still put this result in the right place } } #returns the modified results input results } #GCV input.gcv <- function(y, megaList, results){ modelNames <- names(megaList) #get names of models run numBandwidths <- length(megaList[[2]]) #megaList[[1]] may only have 1 bandwidth for the GGG model, as it is now model 1 should have all bandwidths though #this loops through the names of each model and bandwidth #if models are done in an unusual order this should still input the correct results for(model in modelNames){ for(bandwidth in 1:numBandwidths){ #this collects the estimated coefficient, the indexing is a bit intense but this should get the coefficient estimates for the correct model and bandwidth yhat <- megaList[[model]][[bandwidth]][[3]] #estimated ys levs <- megaList[[model]][[bandwidth]][[4]] #leverages #if the names of the dimensions of megaList are not GGG, LGG, ... use the next two lines to get the model name, this will store it in the results section properly too #modelName <- megaList[[model]][[bandwidth]][[1]] #model <- modelName #now to extract the number of non-stationary variables splitModel <- strsplit(model, "") #this splits the model into a list of length 3, so it needs to be indexed to 1 in the next step numNonstationary <- sum(splitModel[[1]] == "L") #calc gcv gcv <- GCV(y, yhat, levs, numNonstationary) #and put it into the results matrix. 
Again, this is done by model/BW name not number for if only a subset of models are run results[bandwidth, model, "GCV"] <- gcv #by indexing to "GCV" we can add metrics fairly easily, this will still put this result in the right place } } #returns the modified results input results } #AIC calc.aic <- function(y, yhat, lev, nonStationary){ n <- length(y) v1 = nonStationary + sum(lev) errorSD <- apply(y - yhat, 2, sd) aic <- 2*n*log(errorSD) + n*log(2*pi) + n*(n + v1)/(n-2-v1) aic } input.aic <- function(y, megaList, results){ modelNames <- names(megaList) #get names of models run numBandwidths <- length(megaList[[2]]) #megaList[[1]] may only have 1 bandwidth for the GGG model, as it is now model 1 should have all bandwidths though #this loops through the names of each model and bandwidth #if models are done in an unusual order this should still input the correct results for(model in modelNames){ for(bandwidth in 1:numBandwidths){ #this collects the estimated coefficient, the indexing is a bit intense but this should get the coefficient estimates for the correct model and bandwidth yhat <- megaList[[model]][[bandwidth]][[3]] #estimated ys levs <- megaList[[model]][[bandwidth]][[4]] #leverages #if the names of the dimensions of megaList are not GGG, LGG, ... use the next two lines to get the model name, this will store it in the results section properly too #modelName <- megaList[[model]][[bandwidth]][[1]] #model <- modelName #now to extract the number of non-stationary variables splitModel <- strsplit(model, "") #this splits the model into a list of length 3, so it needs to be indexed to 1 in the next step numNonstationary <- sum(splitModel[[1]] == "L") #calc gcv aic <- calc.aic(y, yhat, levs, numNonstationary) #and put it into the results matrix. 
Again, this is done by model/BW name not number for if only a subset of models are run results[bandwidth, model, "AIC"] <- aic #by indexing to "AIC" we can add metrics fairly easily, this will still put this result in the right place } } #returns the modified results input results } #SCV calc.scv <- function(dep.var, yhats.without) { numer = ((dep.var - yhats.without)^2) denom = rowSums(numer) stan.CV.values = colSums(numer/denom) stan.CV.values } input.scv <- function(y, megaList, results){ modelNames <- names(megaList) #get names of models run numBandwidths <- length(megaList[[2]]) #megaList[[1]] may only have 1 bandwidth for the GGG model, as it is now model 1 should have all bandwidths though n <- length(y) #this loops through the names of each model and bandwidth #if models are done in an unusual order this should still input the correct results #first, we need to loop through everything and extract the fitted values without for(model in modelNames[-1]){ #this ignores the first model, which I assusme is GGG fittedValuesWithoutMat <- matrix(NA, n, numBandwidths) #this matrix should make calculating the SCV easier; it wil be filled with the fitted values without the current observation (the row number) for(bandwidth in 1:numBandwidths){ #this collects the estimated coefficient, the indexing is a bit intense but this should get the coefficient estimates for the correct model and bandwidth yhatWithout <- megaList[[model]][[bandwidth]][[5]] #estimated ys without obs fittedValuesWithoutMat[,bandwidth] <- yhatWithout #putting them in the column } #if the names of the dimensions of megaList are not GGG, LGG, ... use the next two lines to get the model name, this will store it in the results section properly too #modelName <- megaList[[model]][[bandwidth]][[1]] #model <- modelName #calc scv scv <- calc.scv(y, fittedValuesWithoutMat) #and put it into the results matrix. 
Again, this is done by model/BW name not number for if only a subset of models are run results[, model, "SCV"] <- scv #by indexing to "scv" we can add metrics fairly easily, this will still put this result in the right place } #returns the modified results input results } calc.loocv <- function(y, yhatsWithout){ sum((y - yhatsWithout)^2) } input.loocv <- function(y, megaList, results){ modelNames <- names(megaList) #get names of models run numBandwidths <- length(megaList[[2]]) #megaList[[1]] may only have 1 bandwidth for the GGG model, as it is now model 1 should have all bandwidths though n <- length(y) #this loops through the names of each model and bandwidth #if models are done in an unusual order this should still input the correct results #first, we need to loop through everything and extract the fitted values without for(model in modelNames){ for(bandwidth in 1:numBandwidths){ #this collects the estimated coefficient, the indexing is a bit intense but this should get the coefficient estimates for the correct model and bandwidth yhatWithout <- megaList[[model]][[bandwidth]][[5]] #estimated ys without obs loocv <- calc.loocv(y, yhatWithout) #if the names of the dimensions of megaList are not GGG, LGG, ... use the next two lines to get the model name, this will store it in the results section properly too #modelName <- megaList[[model]][[bandwidth]][[1]] #model <- modelName #calc scv #and put it into the results matrix. 
Again, this is done by model/BW name not number for if only a subset of models are run results[bandwidth, model, "LOOCV"] <- loocv #by indexing to "loocv" we can add metrics fairly easily, this will still put this result in the right place } } #returns the modified results input results } #adding ranks, the metrics are ranked accross ALL models and bandwidths from lowest to highest #so if bandwidth 7, model 2, GCV Rank = 8, then model 2 using bandwidth 7 has the 8th lowest GCV score of all models and bandwidths rank.results <- function(results, metrics){ for(metric in metrics){ metricVals <- results[,,metric] #pulls out the metrics sortedMetrics <- sort(metricVals, na.last = T) #sort ranks the metrics from smallest to largest and returns them as a vector metricRank <- results[,,metric] #this coppies the NA locations which are dropped in the sort for(rank in 1:length(sortedMetrics)){ index <- which(metricVals == sortedMetrics[rank]) #pulls out the cell number in metricVals that is the rank lowest metricRank[index] <- rank #replaces that cell with its appropriate ranking } results[,,paste0(metric, " Rank")] <- metricRank #puts the new matrix into results } results #and spits out the output } resultsToKeep.gen <- function(results, trueModelNumber, metrics, metricRanks){ uberOutput <- matrix(NA, nrow = 12, ncol = 2 + length(metrics) + length(metricRanks)) #generate the output colnames(uberOutput) <- c("Model Number", "Bandwidth", metrics, metricRanks) #puts the column names in place, the last are where the rankings for each metric will be stored rownames(uberOutput) <- c("True Model AIC", "True Model GCV", "True Model SCV", "True Model B0RMSE", "True Model B1RMSE", "True Model B2RMSE", "AIC", "GCV", "SCV", "B0RMSE", "B1RMSE", "B2RMSE") #input the true data for(metric in metrics){ minMetricTrue <- min(results[,trueModelNumber, metric], na.rm = T) #find the smallest value of the metric for the true model minMetricTrueBW <- which(minMetricTrue == results, arr.ind = T)[1] 
#this picks out the bandwidth number uberOutput[paste0("True Model ", metric), "Model Number"] <- trueModelNumber #put true model into the output uberOutput[paste0("True Model ", metric), "Bandwidth"] <- minMetricTrueBW #and its bandwidth uberOutput[paste0("True Model ", metric), 3:14] <- results[minMetricTrueBW, trueModelNumber, ] #and filling in every thing else } #now the unrestricted minimization for(metric in metrics){ minMetric <- min(results[,, metric], na.rm = T) minMetricBW <- which(minMetric == results, arr.ind = T)[1] #this picks out the bandwidth number minMetricModel <- which(minMetric == results, arr.ind = T)[2] #and the model number uberOutput[metric, "Model Number"] <- minMetricModel uberOutput[metric, "Bandwidth"] <- minMetricBW #this just returns the bandwidth number (1 through 7) uberOutput[metric, 3:14] <- results[minMetricBW, minMetricModel, ] } uberOutput }
# UI file for Final Project # # created 11/06/2017 # # used for the UI design. # ## UI Function ui<- navbarPage( ##link to css.file theme = "2.css", # theme = shinytheme("simplex"), ##Project Title "World Trade with USA", tabPanel("Home", htmlOutput("blankspace"), titlePanel("World Trade with USA"), h4(htmlOutput("Introduction")), h5(htmlOutput("teammates")), h5(htmlOutput("thanks")) ), tabPanel("Data", tabsetPanel( tabPanel("Origin Data", dataTableOutput("data.origin") ), tabPanel("Processed Data", selectInput(inputId = "data.selection", label = "Select the data.frame", choices = c("data for clustering", "data for 3D globe", "data for 2D globe"), selected ="data for 2D globe"), dataTableOutput("selection") ), tabPanel("Text Data", tags$iframe(style="height:800px; width:100%; scrolling=yes", src=link ) ) # tabPanel("Text Data", # imageOutput("imp_pdf",width="500px",height="500px") # ), # # tabPanel( # # tags$div( # class = "container", # # row( # col(3, textInput("pdfurl", "PDF URL")) # ), # row( # col(6, htmlOutput('pdfviewer')), # col(6, tags$iframe(style="height:600px; width:100%", src="http://localhost/ressources/pdf/R-Intro.pdf")) # ) # ) # ) ) ), # tabsetPanel( # # using iframe along with tags() within tab to display pdf with scroll, height and width could be adjusted # tabPanel("Reference", # tags$iframe(style="height:400px; width:100%; scrolling=yes", # src="https://cran.r-project.org/doc/manuals/r-release/R-intro.pdf")) # # tabPanel("Summary"), # # tabPanel("Plot") # ), tabPanel("Why these goods", titlePanel("Some text Mining result"), selectInput(inputId = "year", label = "Select the year for text mining", choices = c(2003:2017), selected =2017), plotOutput("wordcloud") ), ## 3D Globe tab tabPanel("3D Globe", titlePanel("Products traded between USA and the world"), absolutePanel(id = "controls", class = "panel panel-default", draggable = TRUE, top = 180, left = 60, right = "auto", bottom = "auto", width = 350, height = "auto", h2("3D Explorer"), 
radioButtons(inputId = "type", label = "Choose import/export", choices = c('Export','Import'), selected ='Import'), sliderInput(inputId = "year_3D", label = "Select a year", value = 1996, min =1996, max =2016), sliderInput(inputId = "number_countries", label = "Top Countries in Trade", value = 10,min = 1,max = 50), selectInput(inputId = "commodity_3D", label = "Select the commodity", choices = c('Annual Aggregate', 'Silk', 'Cotton', 'Wheat', 'Rice', 'Barley', 'Maize', 'Other.Cereals', 'Coffee','Cocoa', 'Tea', 'Milk', 'Juice'), selected ='Coffee') ), globeOutput("Globe",width="100%",height="650px") ## reference: https://rpubs.com/aagarwal29/r3dglobe ), ## end 3D Globe tab ## fitted new data, this panel works, 20171201 ## 2D Map tab tabPanel("2D Map", titlePanel("Products imported to USA from world"), leafletOutput("mymap",width = "100%", height = 600), absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE, draggable = TRUE, top = 180, left = 60, right = "auto", bottom = "auto", width = 350, height = "auto", h2("2D Explorer"), radioButtons(inputId = "type_2D", label = "Choose import/export", choices = c('Export','Import'), selected ='Import'), sliderInput(inputId = "year_2D", label = "Select a year", value = 2017, min =1996, max =2017), sliderInput(inputId = "num_countries", label = "Top Countries in Trade", value = 20,min = 1,max = 50), selectInput(inputId = "commodity_2D", label = "Select the commodity", choices = c('Annual Aggregate', 'Silk', 'Cotton', 'Wheat', 'Rice', 'Barley', 'Maize', 'Other.Cereals', 'Coffee','Cocoa', 'Tea', 'Milk', 'Juice'), selected ='Cocoa') ) ), ## end 2D Map tab ## fitted new data, this panel works, 20171201 ## Summary Statistics tab ##Regional Findings tabset #tabPanel("Regional Findings", # ##Continent & Region # tabPanel("Regional statistics", # titlePanel("Continent & Region"), # sidebarLayout( # sidebarPanel( # selectInput(inputId = "commodity", # label = "choose the commodity", # choices = c("Coffee,Tea"), # 
selected ='Coffee'), # sliderInput(inputId = "year", # label = "Select a year", # value = 2017, min =1996, max =2017), # width = 3 # # ), # # mainPanel( # # ## todo to be completed with some charts and some graphs # ) # # ) # ), ### Tree Map tabPanel("Market Share", titlePanel("Market Share of Countries"), sidebarLayout( sidebarPanel( selectInput(inputId = "com_tree", label = "Select the commodity", choices = c('Silk', 'Cotton', 'Wheat', 'Rice', 'Barley', 'Maize', 'Other.Cereals', 'Coffee','Cocoa', 'Tea', 'Milk', 'Juice'), # here should delete the "Annual Aggregate" option selected ='Coffee'), sliderInput( inputId = "year_tree", label = "Select a year", value = 2017, min =1996, max =2017), sliderInput(inputId = "number_countries_tree", label = "Top Countries in Trade", value = 10,min = 1,max = 20), width = 3 ), mainPanel( plotOutput("treemap",width = "100%", height = 600), absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE, draggable = TRUE, top = 600, left = 20, right = "auto", bottom = "auto", width = 350, height = "auto", plotOutput("ggplot",width="100%",height="250px")) ) ) ), ### end Tree Map #), ## begin the clustering Panel, based on the kmeans() function tabPanel("Clustering Analysis", titlePanel("Clustering Analysis"), sidebarLayout( sidebarPanel( sliderInput(inputId = "number_clusters", label = "Number of Clusters", value = 5,min = 2,max = 10), sliderInput( inputId = "year_cluster", label = "Select a year", value = 2017, min =1996, max =2017), width = 3 ), mainPanel( plotlyOutput("cluster", width = "100%", height = "400px"), textOutput("text_1"), textOutput("text_2"), dataTableOutput("mytable") ) ) ) ## end Clustering tab )
/Shiny Environment/ui.R
no_license
leemingee/trade2017fall
R
false
false
11,308
r
# UI file for Final Project # # created 11/06/2017 # # used for the UI design. # ## UI Function ui<- navbarPage( ##link to css.file theme = "2.css", # theme = shinytheme("simplex"), ##Project Title "World Trade with USA", tabPanel("Home", htmlOutput("blankspace"), titlePanel("World Trade with USA"), h4(htmlOutput("Introduction")), h5(htmlOutput("teammates")), h5(htmlOutput("thanks")) ), tabPanel("Data", tabsetPanel( tabPanel("Origin Data", dataTableOutput("data.origin") ), tabPanel("Processed Data", selectInput(inputId = "data.selection", label = "Select the data.frame", choices = c("data for clustering", "data for 3D globe", "data for 2D globe"), selected ="data for 2D globe"), dataTableOutput("selection") ), tabPanel("Text Data", tags$iframe(style="height:800px; width:100%; scrolling=yes", src=link ) ) # tabPanel("Text Data", # imageOutput("imp_pdf",width="500px",height="500px") # ), # # tabPanel( # # tags$div( # class = "container", # # row( # col(3, textInput("pdfurl", "PDF URL")) # ), # row( # col(6, htmlOutput('pdfviewer')), # col(6, tags$iframe(style="height:600px; width:100%", src="http://localhost/ressources/pdf/R-Intro.pdf")) # ) # ) # ) ) ), # tabsetPanel( # # using iframe along with tags() within tab to display pdf with scroll, height and width could be adjusted # tabPanel("Reference", # tags$iframe(style="height:400px; width:100%; scrolling=yes", # src="https://cran.r-project.org/doc/manuals/r-release/R-intro.pdf")) # # tabPanel("Summary"), # # tabPanel("Plot") # ), tabPanel("Why these goods", titlePanel("Some text Mining result"), selectInput(inputId = "year", label = "Select the year for text mining", choices = c(2003:2017), selected =2017), plotOutput("wordcloud") ), ## 3D Globe tab tabPanel("3D Globe", titlePanel("Products traded between USA and the world"), absolutePanel(id = "controls", class = "panel panel-default", draggable = TRUE, top = 180, left = 60, right = "auto", bottom = "auto", width = 350, height = "auto", h2("3D Explorer"), 
radioButtons(inputId = "type", label = "Choose import/export", choices = c('Export','Import'), selected ='Import'), sliderInput(inputId = "year_3D", label = "Select a year", value = 1996, min =1996, max =2016), sliderInput(inputId = "number_countries", label = "Top Countries in Trade", value = 10,min = 1,max = 50), selectInput(inputId = "commodity_3D", label = "Select the commodity", choices = c('Annual Aggregate', 'Silk', 'Cotton', 'Wheat', 'Rice', 'Barley', 'Maize', 'Other.Cereals', 'Coffee','Cocoa', 'Tea', 'Milk', 'Juice'), selected ='Coffee') ), globeOutput("Globe",width="100%",height="650px") ## reference: https://rpubs.com/aagarwal29/r3dglobe ), ## end 3D Globe tab ## fitted new data, this panel works, 20171201 ## 2D Map tab tabPanel("2D Map", titlePanel("Products imported to USA from world"), leafletOutput("mymap",width = "100%", height = 600), absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE, draggable = TRUE, top = 180, left = 60, right = "auto", bottom = "auto", width = 350, height = "auto", h2("2D Explorer"), radioButtons(inputId = "type_2D", label = "Choose import/export", choices = c('Export','Import'), selected ='Import'), sliderInput(inputId = "year_2D", label = "Select a year", value = 2017, min =1996, max =2017), sliderInput(inputId = "num_countries", label = "Top Countries in Trade", value = 20,min = 1,max = 50), selectInput(inputId = "commodity_2D", label = "Select the commodity", choices = c('Annual Aggregate', 'Silk', 'Cotton', 'Wheat', 'Rice', 'Barley', 'Maize', 'Other.Cereals', 'Coffee','Cocoa', 'Tea', 'Milk', 'Juice'), selected ='Cocoa') ) ), ## end 2D Map tab ## fitted new data, this panel works, 20171201 ## Summary Statistics tab ##Regional Findings tabset #tabPanel("Regional Findings", # ##Continent & Region # tabPanel("Regional statistics", # titlePanel("Continent & Region"), # sidebarLayout( # sidebarPanel( # selectInput(inputId = "commodity", # label = "choose the commodity", # choices = c("Coffee,Tea"), # 
selected ='Coffee'), # sliderInput(inputId = "year", # label = "Select a year", # value = 2017, min =1996, max =2017), # width = 3 # # ), # # mainPanel( # # ## todo to be completed with some charts and some graphs # ) # # ) # ), ### Tree Map tabPanel("Market Share", titlePanel("Market Share of Countries"), sidebarLayout( sidebarPanel( selectInput(inputId = "com_tree", label = "Select the commodity", choices = c('Silk', 'Cotton', 'Wheat', 'Rice', 'Barley', 'Maize', 'Other.Cereals', 'Coffee','Cocoa', 'Tea', 'Milk', 'Juice'), # here should delete the "Annual Aggregate" option selected ='Coffee'), sliderInput( inputId = "year_tree", label = "Select a year", value = 2017, min =1996, max =2017), sliderInput(inputId = "number_countries_tree", label = "Top Countries in Trade", value = 10,min = 1,max = 20), width = 3 ), mainPanel( plotOutput("treemap",width = "100%", height = 600), absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE, draggable = TRUE, top = 600, left = 20, right = "auto", bottom = "auto", width = 350, height = "auto", plotOutput("ggplot",width="100%",height="250px")) ) ) ), ### end Tree Map #), ## begin the clustering Panel, based on the kmeans() function tabPanel("Clustering Analysis", titlePanel("Clustering Analysis"), sidebarLayout( sidebarPanel( sliderInput(inputId = "number_clusters", label = "Number of Clusters", value = 5,min = 2,max = 10), sliderInput( inputId = "year_cluster", label = "Select a year", value = 2017, min =1996, max =2017), width = 3 ), mainPanel( plotlyOutput("cluster", width = "100%", height = "400px"), textOutput("text_1"), textOutput("text_2"), dataTableOutput("mytable") ) ) ) ## end Clustering tab )
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summarize_seahorse.R \name{summarize_seahorse} \alias{summarize_seahorse} \title{Summarize seahorse data by sample and region} \usage{ summarize_seahorse(x, injections = c("basal", "oligomycin", "fccp", "rotenone")) } \arguments{ \item{x}{A data.frame} \item{injections}{The names of the injections} } \description{ This function takes the long format table exported by the XF Wave software for the Mitochondrial stress test and returns a matrix of means and standard deviations for each region of the assay (e.g. basal, oligomycin, fccp, and rotenone). The GroupName column must have the sample names. The final measurement of each region is used for the mean and standard deviation across wells. } \references{ Ramirez AK, Lynes MD, Shamsi F, Xue R, Tseng YH, Kahn CR, Kasif S, Dreyfuss JM. Integrating Extracellular Flux Measurements and Genome-Scale Modeling Reveals Differences between Brown and White Adipocytes. Cell Rep 2017 Dec; 21(11): 3040-3048. } \author{ Alfred Ramirez, Jonathan Dreyfuss }
/man/summarize_seahorse.Rd
permissive
jdreyf/sybilxf
R
false
true
1,089
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summarize_seahorse.R \name{summarize_seahorse} \alias{summarize_seahorse} \title{Summarize seahorse data by sample and region} \usage{ summarize_seahorse(x, injections = c("basal", "oligomycin", "fccp", "rotenone")) } \arguments{ \item{x}{A data.frame} \item{injections}{The names of the injections} } \description{ This function takes the long format table exported by the XF Wave software for the Mitochondrial stress test and returns a matrix of means and standard deviations for each region of the assay (e.g. basal, oligomycin, fccp, and rotenone). The GroupName column must have the sample names. The final measurement of each region is used for the mean and standard deviation across wells. } \references{ Ramirez AK, Lynes MD, Shamsi F, Xue R, Tseng YH, Kahn CR, Kasif S, Dreyfuss JM. Integrating Extracellular Flux Measurements and Genome-Scale Modeling Reveals Differences between Brown and White Adipocytes. Cell Rep 2017 Dec; 21(11): 3040-3048. } \author{ Alfred Ramirez, Jonathan Dreyfuss }
# Test6.R # This program tests two recursive functions on lists. source ("List.R") cons_a_list <- function (r, list) { my_list <- list; if (r != 0) { my_list <- cons_a_list (r - 1, cons (r, list)); } return (my_list); } equal <- function (list1, list2) { if (length (list1) != length (list2)) { result <- 0; } else { if (length (list1) == 0 && length (list2) == 0) { result <- 1; } else { if (head (list1) != head (list2)) { result <- 0; } else { result <- equal (tail (list1), tail (list2)); } } } return (result); } main <- function () { r <- as.integer (readline ()); my_list <- null (); l1 <- cons_a_list (r, my_list); l2 <- cons_a_list (r, my_list); l3 <- cons_a_list (r - 1, my_list); l1_eq_l2 <- equal (l1, l2); l1_eq_l3 <- equal (l1, l3); print (l1_eq_l2); print (l1_eq_l3); }
/Test6.R
no_license
RachieCodes/lexical_analyzer
R
false
false
900
r
# Test6.R # This program tests two recursive functions on lists. source ("List.R") cons_a_list <- function (r, list) { my_list <- list; if (r != 0) { my_list <- cons_a_list (r - 1, cons (r, list)); } return (my_list); } equal <- function (list1, list2) { if (length (list1) != length (list2)) { result <- 0; } else { if (length (list1) == 0 && length (list2) == 0) { result <- 1; } else { if (head (list1) != head (list2)) { result <- 0; } else { result <- equal (tail (list1), tail (list2)); } } } return (result); } main <- function () { r <- as.integer (readline ()); my_list <- null (); l1 <- cons_a_list (r, my_list); l2 <- cons_a_list (r, my_list); l3 <- cons_a_list (r - 1, my_list); l1_eq_l2 <- equal (l1, l2); l1_eq_l3 <- equal (l1, l3); print (l1_eq_l2); print (l1_eq_l3); }
# std.data.R # - T.S.Yo 2006.09.26 # - Source a function, std.data(x), which returns a standardized # data of given array x. # std.data <- function(x) { ######################################################################## # Supplementary functions ######################################################################## # Standarize 1-d array std1d <- function (y) { # Get dimensions of given data lx <- length(y) mx <- mean(y) sdx <- sd(y) # Standardized_value = (original_value - sample_mean) / sample_standard_deviation xx <- (y - mx)/sdx # Return return(xx) } ######################################################################## # Main function ######################################################################## # Check arguments if (!missing(x)) { # Check if dimension exists, if no, make up one if(is.null(dim(x))) { dim(x) <- c(length(x),1) } } # Get dimension and loop stdx <- NULL nx <- dim(x)[2] for(j in 1:nx){ stdx <- cbind(stdx,(std1d(x[,j]))) } return(stdx) }
/practical.proj/nca/src/std.data.r
no_license
tingsyo/r-tools
R
false
false
1,156
r
# std.data.R # - T.S.Yo 2006.09.26 # - Source a function, std.data(x), which returns a standardized # data of given array x. # std.data <- function(x) { ######################################################################## # Supplementary functions ######################################################################## # Standarize 1-d array std1d <- function (y) { # Get dimensions of given data lx <- length(y) mx <- mean(y) sdx <- sd(y) # Standardized_value = (original_value - sample_mean) / sample_standard_deviation xx <- (y - mx)/sdx # Return return(xx) } ######################################################################## # Main function ######################################################################## # Check arguments if (!missing(x)) { # Check if dimension exists, if no, make up one if(is.null(dim(x))) { dim(x) <- c(length(x),1) } } # Get dimension and loop stdx <- NULL nx <- dim(x)[2] for(j in 1:nx){ stdx <- cbind(stdx,(std1d(x[,j]))) } return(stdx) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/spotifyr-functions.R \name{get_spotify_authorization_code} \alias{get_spotify_authorization_code} \title{Get Spotify Authorization Code} \usage{ get_spotify_authorization_code( client_id = Sys.getenv("SPOTIFY_CLIENT_ID"), client_secret = Sys.getenv("SPOTIFY_CLIENT_SECRET"), scope = get_scopes() ) } \arguments{ \item{client_id}{Defaults to System Envioronment variable "SPOTIFY_CLIENT_ID"} \item{client_secret}{Defaults to System Envioronment variable "SPOTIFY_CLIENT_SECRET"} \item{scope}{Space delimited string of spotify scopes, found here: https://developer.spotify.com/documentation/general/guides/scopes/. All scopes are selected by default} } \value{ The Spotify Web API Token2.0 reference class object (see \code{httr::\link[httr]{oauth2.0_token}}), or an error message. } \description{ This function is copied verbatim from the \code{spotifyr} package by \href{https://www.rcharlie.com/spotifyr/index.html}{Charlie Thompson}. It is being used in accordance with its MIT License. All copyright is retained by C Thompson as follows YEAR: 2017 COPYRIGHT HOLDER: Charlie Thompson } \details{ This function creates a Spotify authorization code. See \code{httr::\link[httr]{oauth2.0_token}}. } \examples{ \dontrun{ authorization <- get_spotify_authorization_code() } } \concept{authentication functions}
/man/get_spotify_authorization_code.Rd
permissive
condwanaland/spotty
R
false
true
1,394
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/spotifyr-functions.R \name{get_spotify_authorization_code} \alias{get_spotify_authorization_code} \title{Get Spotify Authorization Code} \usage{ get_spotify_authorization_code( client_id = Sys.getenv("SPOTIFY_CLIENT_ID"), client_secret = Sys.getenv("SPOTIFY_CLIENT_SECRET"), scope = get_scopes() ) } \arguments{ \item{client_id}{Defaults to System Envioronment variable "SPOTIFY_CLIENT_ID"} \item{client_secret}{Defaults to System Envioronment variable "SPOTIFY_CLIENT_SECRET"} \item{scope}{Space delimited string of spotify scopes, found here: https://developer.spotify.com/documentation/general/guides/scopes/. All scopes are selected by default} } \value{ The Spotify Web API Token2.0 reference class object (see \code{httr::\link[httr]{oauth2.0_token}}), or an error message. } \description{ This function is copied verbatim from the \code{spotifyr} package by \href{https://www.rcharlie.com/spotifyr/index.html}{Charlie Thompson}. It is being used in accordance with its MIT License. All copyright is retained by C Thompson as follows YEAR: 2017 COPYRIGHT HOLDER: Charlie Thompson } \details{ This function creates a Spotify authorization code. See \code{httr::\link[httr]{oauth2.0_token}}. } \examples{ \dontrun{ authorization <- get_spotify_authorization_code() } } \concept{authentication functions}
alpha.risk <- function(n) { D1 <- function(n) { d1 <- max(0, d2(n) - 3 * d3(n)) return(d1) } D2 <- function(n) { D2 <- d2(n) + 3 * d3(n) return(D2) } risco <- function(n) { risco <- 1 - (ptukey(D2(n), n, Inf) - ptukey(D1(n), n, Inf)) return(risco) } risk <- rep(0, length(n)) for(i in 1:length(n)) risk[i] <- risco(n[i]) return(risk) }
/IQCC/R/alpha.risk.R
no_license
ingted/R-Examples
R
false
false
460
r
alpha.risk <- function(n) { D1 <- function(n) { d1 <- max(0, d2(n) - 3 * d3(n)) return(d1) } D2 <- function(n) { D2 <- d2(n) + 3 * d3(n) return(D2) } risco <- function(n) { risco <- 1 - (ptukey(D2(n), n, Inf) - ptukey(D1(n), n, Inf)) return(risco) } risk <- rep(0, length(n)) for(i in 1:length(n)) risk[i] <- risco(n[i]) return(risk) }
#***************************************** # # (C) Copyright IBM Corp. 2017 # Author: Bradley J Eck and Ernesto Arandia # #*****************************************/ ## hydraulics functions of epanet toolkit #' ENsolveH #' #' Solves the network hydraulics for all time periods #' #' @export #' @useDynLib epanet2toolkit RENsolveH #' @examples #' # path to Net1.inp example file included with this package #' inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp") #' ENopen( inp, "Net1.rpt","Net1.bin") #' ENsolveH() #' ENsolveQ() #' ENgetnodevalue(2, "EN_PRESSURE") #' ENclose() #' # clean-up the created files #' file.remove("Net1.rpt") #' file.remove("Net1.bin") ENsolveH <- function(){ arg <- .C("RENsolveH", as.integer(-1) ) err <- arg[[1]] check_epanet_error( err ) return( invisible() ) } #'ENsaveH #' #'Saves hydraulic results to binary file #' #' @export #' @useDynLib epanet2toolkit RENsaveH #'@details Must be called before ENreport() if no WQ simulation has been made. #'Should not be called if ENsolveQ() will be used. ENsaveH <- function(){ arg <- .C("RENsaveH", as.integer(-1)) err <- arg[[1]] check_epanet_error( err ) return( invisible() ) } #' Open hydraulics analysis system. #' #' \code{ENopenH} opens the EPANET hydraulics analysis system. #' #' @export #' @useDynLib epanet2toolkit enOpenH #' #' @details Call \code{ENopenH} prior to running the first hydraulic analysis using the #' \code{ENinitH-ENrunH-ENnextH} sequence. Multiple analyses can be made before calling #' \code{ENcloseH} to close the hydraulic analysis system. #' #' Do not call this function if \code{ENsolveH} is being used to run a complete hydraulic analysis. 
#' #' @seealso \code{ENinitH}, \code{ENrunH}, \code{ENnextH}, \code{ENcloseH} #' #' @examples #' # path to Net1.inp example file included with this package #' inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp") #' ENopen( inp, "Net1.rpt") #' ENopenH() #' ENinitH(0) #' ENrunH() #' ENcloseH() #' ENclose() #' # clean-up the created files #' file.remove("Net1.rpt") ENopenH <- function() { if( getOpenHflag()){ warning("Epanet hydraulic solver was already open") } else { result <- .Call("enOpenH") check_epanet_error(result$errorcode) } return( invisible() ) } #' Initialize hydraulic engine #' #' \code{ENinitH} Initializes storage tank levels, link status and settings, and the simulation clock #' time prior to running a hydraulic analysis. #' #' @export #' @useDynLib epanet2toolkit enInitH #' #' @param flag A two-digit flag indicating if hydraulic results will be saved to the #' hydraulics file (rightmost digit) and if link flows should be re-initialized. #' #' @details Call \code{ENinitH} prior to running a hydraulic analysis using \code{ENrunH} and #' \code{ENnextH}.\code{ENopenH} must have been called prior to calling \code{ENinitH}. Do not call #' \code{ENinitH} if a complete hydraulic analysis is being made with a call to \code{ENsolveH}. #' Values of flag have the following meanings: #' #' \tabular{ll}{ #' 00 \tab do not re-initialize flows, do not save results to file\cr #' 01 \tab do not re-initialize flows, save results to file\cr #' 10 \tab re-initialize flows, do not save results to file\cr #' 11 \tab re-initialize flows, save results to file #' } #' #' Set \code{flag} to 1 (or 11) if you will be making a subsequent water quality run, using #' \code{ENreport} to generate a report, or using \code{ENsavehydfile} to save the binary #' hydraulics file. 
#' #' @seealso \code{ENopenH}, \code{ENrunH}, \code{ENnextH}, \code{ENcloseH} #' #' @examples #' # path to Net1.inp example file included with this package #' inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp") #' ENopen( inp, "Net1.rpt") #' ENopenH() #' ENinitH(0) #' ENrunH() #' ENcloseH() #' ENclose() #' # clean-up the created files #' file.remove("Net1.rpt") ENinitH <- function(flag) { # check the arguments if (missing(flag)) { stop("Need to specify the initialization flag.") } if (is.numeric(flag)) { flag = as.integer(flag) } else { stop("The initialization flag must be an integer.") } result <- .Call("enInitH", flag) check_epanet_error(result$errorcode) return(invisible()) } #' run hydraulics engine #' #' \code{ENrunH} Runs a single period hydraulic analysis, retrieving the #' current simulation clock time \code{t}. #' #' @export #' @useDynLib epanet2toolkit enRunH #' #' @details Use \code{ENrunH} along with \code{ENnextH} in a while loop to #' analyze hydraulics in each period of an extended period simulation. #' This process automatically updates the simulation clock time so treat #' \code{t} as a read-only variable. #' #' \code{ENinitH} must have been called prior to running the #' \code{ENrunH-ENnextH} loop. #' #' See \code{ENnextH} for an example of using this function. #' #' @seealso \code{ENopenH}, \code{ENinitH}, \code{ENnextH}, \code{ENcloseH} #' #' @examples #' # path to Net1.inp example file included with this package #' inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp") #' ENopen( inp, "Net1.rpt") #' ENopenH() #' ENinitH(0) #' ENrunH() #' ENcloseH() #' ENclose() #' # clean-up the created files #' file.remove("Net1.rpt") ENrunH <- function() { result <- .Call("enRunH") check_epanet_error(result$errorcode) return(result$value) } #' determine the next hydraulic step #' #' \code{ENnextH} determines the length of time until the next #' hydraulic event occurs in an extended period simulation. 
#' #' @export #' @useDynLib epanet2toolkit enNextH #' #' @return An integer, the time (in seconds) until next hydraulic event #' occurs or 0 if at the end of the simulation period. #' #' @details This function is used in conjunction with \code{ENrunH} to #' perform an extended period hydraulic analysis (see example below). #' #' The return value is automatically computed as the smaller of: #' #' \itemize{ #' \item the time interval until the next hydraulic time step begins #' \item the time interval until the next reporting time step begins #' \item the time interval until the next change in demands occurs #' \item the time interval until a tank becomes full or empty #' \item the time interval until a control or rule fires #' } #' #' @examples #' # path to Net1.inp example file included with this package #' inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp") #' ENopen( inp, "Net1.rpt") #' # store simulation times #' t = NULL #' ENopenH() #' ENinitH(11) #' repeat { #' t <- c(t, ENrunH()) #' tstep <- ENnextH() #' if (tstep == 0) { #' break #' } #' } #' ENcloseH() #' ENclose() #' # clean-up the created files #' file.remove("Net1.rpt") #' #' @seealso \code{ENopenH}, \code{ENinitH}, \code{ENrunH}, \code{ENcloseH}, \code{ENsettimeparam} #' ENnextH <- function() { result <- .Call("enNextH") check_epanet_error(result$errorcode) return(result$value) } #' close hydraulics engine #' #' \code{ENcloseH} closes the hydraulic analysis system, freeing all #' allocated memory #' #' @export #' @useDynLib epanet2toolkit enCloseH #' #' @details Call \code{ENcloseH} after all hydraulics analyses have been made using #' \code{ENinitH-ENrunH-ENnextH}. Do not call this function if \code{ENsolveH} is being used. 
#' #' @seealso \code{ENopenH}, \code{ENinitH}, \code{ENrunH}, \code{ENnextH} #' ENcloseH <- function() { if( !getOpenHflag()){ warning("Epanet hydraulics already closed") } else { result <- .Call("enCloseH") check_epanet_error(result$errorcode) } return( invisible() ) }
/R/hydraulics.r
no_license
DrRoad/epanet2toolkit
R
false
false
7,899
r
#***************************************** # # (C) Copyright IBM Corp. 2017 # Author: Bradley J Eck and Ernesto Arandia # #*****************************************/ ## hydraulics functions of epanet toolkit #' ENsolveH #' #' Solves the network hydraulics for all time periods #' #' @export #' @useDynLib epanet2toolkit RENsolveH #' @examples #' # path to Net1.inp example file included with this package #' inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp") #' ENopen( inp, "Net1.rpt","Net1.bin") #' ENsolveH() #' ENsolveQ() #' ENgetnodevalue(2, "EN_PRESSURE") #' ENclose() #' # clean-up the created files #' file.remove("Net1.rpt") #' file.remove("Net1.bin") ENsolveH <- function(){ arg <- .C("RENsolveH", as.integer(-1) ) err <- arg[[1]] check_epanet_error( err ) return( invisible() ) } #'ENsaveH #' #'Saves hydraulic results to binary file #' #' @export #' @useDynLib epanet2toolkit RENsaveH #'@details Must be called before ENreport() if no WQ simulation has been made. #'Should not be called if ENsolveQ() will be used. ENsaveH <- function(){ arg <- .C("RENsaveH", as.integer(-1)) err <- arg[[1]] check_epanet_error( err ) return( invisible() ) } #' Open hydraulics analysis system. #' #' \code{ENopenH} opens the EPANET hydraulics analysis system. #' #' @export #' @useDynLib epanet2toolkit enOpenH #' #' @details Call \code{ENopenH} prior to running the first hydraulic analysis using the #' \code{ENinitH-ENrunH-ENnextH} sequence. Multiple analyses can be made before calling #' \code{ENcloseH} to close the hydraulic analysis system. #' #' Do not call this function if \code{ENsolveH} is being used to run a complete hydraulic analysis. 
#' #' @seealso \code{ENinitH}, \code{ENrunH}, \code{ENnextH}, \code{ENcloseH} #' #' @examples #' # path to Net1.inp example file included with this package #' inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp") #' ENopen( inp, "Net1.rpt") #' ENopenH() #' ENinitH(0) #' ENrunH() #' ENcloseH() #' ENclose() #' # clean-up the created files #' file.remove("Net1.rpt") ENopenH <- function() { if( getOpenHflag()){ warning("Epanet hydraulic solver was already open") } else { result <- .Call("enOpenH") check_epanet_error(result$errorcode) } return( invisible() ) } #' Initialize hydraulic engine #' #' \code{ENinitH} Initializes storage tank levels, link status and settings, and the simulation clock #' time prior to running a hydraulic analysis. #' #' @export #' @useDynLib epanet2toolkit enInitH #' #' @param flag A two-digit flag indicating if hydraulic results will be saved to the #' hydraulics file (rightmost digit) and if link flows should be re-initialized. #' #' @details Call \code{ENinitH} prior to running a hydraulic analysis using \code{ENrunH} and #' \code{ENnextH}.\code{ENopenH} must have been called prior to calling \code{ENinitH}. Do not call #' \code{ENinitH} if a complete hydraulic analysis is being made with a call to \code{ENsolveH}. #' Values of flag have the following meanings: #' #' \tabular{ll}{ #' 00 \tab do not re-initialize flows, do not save results to file\cr #' 01 \tab do not re-initialize flows, save results to file\cr #' 10 \tab re-initialize flows, do not save results to file\cr #' 11 \tab re-initialize flows, save results to file #' } #' #' Set \code{flag} to 1 (or 11) if you will be making a subsequent water quality run, using #' \code{ENreport} to generate a report, or using \code{ENsavehydfile} to save the binary #' hydraulics file. 
#' #' @seealso \code{ENopenH}, \code{ENrunH}, \code{ENnextH}, \code{ENcloseH} #' #' @examples #' # path to Net1.inp example file included with this package #' inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp") #' ENopen( inp, "Net1.rpt") #' ENopenH() #' ENinitH(0) #' ENrunH() #' ENcloseH() #' ENclose() #' # clean-up the created files #' file.remove("Net1.rpt") ENinitH <- function(flag) { # check the arguments if (missing(flag)) { stop("Need to specify the initialization flag.") } if (is.numeric(flag)) { flag = as.integer(flag) } else { stop("The initialization flag must be an integer.") } result <- .Call("enInitH", flag) check_epanet_error(result$errorcode) return(invisible()) } #' run hydraulics engine #' #' \code{ENrunH} Runs a single period hydraulic analysis, retrieving the #' current simulation clock time \code{t}. #' #' @export #' @useDynLib epanet2toolkit enRunH #' #' @details Use \code{ENrunH} along with \code{ENnextH} in a while loop to #' analyze hydraulics in each period of an extended period simulation. #' This process automatically updates the simulation clock time so treat #' \code{t} as a read-only variable. #' #' \code{ENinitH} must have been called prior to running the #' \code{ENrunH-ENnextH} loop. #' #' See \code{ENnextH} for an example of using this function. #' #' @seealso \code{ENopenH}, \code{ENinitH}, \code{ENnextH}, \code{ENcloseH} #' #' @examples #' # path to Net1.inp example file included with this package #' inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp") #' ENopen( inp, "Net1.rpt") #' ENopenH() #' ENinitH(0) #' ENrunH() #' ENcloseH() #' ENclose() #' # clean-up the created files #' file.remove("Net1.rpt") ENrunH <- function() { result <- .Call("enRunH") check_epanet_error(result$errorcode) return(result$value) } #' determine the next hydraulic step #' #' \code{ENnextH} determines the length of time until the next #' hydraulic event occurs in an extended period simulation. 
#' #' @export #' @useDynLib epanet2toolkit enNextH #' #' @return An integer, the time (in seconds) until next hydraulic event #' occurs or 0 if at the end of the simulation period. #' #' @details This function is used in conjunction with \code{ENrunH} to #' perform an extended period hydraulic analysis (see example below). #' #' The return value is automatically computed as the smaller of: #' #' \itemize{ #' \item the time interval until the next hydraulic time step begins #' \item the time interval until the next reporting time step begins #' \item the time interval until the next change in demands occurs #' \item the time interval until a tank becomes full or empty #' \item the time interval until a control or rule fires #' } #' #' @examples #' # path to Net1.inp example file included with this package #' inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp") #' ENopen( inp, "Net1.rpt") #' # store simulation times #' t = NULL #' ENopenH() #' ENinitH(11) #' repeat { #' t <- c(t, ENrunH()) #' tstep <- ENnextH() #' if (tstep == 0) { #' break #' } #' } #' ENcloseH() #' ENclose() #' # clean-up the created files #' file.remove("Net1.rpt") #' #' @seealso \code{ENopenH}, \code{ENinitH}, \code{ENrunH}, \code{ENcloseH}, \code{ENsettimeparam} #' ENnextH <- function() { result <- .Call("enNextH") check_epanet_error(result$errorcode) return(result$value) } #' close hydraulics engine #' #' \code{ENcloseH} closes the hydraulic analysis system, freeing all #' allocated memory #' #' @export #' @useDynLib epanet2toolkit enCloseH #' #' @details Call \code{ENcloseH} after all hydraulics analyses have been made using #' \code{ENinitH-ENrunH-ENnextH}. Do not call this function if \code{ENsolveH} is being used. 
#' #' @seealso \code{ENopenH}, \code{ENinitH}, \code{ENrunH}, \code{ENnextH} #' ENcloseH <- function() { if( !getOpenHflag()){ warning("Epanet hydraulics already closed") } else { result <- .Call("enCloseH") check_epanet_error(result$errorcode) } return( invisible() ) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/signals.R \name{add.signal} \alias{add.signal} \title{add a signal to a strategy} \usage{ add.signal(strategy, name, arguments, parameters = NULL, label = NULL, ..., enabled = TRUE, indexnum = NULL, store = FALSE) } \arguments{ \item{strategy}{an object (or the name of an object) of type 'strategy' to add the signal to} \item{name}{name of the signal, must correspond to an R function} \item{arguments}{named list of default arguments to be passed to an signal function when executed} \item{parameters}{vector of strings naming parameters to be saved for apply-time definition,default NULL, only needed if you need special names to avoid argument collision} \item{label}{arbitrary text label for signal output, default NULL} \item{...}{any other passthru parameters} \item{enabled}{TRUE/FALSE whether the signal is enabled for use in applying the strategy, default TRUE} \item{indexnum}{if you are updating a specific signal, the index number in the $signals list to update} \item{store}{TRUE/FALSE whether to store the strategy in the .strategy environment, or return it. default FALSE} } \value{ if \code{strategy} was the name of a strategy, the name. It it was a strategy, the updated strategy. } \description{ This adds a signal definition to a strategy object. } \details{ Signals denote times at which the strategy \emph{may} want to take action. Common signals types from the literature include crossovers, thresholds, or other interactions between your \code{mktdata} and your indicators. if \code{label} is not supplied, NULL default will be converted to '<name>.sig' if the signal function returns one named column, we use that, and ignore the label. If the signal function returns multiple columns, the label will be \code{\link{paste}}'d to either the returned column names or the respective column number. 
} \seealso{ \code{\link{applySignals}} \code{\link{add.indicator}} \code{link{add.rule}} \code{\link{sigComparison}} \code{\link{sigCrossover}} \code{\link{sigFormula}} \code{\link{sigPeak}} \code{\link{sigThreshold}} }
/man/add.signal.Rd
no_license
zhangyehui1968/quantstrat
R
false
true
2,140
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/signals.R \name{add.signal} \alias{add.signal} \title{add a signal to a strategy} \usage{ add.signal(strategy, name, arguments, parameters = NULL, label = NULL, ..., enabled = TRUE, indexnum = NULL, store = FALSE) } \arguments{ \item{strategy}{an object (or the name of an object) of type 'strategy' to add the signal to} \item{name}{name of the signal, must correspond to an R function} \item{arguments}{named list of default arguments to be passed to an signal function when executed} \item{parameters}{vector of strings naming parameters to be saved for apply-time definition,default NULL, only needed if you need special names to avoid argument collision} \item{label}{arbitrary text label for signal output, default NULL} \item{...}{any other passthru parameters} \item{enabled}{TRUE/FALSE whether the signal is enabled for use in applying the strategy, default TRUE} \item{indexnum}{if you are updating a specific signal, the index number in the $signals list to update} \item{store}{TRUE/FALSE whether to store the strategy in the .strategy environment, or return it. default FALSE} } \value{ if \code{strategy} was the name of a strategy, the name. It it was a strategy, the updated strategy. } \description{ This adds a signal definition to a strategy object. } \details{ Signals denote times at which the strategy \emph{may} want to take action. Common signals types from the literature include crossovers, thresholds, or other interactions between your \code{mktdata} and your indicators. if \code{label} is not supplied, NULL default will be converted to '<name>.sig' if the signal function returns one named column, we use that, and ignore the label. If the signal function returns multiple columns, the label will be \code{\link{paste}}'d to either the returned column names or the respective column number. 
} \seealso{ \code{\link{applySignals}} \code{\link{add.indicator}} \code{link{add.rule}} \code{\link{sigComparison}} \code{\link{sigCrossover}} \code{\link{sigFormula}} \code{\link{sigPeak}} \code{\link{sigThreshold}} }
library(vegalite) ### Name: calculate ### Title: Derive new fields ### Aliases: calculate ### ** Examples vegalite() %>% add_data("https://vega.github.io/vega-editor/app/data/population.json") %>% add_filter("datum.year == 2000") %>% calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>% encode_x("gender", "nominal") %>% encode_y("people", "quantitative", aggregate="sum") %>% encode_color("gender", "nominal") %>% scale_x_ordinal(band_size=6) %>% scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>% facet_col("age", "ordinal", padding=4) %>% axis_x(remove=TRUE) %>% axis_y(title="population", grid=FALSE) %>% axis_facet_col(orient="bottom", axisWidth=1, offset=-8) %>% facet_cell(stroke_width=0) %>% mark_bar()
/data/genthat_extracted_code/vegalite/examples/calculate.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
759
r
library(vegalite) ### Name: calculate ### Title: Derive new fields ### Aliases: calculate ### ** Examples vegalite() %>% add_data("https://vega.github.io/vega-editor/app/data/population.json") %>% add_filter("datum.year == 2000") %>% calculate("gender", 'datum.sex == 2 ? "Female" : "Male"') %>% encode_x("gender", "nominal") %>% encode_y("people", "quantitative", aggregate="sum") %>% encode_color("gender", "nominal") %>% scale_x_ordinal(band_size=6) %>% scale_color_nominal(range=c("#EA98D2", "#659CCA")) %>% facet_col("age", "ordinal", padding=4) %>% axis_x(remove=TRUE) %>% axis_y(title="population", grid=FALSE) %>% axis_facet_col(orient="bottom", axisWidth=1, offset=-8) %>% facet_cell(stroke_width=0) %>% mark_bar()
#logistic regression model library(rpart) library(ROSE) library(unbalanced) library(randomForest) library(DataExplorer) library(tree) # Classification Tree library(randomForest) # Random Forest library(gbm) # Boosting library(plyr) library(caret) library(rattle) library(RColorBrewer) library(ada) library(earth) set.seed(124) datafr = balanced_state_PA datafr$Bump = NULL datafr$Roundabout = NULL #Splitting the data tr_ic = sample(x = 1:nrow(datafr), size = (0.65*nrow(datafr))) logit_training_data <- datafr[tr_ic,] logit_test_data <- datafr[-tr_ic,] str(logit_training_data) logit_training_data$Sig = as.factor(as.numeric(logit_training_data$Sig)-1) logit_test_data$Sig = as.factor(as.numeric(logit_test_data$Sig)-1) logit_model_1 = glm(Sig~., family = "binomial", data= logit_training_data) model_predictions=predict(logit_model_1, logit_test_data , type="response") a = 0.45 model_predictions_class = model_predictions model_predictions_class[model_predictions_class > a] = 1 model_predictions_class[model_predictions_class < a] = 0 table(model_predictions_class,logit_test_data$Sig) #Testing Error mean_logit = mean(model_predictions_class!=logit_test_data$Sig) accuracy_logit = 1- mean_logit accuracy_logit
/Pennsylvania/logit_model.R
no_license
adithyanarayanan/Predictive-Modeling-of-Traffic-Accident-Severity
R
false
false
1,283
r
#logistic regression model library(rpart) library(ROSE) library(unbalanced) library(randomForest) library(DataExplorer) library(tree) # Classification Tree library(randomForest) # Random Forest library(gbm) # Boosting library(plyr) library(caret) library(rattle) library(RColorBrewer) library(ada) library(earth) set.seed(124) datafr = balanced_state_PA datafr$Bump = NULL datafr$Roundabout = NULL #Splitting the data tr_ic = sample(x = 1:nrow(datafr), size = (0.65*nrow(datafr))) logit_training_data <- datafr[tr_ic,] logit_test_data <- datafr[-tr_ic,] str(logit_training_data) logit_training_data$Sig = as.factor(as.numeric(logit_training_data$Sig)-1) logit_test_data$Sig = as.factor(as.numeric(logit_test_data$Sig)-1) logit_model_1 = glm(Sig~., family = "binomial", data= logit_training_data) model_predictions=predict(logit_model_1, logit_test_data , type="response") a = 0.45 model_predictions_class = model_predictions model_predictions_class[model_predictions_class > a] = 1 model_predictions_class[model_predictions_class < a] = 0 table(model_predictions_class,logit_test_data$Sig) #Testing Error mean_logit = mean(model_predictions_class!=logit_test_data$Sig) accuracy_logit = 1- mean_logit accuracy_logit
library(archdata) ### Name: EWBurials ### Title: Ernest Witte Cemetery, Austin, County, Texas, U.S.A. ### Aliases: EWBurials ### Keywords: datasets ### ** Examples data(EWBurials) xtabs(~Age+Sex+Group, EWBurials) library(circular) plot(EWBurials$Direction)
/data/genthat_extracted_code/archdata/examples/EWBurials.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
265
r
library(archdata) ### Name: EWBurials ### Title: Ernest Witte Cemetery, Austin, County, Texas, U.S.A. ### Aliases: EWBurials ### Keywords: datasets ### ** Examples data(EWBurials) xtabs(~Age+Sex+Group, EWBurials) library(circular) plot(EWBurials$Direction)
#================================================================= #Simple fit exploration of SR data #Author: Catarina Wor #Date: April 10th 2018 #================================================================= #load in required packages and libraries library(ggplot2) library(TMB) #library(TMBhelper) -- not available in most recent R versions library(bayesplot) library(tmbstan) library(reshape) library(xtable) #load in directories.R source("C:/Users/worc/Documents/HarrisonSR/R/directories.R") #source("/Users/catarinawor/Documents/work/Chinook/srkf/R/directories.R") #setwd(model_dir) source("calc_quantile.R") source("TMB_functions.R") #read in simple data set SR<-read.csv("../data/Harrison_simples_Apr18.csv") iteracs=100000 # LM version #simple model srm<-lm(log(SR$R/SR$S_adj)~ SR$S_adj) a_srm<-srm$coefficients[1] b_srm<--srm$coefficients[2] alpha<-exp(a_srm) u_msy=.5*a_srm-0.07*a_srm^2 predR1<- SR$S_adj*exp(a_srm-b_srm*SR$S_adj) mydata<-list(obs_logR=log(SR$R),obs_S=SR$S_adj) parameters_simple <- list( alpha=(a_srm), logbeta = log(b_srm), logSigObs= log(.4) ) simpl<-list( dat=mydata, params=parameters_simple, rndm=NULL, dll="Ricker_simple", DIR=model_dir ) simpleobj<-runTMB(simpl) simpleobj$report() #MCMC simpleB<-list( obj=simpleobj, nchain=3, iter=iteracs, lowbd=c(0.1,-13.5,-6.0), hibd=c(4.0,-8.5,5.0) ) posterior_simple<-posteriorsdf(simpleB) #plots #plot_posteriors(posterior_simple$posteriors) #posteriors of derived quantities -- the interesting ones simpdf<-posterior_simple$posteriors a<-(simpdf$value[simpdf$parameters=="alpha"]) alpha<-exp(simpdf$value[simpdf$parameters=="alpha"]) beta<-exp(simpdf$value[simpdf$parameters=="logbeta"]) Smax<-1/exp(simpdf$value[simpdf$parameters=="logbeta"]) sig<-exp(simpdf$value[simpdf$parameters=="logSigObs"]) umsy_simple<-.5*a-0.07*a^2 Rprdbsimple<-matrix(NA,ncol=length(SR$BroodYear),nrow=length(sig)) meanRsimple<-NULL varRsimple<-NULL for(i in 1:length(sig)){ Rprdbsimple[i,]<-as.numeric(SR$S_adj*exp(a[i])*exp(-SR$S_adj* 
beta[i])) meanRsimple[i]<-mean(Rprdbsimple[i,]/SR$R) varRsimple[i]<-var(Rprdbsimple[i,]/SR$R) } #============================================================================================================= #Recursive Bayes model - testing the scenario with low observation error only mydata3<-list(obs_logR=log(SR$R),obs_S=SR$S_adj,prbeta1=2.0,prbeta2=3.0) parameters_recursive <- list( alphao=a_srm, logbeta = log(b_srm), rho=.2, logvarphi= 0.1, alpha=rep(0.9,length(SR$R)) ) recursive3<-list( dat=mydata3, params=parameters_recursive, rndm="alpha", dll="Rickerkf_ratiovar", DIR=model_dir ) compile("Rickerkf_ratiovar.cpp",libtmb=FALSE, "-O1 -g", DLLFLAGS="") dyn.load(dynlib("Rickerkf_ratiovar")) obj3<-MakeADFun(mydata3,parameters_recursive,random="alpha", DLL="Rickerkf_ratiovar") newtonOption(obj3, smartsearch=FALSE) opt<-nlminb(obj3$par,obj3$fn,obj3$gr) repkf3<-obj3$report() sdreport(obj3) recursiveB3<-list( obj=obj3, nchain=3, iter=iteracs, lowbd=c(0.1,-13.5,0.0,-3.0),#rep(0.001,length(SR$R))), hibd=c(4.0,-9.,1.0,5.0)#,rep(4.0,length(SR$R))) ) posterior_recursive3<-posteriorsdf(recursiveB3) recrsdf3<-posterior_recursive3$posteriors a_rb3<-(recrsdf3$value[recrsdf3$parameters=="alphao"]) beta_rb3<-exp(recrsdf3$value[recrsdf3$parameters=="logbeta"]) Smax_rb3<-1/exp(recrsdf3$value[recrsdf3$parameters=="logbeta"]) rho_rb3<-(recrsdf3$value[recrsdf3$parameters=="rho"]) soa3<-recrsdf3[grep("alpha",recrsdf3$parameters),] soa3$umsy<-.5*soa3$value-0.07*soa3$value^2 umsyposteriorsummary3<-aggregate(soa3$umsy,list(soa3$parameters),function(x){quantile(x,probs=c(0.025,.5,.975))}) umsyposteriorsummary3<-umsyposteriorsummary3[c(1,12,23,25:30,2:11,13:22,24),] #==================================================================================================================== #model with posfun on alpha parameters_recursive_loga <- list( logalphao=log(a_srm), logbeta = log(b_srm), rho=.2, logvarphi= 0.1, logalpha=log(rep(0.9,length(SR$R))) ) 
compile("Rickerkf_ratiovar_aprior.cpp",libtmb=FALSE, "-O1 -g", DLLFLAGS="") dyn.load(dynlib("Rickerkf_ratiovar_aprior")) obj3_aprior<-MakeADFun(mydata3,parameters_recursive_loga ,random="logalpha",DLL="Rickerkf_ratiovar_aprior") #newtonOption(obj3_aprior, smartsearch=FALSE) obj3_aprior$fn() obj3_aprior$gr() opt_apr<-nlminb(obj3_aprior$par,obj3_aprior$fn,obj3_aprior$gr) repkf3_aprior<-obj3_aprior$report() sdreport(obj3_aprior) recursiveB3_aprior<-list( obj=obj3_aprior, nchain=3, iter=iteracs, lowbd=c(0.1,-13.5,0.0,-3.0), hibd=c(4.0,-9.,1.0,5.0) ) posterior_recursive3_aprior<-posteriorsdf(recursiveB3_aprior) #============================================================================== #plots dfac<-data.frame(broodyear=rep(SR$BroodYear,8), a=c(posterior_recursive3$fit_summary$summary[5:34,6], exp(posterior_recursive3_aprior$fit_summary$summary[5:34,6]) ), #,umsyposteriorsummary3$x[,2] scn=rep(c("no bounds","bounds"),each=length(SR$BroodYear)), lower=c(posterior_recursive3$fit_summary$summary[5:34,4], exp(posterior_recursive3_aprior$fit_summary$summary[5:34,4])), upper=c(posterior_recursive3$fit_summary$summary[5:34,8], exp(posterior_recursive3_aprior$fit_summary$summary[5:34,8]))) pa<-ggplot(dfac) pa<-pa+geom_ribbon(aes(x=broodyear,ymin=lower,ymax=upper, fill=scn),alpha=.4) pa<-pa+geom_line(aes(x=broodyear,y=a,col=scn), size=1.2) pa<-pa+theme_bw(16) pa<-pa+labs(title = "Recursive Bayes model - a time series", y = expression(a[t]), x = "Brood year") pa moltentmp<-melt(soa[,-c(1,2)]) moltentmp$iterations<-paste(soa$iterations,soa$chains,sep=".") tmp<-cast(moltentmp[moltentmp$variable=="value",],formula = iterations~ parameters , mean, value = 'value') tmpu<-cast(moltentmp[moltentmp$variable=="umsy",],formula = iterations~ parameters , mean, value = 'value') moltentmp1<-melt(soa1[,-c(1,2)]) moltentmp1$iterations<-paste(soa1$iterations,soa1$chains,sep=".") tmp1<-cast(moltentmp1[moltentmp1$variable=="value",],formula = iterations~ parameters , mean, value = 'value') 
tmpu1<-cast(moltentmp1[moltentmp1$variable=="umsy",],formula = iterations~ parameters , mean, value = 'value') moltentmp2<-melt(soa2[,-c(1,2)]) moltentmp2$iterations<-paste(soa2$iterations,soa2$chains,sep=".") tmp2<-cast(moltentmp2[moltentmp2$variable=="value",],formula = iterations~ parameters , mean, value = 'value') tmpu2<-cast(moltentmp2[moltentmp2$variable=="umsy",],formula = iterations~ parameters , mean, value = 'value') moltentmp3<-melt(soa3[,-c(1,2)]) moltentmp3$iterations<-paste(soa3$iterations,soa3$chains,sep=".") tmp3<-cast(moltentmp3[moltentmp3$variable=="value",],formula = iterations~ parameters , mean, value = 'value') tmpu3<-cast(moltentmp3[moltentmp3$variable=="umsy",],formula = iterations~ parameters , mean, value = 'value') sum(moltentmp$value[moltentmp$variable=="umsy"]>0) summary(moltentmp) dim(moltentmp[moltentmp$variable=="umsy",]) dim(tmpu) head(tmp[,c(2,13,24,26:31,3:12,14:23,25)]) head(tmpu[,c(2,13,24,26:31,3:12,14:23,25)]) Rprdb0<-matrix(NA,ncol=length(SR$BroodYear),nrow=nrow(tmp)) Rprdb1<-matrix(NA,ncol=length(SR$BroodYear),nrow=nrow(tmp)) Rprdb2<-matrix(NA,ncol=length(SR$BroodYear),nrow=nrow(tmp)) Rprdb3<-matrix(NA,ncol=length(SR$BroodYear),nrow=nrow(tmp)) meanR0<-NULL meanR1<-NULL meanR2<-NULL meanR3<-NULL varR0<-NULL varR1<-NULL varR2<-NULL varR3<-NULL avga0<-NULL avga1<-NULL avga2<-NULL avga3<-NULL avgu0<-NULL avgu1<-NULL avgu2<-NULL avgu3<-NULL i=1 for(i in 1:nrow(tmp)){# Rprdb0[i,]<-as.numeric(SR$S_adj*exp(tmp[i,c(2,13,24,26:31,3:12,14:23,25)])*exp(-SR$S_adj* beta_rb[i])) Rprdb1[i,]<-as.numeric(SR$S_adj*exp(tmp1[i,c(2,13,24,26:31,3:12,14:23,25)])*exp(-SR$S_adj* beta_rb1[i])) Rprdb2[i,]<-as.numeric(SR$S_adj*exp(tmp2[i,c(2,13,24,26:31,3:12,14:23,25)])*exp(-SR$S_adj* beta_rb2[i])) Rprdb3[i,]<-as.numeric(SR$S_adj*exp(tmp3[i,c(2,13,24,26:31,3:12,14:23,25)])*exp(-SR$S_adj* beta_rb3[i])) meanR0[i]<-mean(Rprdb0[i,]/SR$R) meanR1[i]<-mean(Rprdb1[i,]/SR$R) meanR2[i]<-mean(Rprdb2[i,]/SR$R) meanR3[i]<-mean(Rprdb3[i,]/SR$R) 
varR0[i]<-var(Rprdb0[i,]/SR$R) varR1[i]<-var(Rprdb1[i,]/SR$R) varR2[i]<-var(Rprdb2[i,]/SR$R) varR3[i]<-var(Rprdb3[i,]/SR$R) avga0[i]<-mean(exp(unlist(tmp[i,c(21:23,25)]))) avga1[i]<-mean(exp(unlist(tmp1[i,c(21:23,25)]))) avga2[i]<-mean(exp(unlist(tmp2[i,c(21:23,25)]))) avga3[i]<-mean(exp(unlist(tmp3[i,c(21:23,25)]))) avgu0[i]<-mean(unlist(tmpu[i,c(21:23,25)])) avgu1[i]<-mean(unlist(tmpu1[i,c(21:23,25)])) avgu2[i]<-mean(unlist(tmpu2[i,c(21:23,25)])) avgu3[i]<-mean(unlist(tmpu3[i,c(21:23,25)])) } meanu<-c(quantile(umsy_simple,.5),quantile(avgu0,.5),quantile(avgu2,.5),quantile(avgu3,.5)) # meana<-c(quantile(alpha,.5),quantile(avga0,.5),quantile(avga2,.5),quantile(avga3,.5)) # meanR<-c(quantile(meanRsimple,.5),quantile(meanR0,.5),quantile(meanR2,.5),quantile(meanR3,.5))#quantile(meanR1,.5), varR<-c(quantile(varRsimple,.5),quantile(varR0,.5),quantile(varR2,.5),quantile(varR3,.5)) #,quantile(varR1,.5) bs<-c(quantile(Smax,.5),quantile(Smax_rb,.5),quantile(Smax_rb2,.5),quantile(Smax_rb3,.5)) vraptab<- data.frame(scenario=rep(c("simple Ricker", "RB base 3, 3","RB high obs 3, 2","RB low obs 2, 3"),1),aavg4=meana, b=bs,meanR=meanR,varR=varR,uavg4=meanu) colnames(vraptab)<-c("scenario","$\\widetilde{a_{avg4}}$","$\\widetilde{b}$","meanR","varR","$U_{MSY avg}$") vraptabtmp<-xtable(vraptab, digits=matrix(c(2,rep(2,nrow(vraptab)-1)),ncol=ncol(vraptab)+1,nrow=nrow(vraptab),byrow=F),sanitize.text.function=function(x){x}, ,caption = "Median parameter estimates across simple Ricker recruitment model and three time-varying productivity scenarios (RB) with different $\\rho$ priors. 
Parameters were transformed to meet VRAP requirements.", label="vraptab" ) print(vraptabtmp,sanitize.text.function = function(x) {x}, include.rownames = FALSE, file="../tex/vrap_params2.tex",caption.placement = "top", label="vraptab") #======================================================================== #Comparison tables #parameter estimates tab<-data.frame(Parameter=c("b","$S_{max}$","$\\rho$",paste("$\\alpha$",SR$BroodYear)), "base" =c(apply(cbind(beta_rb,Smax_rb,rho_rb),2,function(x) quantile(x, .5)),exp(posterior_recursive$fit_summary$summary[5:34,6])), "uninformative"=c(apply(cbind(beta_rb1,Smax_rb1,rho_rb1),2,function(x) quantile(x, .5)),exp(posterior_recursive1$fit_summary$summary[5:34,6])), "low"=c(apply(cbind(beta_rb2,Smax_rb2,rho_rb2),2,function(x) quantile(x, .5)),exp(posterior_recursive2$fit_summary$summary[5:34,6])), "high"=c(apply(cbind(beta_rb3,Smax_rb3,rho_rb3),2,function(x) quantile(x, .5)),exp(posterior_recursive3$fit_summary$summary[5:34,6]))) tabtmp<-xtable(tab, digits=matrix(c(-2,rep(2,nrow(tab)-1)),ncol=ncol(tab)+1,nrow=nrow(tab),byrow=F) ,caption = "Median parameter estimates for the recursive Bayes model across four $\\rho$ prior scenarios. 
" ) print(tabtmp,sanitize.text.function = function(x) {x}, include.rownames = FALSE, file="../tex/recursive_tab_params_comparison.tex",caption.placement = "top", label="parreccomp") #rho estimates, priors, posteriors #table for vrap #======================================================================== #Comparison figures #MLE versus posteriors rhodfp<-rbind(rhodf0p,rhodf1p,rhodf2p,rhodf3p) names(rhodfp) linerhodf<-data.frame(scn=rep(c("uninformative 1, 1","base 3, 3","low obs 2, 3","high obs 3, 2"),each=2), vals=c(quantile(rho_rb1,probs=c(.5)),repkf1$rho, quantile(rho_rb,probs=c(.5)),repkf$rho, quantile(rho_rb3,probs=c(.5)),repkf3$rho, quantile(rho_rb2,probs=c(.5)),repkf2$rho), type=as.factor(rep(c("Median posterior","MLE"),4))) rhodf3p$median<-quantile(rho_rb3,probs=c(.5)) rhodf3p$mle<-repkf3$rho pr<-ggplot(rhodfp,aes(value)) pr<-pr+geom_density(size=1.2,aes(fill=distribution),alpha=.6) pr<-pr+facet_wrap(~ scn) pr<-pr+geom_vline(data=linerhodf,aes(xintercept=vals,color=type),size=1.3) pr<-pr+ scale_color_manual(values=c("blue4","red")) #+ scale_color_manual(values=c("red","black","#E69F00", "#56B4E9"))scale_fill_brewer(palette="Dark2")+ pr<-pr+theme_bw(16) pr<-pr+labs(x = expression(paste(rho,"values"))) pr ggsave("../figs/priors_posteriors_rho.pdf", plot=pr, width=10,height=8) dfu<-data.frame(broodyear=rep(SR$BroodYear,8), umsy=c(umsyposteriorsummary$x[,2],umsyposteriorsummary1$x[,2], umsyposteriorsummary2$x[,2],umsyposteriorsummary3$x[,2], repkf$umsy,repkf1$umsy,repkf2$umsy,repkf3$umsy ), #,umsyposteriorsummary3$x[,2] scn=rep(rep(c("base 3, 3","uninformative 1, 1","low obs 3, 2","high obs 2, 3"),each=length(SR$BroodYear)),2),#,"2,3" lower=c(umsyposteriorsummary$x[,1],umsyposteriorsummary1$x[,1], umsyposteriorsummary2$x[,1],umsyposteriorsummary3$x[,1], rep(NA,each=length(SR$BroodYear)*4)), upper=c(umsyposteriorsummary$x[,3],umsyposteriorsummary1$x[,3], umsyposteriorsummary2$x[,3],umsyposteriorsummary3$x[,3], rep(NA,each=length(SR$BroodYear)*4)), 
type=rep(c("Posterior", "MLE"),each=length(SR$BroodYear)*4) ) pu<-ggplot(dfu) pu<-pu+geom_ribbon(aes(x=broodyear,ymin=lower,ymax=upper, fill=scn),alpha=.4) pu<-pu+geom_line(aes(x=broodyear,y=umsy,col=scn), size=1.2) pu<-pu+ facet_wrap(~type) pu<-pu+theme_bw(16) pu<-pu+labs(title = "Recursive Bayes model - Umsy time series", y = expression(u[MSY]), x = "Brood year") pu dfa<-data.frame(broodyear=rep(SR$BroodYear,8), a=c(posterior_recursive$fit_summary$summary[5:34,6], posterior_recursive1$fit_summary$summary[5:34,6], posterior_recursive2$fit_summary$summary[5:34,6], posterior_recursive3$fit_summary$summary[5:34,6], repkf$alpha,repkf1$alpha,repkf2$alpha,repkf3$alpha ), #,umsyposteriorsummary3$x[,2] scn=rep(rep(c("base 3, 3","uninformative 1, 1","low obs 3, 2","high obs 2, 3"),each=length(SR$BroodYear)),2),#,"2,3" lower=c(posterior_recursive$fit_summary$summary[5:34,4], posterior_recursive1$fit_summary$summary[5:34,4], posterior_recursive2$fit_summary$summary[5:34,4], posterior_recursive3$fit_summary$summary[5:34,4], rep(NA,each=length(SR$BroodYear)*4)), upper=c(posterior_recursive$fit_summary$summary[5:34,8], posterior_recursive1$fit_summary$summary[5:34,8], posterior_recursive2$fit_summary$summary[5:34,8], posterior_recursive3$fit_summary$summary[5:34,8], rep(NA,each=length(SR$BroodYear)*4)), type=rep(c("Posterior", "MLE"),each=length(SR$BroodYear)*4) ) pa<-ggplot(dfu) pa<-pa+geom_ribbon(aes(x=broodyear,ymin=lower,ymax=upper, fill=scn),alpha=.4) pa<-pa+geom_line(aes(x=broodyear,y=umsy,col=scn), size=1.2) pa<-pa+ facet_wrap(~type) pa<-pa+theme_bw(16) pa<-pa+labs(title = "Recursive Bayes model - a time series", y = expression(a[t]), x = "Brood year") pa ggsave("../recursive_a_priors.pdf", plot=pa, width=10,height=7)
/R/SRkf_a_play.R
no_license
Pacific-salmon-assess/RickerModels
R
false
false
14,995
r
#================================================================= #Simple fit exploration of SR data #Author: Catarina Wor #Date: April 10th 2018 #================================================================= #load in required packages and libraries library(ggplot2) library(TMB) #library(TMBhelper) -- not available in most recent R versions library(bayesplot) library(tmbstan) library(reshape) library(xtable) #load in directories.R source("C:/Users/worc/Documents/HarrisonSR/R/directories.R") #source("/Users/catarinawor/Documents/work/Chinook/srkf/R/directories.R") #setwd(model_dir) source("calc_quantile.R") source("TMB_functions.R") #read in simple data set SR<-read.csv("../data/Harrison_simples_Apr18.csv") iteracs=100000 # LM version #simple model srm<-lm(log(SR$R/SR$S_adj)~ SR$S_adj) a_srm<-srm$coefficients[1] b_srm<--srm$coefficients[2] alpha<-exp(a_srm) u_msy=.5*a_srm-0.07*a_srm^2 predR1<- SR$S_adj*exp(a_srm-b_srm*SR$S_adj) mydata<-list(obs_logR=log(SR$R),obs_S=SR$S_adj) parameters_simple <- list( alpha=(a_srm), logbeta = log(b_srm), logSigObs= log(.4) ) simpl<-list( dat=mydata, params=parameters_simple, rndm=NULL, dll="Ricker_simple", DIR=model_dir ) simpleobj<-runTMB(simpl) simpleobj$report() #MCMC simpleB<-list( obj=simpleobj, nchain=3, iter=iteracs, lowbd=c(0.1,-13.5,-6.0), hibd=c(4.0,-8.5,5.0) ) posterior_simple<-posteriorsdf(simpleB) #plots #plot_posteriors(posterior_simple$posteriors) #posteriors of derived quantities -- the interesting ones simpdf<-posterior_simple$posteriors a<-(simpdf$value[simpdf$parameters=="alpha"]) alpha<-exp(simpdf$value[simpdf$parameters=="alpha"]) beta<-exp(simpdf$value[simpdf$parameters=="logbeta"]) Smax<-1/exp(simpdf$value[simpdf$parameters=="logbeta"]) sig<-exp(simpdf$value[simpdf$parameters=="logSigObs"]) umsy_simple<-.5*a-0.07*a^2 Rprdbsimple<-matrix(NA,ncol=length(SR$BroodYear),nrow=length(sig)) meanRsimple<-NULL varRsimple<-NULL for(i in 1:length(sig)){ Rprdbsimple[i,]<-as.numeric(SR$S_adj*exp(a[i])*exp(-SR$S_adj* 
beta[i])) meanRsimple[i]<-mean(Rprdbsimple[i,]/SR$R) varRsimple[i]<-var(Rprdbsimple[i,]/SR$R) } #============================================================================================================= #Recursive Bayes model - testing the scenario with low observation error only mydata3<-list(obs_logR=log(SR$R),obs_S=SR$S_adj,prbeta1=2.0,prbeta2=3.0) parameters_recursive <- list( alphao=a_srm, logbeta = log(b_srm), rho=.2, logvarphi= 0.1, alpha=rep(0.9,length(SR$R)) ) recursive3<-list( dat=mydata3, params=parameters_recursive, rndm="alpha", dll="Rickerkf_ratiovar", DIR=model_dir ) compile("Rickerkf_ratiovar.cpp",libtmb=FALSE, "-O1 -g", DLLFLAGS="") dyn.load(dynlib("Rickerkf_ratiovar")) obj3<-MakeADFun(mydata3,parameters_recursive,random="alpha", DLL="Rickerkf_ratiovar") newtonOption(obj3, smartsearch=FALSE) opt<-nlminb(obj3$par,obj3$fn,obj3$gr) repkf3<-obj3$report() sdreport(obj3) recursiveB3<-list( obj=obj3, nchain=3, iter=iteracs, lowbd=c(0.1,-13.5,0.0,-3.0),#rep(0.001,length(SR$R))), hibd=c(4.0,-9.,1.0,5.0)#,rep(4.0,length(SR$R))) ) posterior_recursive3<-posteriorsdf(recursiveB3) recrsdf3<-posterior_recursive3$posteriors a_rb3<-(recrsdf3$value[recrsdf3$parameters=="alphao"]) beta_rb3<-exp(recrsdf3$value[recrsdf3$parameters=="logbeta"]) Smax_rb3<-1/exp(recrsdf3$value[recrsdf3$parameters=="logbeta"]) rho_rb3<-(recrsdf3$value[recrsdf3$parameters=="rho"]) soa3<-recrsdf3[grep("alpha",recrsdf3$parameters),] soa3$umsy<-.5*soa3$value-0.07*soa3$value^2 umsyposteriorsummary3<-aggregate(soa3$umsy,list(soa3$parameters),function(x){quantile(x,probs=c(0.025,.5,.975))}) umsyposteriorsummary3<-umsyposteriorsummary3[c(1,12,23,25:30,2:11,13:22,24),] #==================================================================================================================== #model with posfun on alpha parameters_recursive_loga <- list( logalphao=log(a_srm), logbeta = log(b_srm), rho=.2, logvarphi= 0.1, logalpha=log(rep(0.9,length(SR$R))) ) 
compile("Rickerkf_ratiovar_aprior.cpp",libtmb=FALSE, "-O1 -g", DLLFLAGS="") dyn.load(dynlib("Rickerkf_ratiovar_aprior")) obj3_aprior<-MakeADFun(mydata3,parameters_recursive_loga ,random="logalpha",DLL="Rickerkf_ratiovar_aprior") #newtonOption(obj3_aprior, smartsearch=FALSE) obj3_aprior$fn() obj3_aprior$gr() opt_apr<-nlminb(obj3_aprior$par,obj3_aprior$fn,obj3_aprior$gr) repkf3_aprior<-obj3_aprior$report() sdreport(obj3_aprior) recursiveB3_aprior<-list( obj=obj3_aprior, nchain=3, iter=iteracs, lowbd=c(0.1,-13.5,0.0,-3.0), hibd=c(4.0,-9.,1.0,5.0) ) posterior_recursive3_aprior<-posteriorsdf(recursiveB3_aprior) #============================================================================== #plots dfac<-data.frame(broodyear=rep(SR$BroodYear,8), a=c(posterior_recursive3$fit_summary$summary[5:34,6], exp(posterior_recursive3_aprior$fit_summary$summary[5:34,6]) ), #,umsyposteriorsummary3$x[,2] scn=rep(c("no bounds","bounds"),each=length(SR$BroodYear)), lower=c(posterior_recursive3$fit_summary$summary[5:34,4], exp(posterior_recursive3_aprior$fit_summary$summary[5:34,4])), upper=c(posterior_recursive3$fit_summary$summary[5:34,8], exp(posterior_recursive3_aprior$fit_summary$summary[5:34,8]))) pa<-ggplot(dfac) pa<-pa+geom_ribbon(aes(x=broodyear,ymin=lower,ymax=upper, fill=scn),alpha=.4) pa<-pa+geom_line(aes(x=broodyear,y=a,col=scn), size=1.2) pa<-pa+theme_bw(16) pa<-pa+labs(title = "Recursive Bayes model - a time series", y = expression(a[t]), x = "Brood year") pa moltentmp<-melt(soa[,-c(1,2)]) moltentmp$iterations<-paste(soa$iterations,soa$chains,sep=".") tmp<-cast(moltentmp[moltentmp$variable=="value",],formula = iterations~ parameters , mean, value = 'value') tmpu<-cast(moltentmp[moltentmp$variable=="umsy",],formula = iterations~ parameters , mean, value = 'value') moltentmp1<-melt(soa1[,-c(1,2)]) moltentmp1$iterations<-paste(soa1$iterations,soa1$chains,sep=".") tmp1<-cast(moltentmp1[moltentmp1$variable=="value",],formula = iterations~ parameters , mean, value = 'value') 
tmpu1<-cast(moltentmp1[moltentmp1$variable=="umsy",],formula = iterations~ parameters , mean, value = 'value') moltentmp2<-melt(soa2[,-c(1,2)]) moltentmp2$iterations<-paste(soa2$iterations,soa2$chains,sep=".") tmp2<-cast(moltentmp2[moltentmp2$variable=="value",],formula = iterations~ parameters , mean, value = 'value') tmpu2<-cast(moltentmp2[moltentmp2$variable=="umsy",],formula = iterations~ parameters , mean, value = 'value') moltentmp3<-melt(soa3[,-c(1,2)]) moltentmp3$iterations<-paste(soa3$iterations,soa3$chains,sep=".") tmp3<-cast(moltentmp3[moltentmp3$variable=="value",],formula = iterations~ parameters , mean, value = 'value') tmpu3<-cast(moltentmp3[moltentmp3$variable=="umsy",],formula = iterations~ parameters , mean, value = 'value') sum(moltentmp$value[moltentmp$variable=="umsy"]>0) summary(moltentmp) dim(moltentmp[moltentmp$variable=="umsy",]) dim(tmpu) head(tmp[,c(2,13,24,26:31,3:12,14:23,25)]) head(tmpu[,c(2,13,24,26:31,3:12,14:23,25)]) Rprdb0<-matrix(NA,ncol=length(SR$BroodYear),nrow=nrow(tmp)) Rprdb1<-matrix(NA,ncol=length(SR$BroodYear),nrow=nrow(tmp)) Rprdb2<-matrix(NA,ncol=length(SR$BroodYear),nrow=nrow(tmp)) Rprdb3<-matrix(NA,ncol=length(SR$BroodYear),nrow=nrow(tmp)) meanR0<-NULL meanR1<-NULL meanR2<-NULL meanR3<-NULL varR0<-NULL varR1<-NULL varR2<-NULL varR3<-NULL avga0<-NULL avga1<-NULL avga2<-NULL avga3<-NULL avgu0<-NULL avgu1<-NULL avgu2<-NULL avgu3<-NULL i=1 for(i in 1:nrow(tmp)){# Rprdb0[i,]<-as.numeric(SR$S_adj*exp(tmp[i,c(2,13,24,26:31,3:12,14:23,25)])*exp(-SR$S_adj* beta_rb[i])) Rprdb1[i,]<-as.numeric(SR$S_adj*exp(tmp1[i,c(2,13,24,26:31,3:12,14:23,25)])*exp(-SR$S_adj* beta_rb1[i])) Rprdb2[i,]<-as.numeric(SR$S_adj*exp(tmp2[i,c(2,13,24,26:31,3:12,14:23,25)])*exp(-SR$S_adj* beta_rb2[i])) Rprdb3[i,]<-as.numeric(SR$S_adj*exp(tmp3[i,c(2,13,24,26:31,3:12,14:23,25)])*exp(-SR$S_adj* beta_rb3[i])) meanR0[i]<-mean(Rprdb0[i,]/SR$R) meanR1[i]<-mean(Rprdb1[i,]/SR$R) meanR2[i]<-mean(Rprdb2[i,]/SR$R) meanR3[i]<-mean(Rprdb3[i,]/SR$R) 
varR0[i]<-var(Rprdb0[i,]/SR$R) varR1[i]<-var(Rprdb1[i,]/SR$R) varR2[i]<-var(Rprdb2[i,]/SR$R) varR3[i]<-var(Rprdb3[i,]/SR$R) avga0[i]<-mean(exp(unlist(tmp[i,c(21:23,25)]))) avga1[i]<-mean(exp(unlist(tmp1[i,c(21:23,25)]))) avga2[i]<-mean(exp(unlist(tmp2[i,c(21:23,25)]))) avga3[i]<-mean(exp(unlist(tmp3[i,c(21:23,25)]))) avgu0[i]<-mean(unlist(tmpu[i,c(21:23,25)])) avgu1[i]<-mean(unlist(tmpu1[i,c(21:23,25)])) avgu2[i]<-mean(unlist(tmpu2[i,c(21:23,25)])) avgu3[i]<-mean(unlist(tmpu3[i,c(21:23,25)])) } meanu<-c(quantile(umsy_simple,.5),quantile(avgu0,.5),quantile(avgu2,.5),quantile(avgu3,.5)) # meana<-c(quantile(alpha,.5),quantile(avga0,.5),quantile(avga2,.5),quantile(avga3,.5)) # meanR<-c(quantile(meanRsimple,.5),quantile(meanR0,.5),quantile(meanR2,.5),quantile(meanR3,.5))#quantile(meanR1,.5), varR<-c(quantile(varRsimple,.5),quantile(varR0,.5),quantile(varR2,.5),quantile(varR3,.5)) #,quantile(varR1,.5) bs<-c(quantile(Smax,.5),quantile(Smax_rb,.5),quantile(Smax_rb2,.5),quantile(Smax_rb3,.5)) vraptab<- data.frame(scenario=rep(c("simple Ricker", "RB base 3, 3","RB high obs 3, 2","RB low obs 2, 3"),1),aavg4=meana, b=bs,meanR=meanR,varR=varR,uavg4=meanu) colnames(vraptab)<-c("scenario","$\\widetilde{a_{avg4}}$","$\\widetilde{b}$","meanR","varR","$U_{MSY avg}$") vraptabtmp<-xtable(vraptab, digits=matrix(c(2,rep(2,nrow(vraptab)-1)),ncol=ncol(vraptab)+1,nrow=nrow(vraptab),byrow=F),sanitize.text.function=function(x){x}, ,caption = "Median parameter estimates across simple Ricker recruitment model and three time-varying productivity scenarios (RB) with different $\\rho$ priors. 
Parameters were transformed to meet VRAP requirements.", label="vraptab" ) print(vraptabtmp,sanitize.text.function = function(x) {x}, include.rownames = FALSE, file="../tex/vrap_params2.tex",caption.placement = "top", label="vraptab") #======================================================================== #Comparison tables #parameter estimates tab<-data.frame(Parameter=c("b","$S_{max}$","$\\rho$",paste("$\\alpha$",SR$BroodYear)), "base" =c(apply(cbind(beta_rb,Smax_rb,rho_rb),2,function(x) quantile(x, .5)),exp(posterior_recursive$fit_summary$summary[5:34,6])), "uninformative"=c(apply(cbind(beta_rb1,Smax_rb1,rho_rb1),2,function(x) quantile(x, .5)),exp(posterior_recursive1$fit_summary$summary[5:34,6])), "low"=c(apply(cbind(beta_rb2,Smax_rb2,rho_rb2),2,function(x) quantile(x, .5)),exp(posterior_recursive2$fit_summary$summary[5:34,6])), "high"=c(apply(cbind(beta_rb3,Smax_rb3,rho_rb3),2,function(x) quantile(x, .5)),exp(posterior_recursive3$fit_summary$summary[5:34,6]))) tabtmp<-xtable(tab, digits=matrix(c(-2,rep(2,nrow(tab)-1)),ncol=ncol(tab)+1,nrow=nrow(tab),byrow=F) ,caption = "Median parameter estimates for the recursive Bayes model across four $\\rho$ prior scenarios. 
" ) print(tabtmp,sanitize.text.function = function(x) {x}, include.rownames = FALSE, file="../tex/recursive_tab_params_comparison.tex",caption.placement = "top", label="parreccomp") #rho estimates, priors, posteriors #table for vrap #======================================================================== #Comparison figures #MLE versus posteriors rhodfp<-rbind(rhodf0p,rhodf1p,rhodf2p,rhodf3p) names(rhodfp) linerhodf<-data.frame(scn=rep(c("uninformative 1, 1","base 3, 3","low obs 2, 3","high obs 3, 2"),each=2), vals=c(quantile(rho_rb1,probs=c(.5)),repkf1$rho, quantile(rho_rb,probs=c(.5)),repkf$rho, quantile(rho_rb3,probs=c(.5)),repkf3$rho, quantile(rho_rb2,probs=c(.5)),repkf2$rho), type=as.factor(rep(c("Median posterior","MLE"),4))) rhodf3p$median<-quantile(rho_rb3,probs=c(.5)) rhodf3p$mle<-repkf3$rho pr<-ggplot(rhodfp,aes(value)) pr<-pr+geom_density(size=1.2,aes(fill=distribution),alpha=.6) pr<-pr+facet_wrap(~ scn) pr<-pr+geom_vline(data=linerhodf,aes(xintercept=vals,color=type),size=1.3) pr<-pr+ scale_color_manual(values=c("blue4","red")) #+ scale_color_manual(values=c("red","black","#E69F00", "#56B4E9"))scale_fill_brewer(palette="Dark2")+ pr<-pr+theme_bw(16) pr<-pr+labs(x = expression(paste(rho,"values"))) pr ggsave("../figs/priors_posteriors_rho.pdf", plot=pr, width=10,height=8) dfu<-data.frame(broodyear=rep(SR$BroodYear,8), umsy=c(umsyposteriorsummary$x[,2],umsyposteriorsummary1$x[,2], umsyposteriorsummary2$x[,2],umsyposteriorsummary3$x[,2], repkf$umsy,repkf1$umsy,repkf2$umsy,repkf3$umsy ), #,umsyposteriorsummary3$x[,2] scn=rep(rep(c("base 3, 3","uninformative 1, 1","low obs 3, 2","high obs 2, 3"),each=length(SR$BroodYear)),2),#,"2,3" lower=c(umsyposteriorsummary$x[,1],umsyposteriorsummary1$x[,1], umsyposteriorsummary2$x[,1],umsyposteriorsummary3$x[,1], rep(NA,each=length(SR$BroodYear)*4)), upper=c(umsyposteriorsummary$x[,3],umsyposteriorsummary1$x[,3], umsyposteriorsummary2$x[,3],umsyposteriorsummary3$x[,3], rep(NA,each=length(SR$BroodYear)*4)), 
type=rep(c("Posterior", "MLE"),each=length(SR$BroodYear)*4) ) pu<-ggplot(dfu) pu<-pu+geom_ribbon(aes(x=broodyear,ymin=lower,ymax=upper, fill=scn),alpha=.4) pu<-pu+geom_line(aes(x=broodyear,y=umsy,col=scn), size=1.2) pu<-pu+ facet_wrap(~type) pu<-pu+theme_bw(16) pu<-pu+labs(title = "Recursive Bayes model - Umsy time series", y = expression(u[MSY]), x = "Brood year") pu dfa<-data.frame(broodyear=rep(SR$BroodYear,8), a=c(posterior_recursive$fit_summary$summary[5:34,6], posterior_recursive1$fit_summary$summary[5:34,6], posterior_recursive2$fit_summary$summary[5:34,6], posterior_recursive3$fit_summary$summary[5:34,6], repkf$alpha,repkf1$alpha,repkf2$alpha,repkf3$alpha ), #,umsyposteriorsummary3$x[,2] scn=rep(rep(c("base 3, 3","uninformative 1, 1","low obs 3, 2","high obs 2, 3"),each=length(SR$BroodYear)),2),#,"2,3" lower=c(posterior_recursive$fit_summary$summary[5:34,4], posterior_recursive1$fit_summary$summary[5:34,4], posterior_recursive2$fit_summary$summary[5:34,4], posterior_recursive3$fit_summary$summary[5:34,4], rep(NA,each=length(SR$BroodYear)*4)), upper=c(posterior_recursive$fit_summary$summary[5:34,8], posterior_recursive1$fit_summary$summary[5:34,8], posterior_recursive2$fit_summary$summary[5:34,8], posterior_recursive3$fit_summary$summary[5:34,8], rep(NA,each=length(SR$BroodYear)*4)), type=rep(c("Posterior", "MLE"),each=length(SR$BroodYear)*4) ) pa<-ggplot(dfu) pa<-pa+geom_ribbon(aes(x=broodyear,ymin=lower,ymax=upper, fill=scn),alpha=.4) pa<-pa+geom_line(aes(x=broodyear,y=umsy,col=scn), size=1.2) pa<-pa+ facet_wrap(~type) pa<-pa+theme_bw(16) pa<-pa+labs(title = "Recursive Bayes model - a time series", y = expression(a[t]), x = "Brood year") pa ggsave("../recursive_a_priors.pdf", plot=pa, width=10,height=7)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make.COSERO.input.r \name{make.COSERO.input} \alias{make.COSERO.input} \title{Create a T_IZ or P_IZ file from a set of INCA-Files} \usage{ make.COSERO.input( f, shape, nzraster, output = NULL, otf = FALSE, sortbynz = TRUE, fillmissing = TRUE, ... ) } \arguments{ \item{f}{A character vector of INCA files} \item{shape}{Absolute path to a shapefile (points) at which locations the values of the INCA files are extracted} \item{nzraster}{Path to a raster with the nz information} \item{output}{name of the outputfile or NULL} \item{otf}{logical. write on-the-fly (don't hold the whole matrix in memory)} \item{sortbynz}{logical. Should the output be sorted according to the nz information?} \item{fillmissing}{logical. Should missing time steps be filled in the output? See details.} \item{...}{parameters passed to readINCABIL} } \value{ if output == NULL the IZ-matrix is returned. Else the IZ-File is written and the IZ-matrix is returned. } \description{ Create a T_IZ or P_IZ file from a set of INCA-Files } \details{ Reads a set of INCA files (Precipitation or Temperature) and calculates a matrix that can be read by COSERO. The shapefile must be projected in the same coordinate system as the nzraster as well as the INCA files. Normally this is "+proj=lcc +lat_1=46 +lat_2=49 +lat_0=47.5 +lon_0=13.33333333333333 +x_0=400000 +y_0=400000 +ellps=bessel +units=m +no_defs" \code{fillmissing}: If fillmissing == TRUE the output will be checked on missing timesteps and - if any - will be filled using \code{\link{fill.missing}}. Note that if otf == TRUE, the filled matrix will be written to a copy of the otf-output } \seealso{ \link{readINCABIL}, \link{fill.missing} } \author{ Simon Frey }
/man/make.COSERO.input.Rd
no_license
freysimon/ZAMGR
R
false
true
1,815
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make.COSERO.input.r \name{make.COSERO.input} \alias{make.COSERO.input} \title{Create a T_IZ or P_IZ file from a set of INCA-Files} \usage{ make.COSERO.input( f, shape, nzraster, output = NULL, otf = FALSE, sortbynz = TRUE, fillmissing = TRUE, ... ) } \arguments{ \item{f}{A character vector of INCA files} \item{shape}{Absolute path to a shapefile (points) at which locations the values of the INCA files are extracted} \item{nzraster}{Path to a raster with the nz information} \item{output}{name of the outputfile or NULL} \item{otf}{logical. write on-the-fly (don't hold the whole matrix in memory)} \item{sortbynz}{logical. Should the output be sorted according to the nz information?} \item{fillmissing}{logical. Should missing time steps be filled in the output? See details.} \item{...}{parameters passed to readINCABIL} } \value{ if output == NULL the IZ-matrix is returned. Else the IZ-File is written and the IZ-matrix is returned. } \description{ Create a T_IZ or P_IZ file from a set of INCA-Files } \details{ Reads a set of INCA files (Precipitation or Temperature) and calculates a matrix that can be read by COSERO. The shapefile must be projected in the same coordinate system as the nzraster as well as the INCA files. Normally this is "+proj=lcc +lat_1=46 +lat_2=49 +lat_0=47.5 +lon_0=13.33333333333333 +x_0=400000 +y_0=400000 +ellps=bessel +units=m +no_defs" \code{fillmissing}: If fillmissing == TRUE the output will be checked on missing timesteps and - if any - will be filled using \code{\link{fill.missing}}. Note that if otf == TRUE, the filled matrix will be written to a copy of the otf-output } \seealso{ \link{readINCABIL}, \link{fill.missing} } \author{ Simon Frey }
context("test-layer-spatial-raster.R") # max test length was exceeded on CRAN, so these tests are skipped if (identical(Sys.getenv("NOT_CRAN"), "true")) { test_that("layer-spatial works for raster objects", { load_longlake_data() # should have little grey thing around it print( ggplot() + layer_spatial(longlake_osm) + layer_spatial(longlake_depthdf) + labs(caption = "Should have a little grey area around the sides, roughly N-S projection") ) # should not have little grey thing around it print( ggplot() + annotation_spatial(longlake_osm) + layer_spatial(longlake_depthdf) + labs(caption = "Should have no grey area around the sides, roughly N-S projection") ) # grey thing print( ggplot() + layer_spatial(longlake_osm) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption = "Should have a little grey area around the sides, rotated projection") ) # no grey thing print( ggplot() + annotation_spatial(longlake_osm) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption = "Should have no grey area around the sides, rotated projection") ) # with alpha print( ggplot() + annotation_spatial(longlake_osm, alpha = 0.7) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption = "Should have no grey area around the sides, rotated projection, slight transparency") ) # with aesthetics print( ggplot() + layer_spatial(longlake_osm, aes()) + layer_spatial(longlake_depthdf) ) print( ggplot() + layer_spatial(longlake_osm, aes(alpha = stat(band1), fill = NULL)) + layer_spatial(longlake_depthdf) ) print( ggplot() + layer_spatial(longlake_depth_raster) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) ) # still a problem with "no non-missing arguments to max()" # expect_silent( # print( # ggplot() + # annotation_spatial(longlake_osm) + # layer_spatial(longlake_depthdf) + # coord_sf(crs = 3978) # ) # ) # # expect_silent( # print( # ggplot() + # layer_spatial(longlake_osm) + # layer_spatial(longlake_depthdf) # ) # ) # graphical tests 
so... expect_true(TRUE) }) test_that("layer-spatial works for raster objects", { load_longlake_data() # should have little grey thing around it print( ggplot() + layer_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "Should have a little grey area around the sides, roughly N-S projection") ) # try on gr device with no pixel concept withr::with_pdf(file.path(tempdir(), "test.pdf"), { print( ggplot() + layer_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "Should have a little grey area around the sides, roughly N-S projection") ) }, height = 10, width = 10) withr::with_cairo_pdf(file.path(tempdir(), "test.pdf"), { print( ggplot() + layer_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "Should have a little grey area around the sides, roughly N-S projection") ) }, height = 10, width = 10) withr::with_png(file.path(tempdir(), "test.pdf"), { print( ggplot() + layer_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "Should have a little grey area around the sides, roughly N-S projection") ) }, res = 300) # should not have little grey thing around it print( ggplot() + annotation_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "Should have no grey area around the sides, roughly N-S projection") ) # grey thing print( ggplot() + layer_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption = "Should have a little grey area around the sides, rotated projection") ) # no grey thing print( ggplot() + annotation_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption = "Should have no grey area around the sides, rotated projection") ) # with alpha print( ggplot() + annotation_spatial(longlake_osm, alpha = 0.7, lazy = TRUE) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption 
= "Should have no grey area around the sides, rotated projection, slight transparency") ) # with aesthetics (currently not implemented) # print( # ggplot() + # layer_spatial(longlake_osm, aes(), lazy = TRUE) + # layer_spatial(longlake_depthdf) # ) # # print( # ggplot() + # layer_spatial(longlake_osm, aes(alpha = stat(band1), fill = NULL), lazy = TRUE) + # layer_spatial(longlake_depthdf) # ) # # print( # ggplot() + # layer_spatial(longlake_depth_raster, lazy = TRUE) + # layer_spatial(longlake_depthdf) + # coord_sf(crs = 3978) # ) # graphical tests so... expect_true(TRUE) }) test_that("layer-spatial works for stars objects", { stars_rast <- stars::read_stars(system.file("longlake/longlake_depth.tif", package = "ggspatial")) stars_rgb <- stars::read_stars(system.file("longlake/longlake.tif", package = "ggspatial")) print(ggplot() + layer_spatial(stars_rast) + labs(caption = "longlake raster read by stars")) print(ggplot() + layer_spatial(stars_rgb) + labs(caption = "longlake rgb read by stars")) load_longlake_data() print( ggplot() + annotation_spatial(stars_rgb) + layer_spatial(longlake_depthdf) + labs(caption = "annotation stars rgb") ) print( ggplot() + annotation_spatial(stars_rgb, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "annotation stars rgb with lazy=TRUE") ) # graphical tests so... expect_true(TRUE) }) test_that("layer-spatial raster does not throw warnings", { load_longlake_data() # this error doesn't show up unless there's a wrapper around the call # such as in testthat or RMarkdown. 
this is replicated below # the culprit is raster::projectRaster() # # withCallingHandlers( # print( # ggplot() + # annotation_map_tile(type = "osm", progress = "none") + # layer_spatial(longlake_depthdf) + # coord_sf(crs = 26920) + # labs(caption = "just checking whether it prints without any messages") # ), # warning = function(w) stop(w) # ) expect_silent( print( ggplot() + layer_spatial(longlake_osm) + labs(caption = "just checking whether it prints without any messages") ) ) expect_silent( print( ggplot() + layer_spatial(longlake_depth_raster) + labs(caption = "just checking whether it prints without any messages") ) ) expect_silent( print( ggplot() + annotation_map_tile(type = "osm", progress = "none") + layer_spatial(longlake_depthdf) + coord_sf(crs = 26920) + labs(caption = "just checking whether it prints without any messages") ) ) }) }
/tests/testthat/test-layer-spatial-raster.R
no_license
edzer/ggspatial
R
false
false
7,799
r
context("test-layer-spatial-raster.R") # max test length was exceeded on CRAN, so these tests are skipped if (identical(Sys.getenv("NOT_CRAN"), "true")) { test_that("layer-spatial works for raster objects", { load_longlake_data() # should have little grey thing around it print( ggplot() + layer_spatial(longlake_osm) + layer_spatial(longlake_depthdf) + labs(caption = "Should have a little grey area around the sides, roughly N-S projection") ) # should not have little grey thing around it print( ggplot() + annotation_spatial(longlake_osm) + layer_spatial(longlake_depthdf) + labs(caption = "Should have no grey area around the sides, roughly N-S projection") ) # grey thing print( ggplot() + layer_spatial(longlake_osm) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption = "Should have a little grey area around the sides, rotated projection") ) # no grey thing print( ggplot() + annotation_spatial(longlake_osm) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption = "Should have no grey area around the sides, rotated projection") ) # with alpha print( ggplot() + annotation_spatial(longlake_osm, alpha = 0.7) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption = "Should have no grey area around the sides, rotated projection, slight transparency") ) # with aesthetics print( ggplot() + layer_spatial(longlake_osm, aes()) + layer_spatial(longlake_depthdf) ) print( ggplot() + layer_spatial(longlake_osm, aes(alpha = stat(band1), fill = NULL)) + layer_spatial(longlake_depthdf) ) print( ggplot() + layer_spatial(longlake_depth_raster) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) ) # still a problem with "no non-missing arguments to max()" # expect_silent( # print( # ggplot() + # annotation_spatial(longlake_osm) + # layer_spatial(longlake_depthdf) + # coord_sf(crs = 3978) # ) # ) # # expect_silent( # print( # ggplot() + # layer_spatial(longlake_osm) + # layer_spatial(longlake_depthdf) # ) # ) # graphical tests 
so... expect_true(TRUE) }) test_that("layer-spatial works for raster objects", { load_longlake_data() # should have little grey thing around it print( ggplot() + layer_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "Should have a little grey area around the sides, roughly N-S projection") ) # try on gr device with no pixel concept withr::with_pdf(file.path(tempdir(), "test.pdf"), { print( ggplot() + layer_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "Should have a little grey area around the sides, roughly N-S projection") ) }, height = 10, width = 10) withr::with_cairo_pdf(file.path(tempdir(), "test.pdf"), { print( ggplot() + layer_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "Should have a little grey area around the sides, roughly N-S projection") ) }, height = 10, width = 10) withr::with_png(file.path(tempdir(), "test.pdf"), { print( ggplot() + layer_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "Should have a little grey area around the sides, roughly N-S projection") ) }, res = 300) # should not have little grey thing around it print( ggplot() + annotation_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "Should have no grey area around the sides, roughly N-S projection") ) # grey thing print( ggplot() + layer_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption = "Should have a little grey area around the sides, rotated projection") ) # no grey thing print( ggplot() + annotation_spatial(longlake_osm, lazy = TRUE) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption = "Should have no grey area around the sides, rotated projection") ) # with alpha print( ggplot() + annotation_spatial(longlake_osm, alpha = 0.7, lazy = TRUE) + layer_spatial(longlake_depthdf) + coord_sf(crs = 3978) + labs(caption 
= "Should have no grey area around the sides, rotated projection, slight transparency") ) # with aesthetics (currently not implemented) # print( # ggplot() + # layer_spatial(longlake_osm, aes(), lazy = TRUE) + # layer_spatial(longlake_depthdf) # ) # # print( # ggplot() + # layer_spatial(longlake_osm, aes(alpha = stat(band1), fill = NULL), lazy = TRUE) + # layer_spatial(longlake_depthdf) # ) # # print( # ggplot() + # layer_spatial(longlake_depth_raster, lazy = TRUE) + # layer_spatial(longlake_depthdf) + # coord_sf(crs = 3978) # ) # graphical tests so... expect_true(TRUE) }) test_that("layer-spatial works for stars objects", { stars_rast <- stars::read_stars(system.file("longlake/longlake_depth.tif", package = "ggspatial")) stars_rgb <- stars::read_stars(system.file("longlake/longlake.tif", package = "ggspatial")) print(ggplot() + layer_spatial(stars_rast) + labs(caption = "longlake raster read by stars")) print(ggplot() + layer_spatial(stars_rgb) + labs(caption = "longlake rgb read by stars")) load_longlake_data() print( ggplot() + annotation_spatial(stars_rgb) + layer_spatial(longlake_depthdf) + labs(caption = "annotation stars rgb") ) print( ggplot() + annotation_spatial(stars_rgb, lazy = TRUE) + layer_spatial(longlake_depthdf) + labs(caption = "annotation stars rgb with lazy=TRUE") ) # graphical tests so... expect_true(TRUE) }) test_that("layer-spatial raster does not throw warnings", { load_longlake_data() # this error doesn't show up unless there's a wrapper around the call # such as in testthat or RMarkdown. 
this is replicated below # the culprit is raster::projectRaster() # # withCallingHandlers( # print( # ggplot() + # annotation_map_tile(type = "osm", progress = "none") + # layer_spatial(longlake_depthdf) + # coord_sf(crs = 26920) + # labs(caption = "just checking whether it prints without any messages") # ), # warning = function(w) stop(w) # ) expect_silent( print( ggplot() + layer_spatial(longlake_osm) + labs(caption = "just checking whether it prints without any messages") ) ) expect_silent( print( ggplot() + layer_spatial(longlake_depth_raster) + labs(caption = "just checking whether it prints without any messages") ) ) expect_silent( print( ggplot() + annotation_map_tile(type = "osm", progress = "none") + layer_spatial(longlake_depthdf) + coord_sf(crs = 26920) + labs(caption = "just checking whether it prints without any messages") ) ) }) }
x<-1 print(x) x msg <- "hello" y <- 1:20 ##Sequence of number from 1 to 20 assigned to vector y y x <- c(0.5, 0.6) z <- c(TRUE, FALSE) a <- c(T,F) b <- c("a", "b", "c") d <- 9:29 e <- c(1+0i, 2+4i) f <- vector("numeric", length = 10) x f g <- c(1.7, "a") ##character h <- c(TRUE, 2) ##numeric i <- c("A", TRUE) ##character class(y) as.numeric(y) as.character(y) ##will convert all numbers to characters as.logical(y) ## will convert all numbers of y into logical values j <- list(1, "a", TRUE, 1+4i) ##creates a list with 4 different objects print(j) k <- matrix(nrow=2, ncol=3) ##initializes a matrix with 2 rows and 3 columns print(k) dim(k) ##dimension of k attributes(k) ##attributes of k, like dim k <- matrix(1:6, nrow= 2, ncol=3) print(k) l <- 1:10 ##matrices can also be created directly from vector by assigning the dimension attribute after creating the vector dim(l) <- c(2,5) ##2 rows and 5 columns print(l) x <- 1:3 y <- 10:12 cbind(x,y) ##column binding two vectors creates a matrix rbind(x,y) ## row binding two vectors also creates a matrix m <- factor(c("yes", "yes", "no", "yes", "no")) ## factor is a special type of vector, which is used to create, to represent categorical data print(m) table(m) ##gives a frequency count of how man of each levels there are unclass(m) ##strips out the class for a vector, and you can see the representation of the levels in the vector m <- factor(c("yes", "yes", "no", "yes", "no"), levels = c("yes", "no")) ##the order of the levels can be set with the argument levels to factor() print(m) n <- c(1,2,3,NA,10,3) ##the NA value is going to be a numeric missing value is.na(n) ##returns a logical vector showing which values are NA in the n vector n <- c(1, 2, NaN, NA, 4) is.na(n) ##an NAN value is always considered NA so in the logical vector it will show a true value on both NA and NaN values is.nan(n) ##an NA value is not always an NaN value, so in this case, the logical vector will show a false for the NA value o <- data.frame(foo = 
1:4, bar = c(T,T,F,F)) ##data frames can store different types of objects, the foo and bar are the names given to the columns created in this dataframe print(o) nrow(o) ##shows the amount of rows that this dataframe contains ncol(o) ##shows the amount of columns that this dataframe contains p <- 1:3 names(p) ##shows the name of the object p, it is null by default because p does not have names names(p) <- c("foo", "bar", "norf") ##gives names to each element of the vector p print(p) names(p) #shows the names of all elements of p q <- list(a=1, b=2, c=3) ##names of elements of the list are a, b and c print(q) r <- matrix(1:4, nrow=2, ncol=2) dimnames(r) <- list(c("a", "b"), c("c", "d")) #dimnames receives a list value, in which the first element is the vector of rownames and the second element is the vector of columnnames print(r)
/how_objects_work_in_R.R
no_license
jazvillagra/datasciencecoursera
R
false
false
2,847
r
x<-1 print(x) x msg <- "hello" y <- 1:20 ##Sequence of number from 1 to 20 assigned to vector y y x <- c(0.5, 0.6) z <- c(TRUE, FALSE) a <- c(T,F) b <- c("a", "b", "c") d <- 9:29 e <- c(1+0i, 2+4i) f <- vector("numeric", length = 10) x f g <- c(1.7, "a") ##character h <- c(TRUE, 2) ##numeric i <- c("A", TRUE) ##character class(y) as.numeric(y) as.character(y) ##will convert all numbers to characters as.logical(y) ## will convert all numbers of y into logical values j <- list(1, "a", TRUE, 1+4i) ##creates a list with 4 different objects print(j) k <- matrix(nrow=2, ncol=3) ##initializes a matrix with 2 rows and 3 columns print(k) dim(k) ##dimension of k attributes(k) ##attributes of k, like dim k <- matrix(1:6, nrow= 2, ncol=3) print(k) l <- 1:10 ##matrices can also be created directly from vector by assigning the dimension attribute after creating the vector dim(l) <- c(2,5) ##2 rows and 5 columns print(l) x <- 1:3 y <- 10:12 cbind(x,y) ##column binding two vectors creates a matrix rbind(x,y) ## row binding two vectors also creates a matrix m <- factor(c("yes", "yes", "no", "yes", "no")) ## factor is a special type of vector, which is used to create, to represent categorical data print(m) table(m) ##gives a frequency count of how man of each levels there are unclass(m) ##strips out the class for a vector, and you can see the representation of the levels in the vector m <- factor(c("yes", "yes", "no", "yes", "no"), levels = c("yes", "no")) ##the order of the levels can be set with the argument levels to factor() print(m) n <- c(1,2,3,NA,10,3) ##the NA value is going to be a numeric missing value is.na(n) ##returns a logical vector showing which values are NA in the n vector n <- c(1, 2, NaN, NA, 4) is.na(n) ##an NAN value is always considered NA so in the logical vector it will show a true value on both NA and NaN values is.nan(n) ##an NA value is not always an NaN value, so in this case, the logical vector will show a false for the NA value o <- data.frame(foo = 
1:4, bar = c(T,T,F,F)) ##data frames can store different types of objects, the foo and bar are the names given to the columns created in this dataframe print(o) nrow(o) ##shows the amount of rows that this dataframe contains ncol(o) ##shows the amount of columns that this dataframe contains p <- 1:3 names(p) ##shows the name of the object p, it is null by default because p does not have names names(p) <- c("foo", "bar", "norf") ##gives names to each element of the vector p print(p) names(p) #shows the names of all elements of p q <- list(a=1, b=2, c=3) ##names of elements of the list are a, b and c print(q) r <- matrix(1:4, nrow=2, ncol=2) dimnames(r) <- list(c("a", "b"), c("c", "d")) #dimnames receives a list value, in which the first element is the vector of rownames and the second element is the vector of columnnames print(r)
install.packages("neuralnet") install.packages("nnet") install.packages("caret") library(neuralnet) # for neuralnet(), nn model library(nnet) # for class.ind() library(caret) # for train(), tune parameters library(datasets) str(iris) iris data <- iris # 因為Species是類別型態,這邊轉換成三個output nodes,使用的是class.ind函式() head(class.ind(data$Species)) # 並和原始的資料合併在一起,cbind意即column-bind data <- cbind(data, class.ind(data$Species)) # 原始資料就會變成像這樣 head(data) formula.bpn <- setosa + versicolor + virginica ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width formula.bpn bpn <- neuralnet(formula = formula.bpn, data = data, hidden = c(2), # 一個隱藏層:2個node learningrate = 0.01, # learning rate threshold = 0.01, # partial derivatives of the error function, a stopping criteria stepmax = 5e5 # 最大的ieration數 = 500000(5*10^5) ) plot(bpn) #Tuning Parameters # nrow()是用來擷取資料筆數,乘上0.8後,表示我們的train set裡面要有多少筆資料(data size) smp.size <- floor(0.8*nrow(data)) # 因為是抽樣,有可能每次抽樣結果都不一樣,因此這裡規定好亂數表,讓每次抽樣的結果一樣 set.seed(100) # 從原始資料裡面,抽出train set所需要的資料筆數(data size) train.ind <- sample(seq_len(nrow(data)), smp.size) # 分成train/test train <- data[train.ind, ] test <- data[-train.ind, ] # tune parameters model <- train(form=formula.bpn, # formula data=train, # 資料 method="neuralnet", # 類神經網路(bpn) # 最重要的步驟:觀察不同排列組合(第一層1~4個nodes ; 第二層0~4個nodes) # 看何種排列組合(多少隱藏層、每層多少個node),會有最小的RMSE tuneGrid = expand.grid(.layer1=c(1:4), .layer2=c(0:4), .layer3=c(0)), # 以下的參數設定,和上面的neuralnet內一樣 learningrate = 0.01, # learning rate threshold = 0.01, # partial derivatives of the error function, a stopping criteria stepmax = 5e5 # 最大的ieration數 = 500000(5*10^5) ) # 計算出最佳參數:The final values used for the model were layer1 = 1, layer2 = 2 and layer3 = 0. model plot(model) bpn <- neuralnet(formula = formula.bpn, data = train, hidden = c(1,2), # The final values used for the model were layer1 = 1, layer2 = 2 and layer3 = 0. 
learningrate = 0.01, # learning rate threshold = 0.01, # partial derivatives of the error function, a stopping criteria stepmax = 5e5 # 最大的ieration數 = 500000(5*10^5) ) # 顯示經過參數評估之類神經網路 plot(bpn) #Make Predictions # 取前四個欄位,進行預測 pred <- compute(bpn, test[, 1:4]) # 預測結果 pred$net.result # 四捨五入後,變成0/1的狀態 pred.result <- round(pred$net.result) pred.result pred.result <- as.data.frame(pred.result) # 建立一個新欄位,叫做Species pred.result$Species <- "" # 把預測結果轉回Species的型態 for(i in 1:nrow(pred.result)){ if(pred.result[i, 1]==1){ pred.result[i, "Species"] <- "setosa"} if(pred.result[i, 2]==1){ pred.result[i, "Species"] <- "versicolor"} if(pred.result[i, 3]==1){ pred.result[i, "Species"] <- "virginica"} } pred.result # 混淆矩陣 (預測率有96.67%) table(real = test$Species, predict = pred.result$Species)
/ML2Network.R
no_license
chaoneng/ML2DB
R
false
false
3,781
r
install.packages("neuralnet") install.packages("nnet") install.packages("caret") library(neuralnet) # for neuralnet(), nn model library(nnet) # for class.ind() library(caret) # for train(), tune parameters library(datasets) str(iris) iris data <- iris # 因為Species是類別型態,這邊轉換成三個output nodes,使用的是class.ind函式() head(class.ind(data$Species)) # 並和原始的資料合併在一起,cbind意即column-bind data <- cbind(data, class.ind(data$Species)) # 原始資料就會變成像這樣 head(data) formula.bpn <- setosa + versicolor + virginica ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width formula.bpn bpn <- neuralnet(formula = formula.bpn, data = data, hidden = c(2), # 一個隱藏層:2個node learningrate = 0.01, # learning rate threshold = 0.01, # partial derivatives of the error function, a stopping criteria stepmax = 5e5 # 最大的ieration數 = 500000(5*10^5) ) plot(bpn) #Tuning Parameters # nrow()是用來擷取資料筆數,乘上0.8後,表示我們的train set裡面要有多少筆資料(data size) smp.size <- floor(0.8*nrow(data)) # 因為是抽樣,有可能每次抽樣結果都不一樣,因此這裡規定好亂數表,讓每次抽樣的結果一樣 set.seed(100) # 從原始資料裡面,抽出train set所需要的資料筆數(data size) train.ind <- sample(seq_len(nrow(data)), smp.size) # 分成train/test train <- data[train.ind, ] test <- data[-train.ind, ] # tune parameters model <- train(form=formula.bpn, # formula data=train, # 資料 method="neuralnet", # 類神經網路(bpn) # 最重要的步驟:觀察不同排列組合(第一層1~4個nodes ; 第二層0~4個nodes) # 看何種排列組合(多少隱藏層、每層多少個node),會有最小的RMSE tuneGrid = expand.grid(.layer1=c(1:4), .layer2=c(0:4), .layer3=c(0)), # 以下的參數設定,和上面的neuralnet內一樣 learningrate = 0.01, # learning rate threshold = 0.01, # partial derivatives of the error function, a stopping criteria stepmax = 5e5 # 最大的ieration數 = 500000(5*10^5) ) # 計算出最佳參數:The final values used for the model were layer1 = 1, layer2 = 2 and layer3 = 0. model plot(model) bpn <- neuralnet(formula = formula.bpn, data = train, hidden = c(1,2), # The final values used for the model were layer1 = 1, layer2 = 2 and layer3 = 0. 
learningrate = 0.01, # learning rate threshold = 0.01, # partial derivatives of the error function, a stopping criteria stepmax = 5e5 # 最大的ieration數 = 500000(5*10^5) ) # 顯示經過參數評估之類神經網路 plot(bpn) #Make Predictions # 取前四個欄位,進行預測 pred <- compute(bpn, test[, 1:4]) # 預測結果 pred$net.result # 四捨五入後,變成0/1的狀態 pred.result <- round(pred$net.result) pred.result pred.result <- as.data.frame(pred.result) # 建立一個新欄位,叫做Species pred.result$Species <- "" # 把預測結果轉回Species的型態 for(i in 1:nrow(pred.result)){ if(pred.result[i, 1]==1){ pred.result[i, "Species"] <- "setosa"} if(pred.result[i, 2]==1){ pred.result[i, "Species"] <- "versicolor"} if(pred.result[i, 3]==1){ pred.result[i, "Species"] <- "virginica"} } pred.result # 混淆矩陣 (預測率有96.67%) table(real = test$Species, predict = pred.result$Species)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/transform_to_std_norm.R \name{transform_to_std_norm} \alias{transform_to_std_norm} \title{Returns the exact transformation of a chi-square random variable to the standard normal random variable.} \usage{ transform_to_std_norm(chi_square_rv, df) } \description{ Returns the exact transformation of a chi-square random variable to the standard normal random variable. }
/man/transform_to_std_norm.Rd
permissive
rasel-biswas/miLRT
R
false
true
446
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/transform_to_std_norm.R \name{transform_to_std_norm} \alias{transform_to_std_norm} \title{Returns the exact transformation of a chi-square random variable to the standard normal random variable.} \usage{ transform_to_std_norm(chi_square_rv, df) } \description{ Returns the exact transformation of a chi-square random variable to the standard normal random variable. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/variants.R \name{variantsToGRanges} \alias{variantsToGRanges} \title{Convert variants to GRanges.} \usage{ variantsToGRanges(variants, oneBasedCoord = TRUE, slStyle = "UCSC") } \arguments{ \item{variants}{A list of R objects corresponding to the JSON objects returned by the GA4GH Variants API.} \item{oneBasedCoord}{Convert genomic positions to 1-based coordinates.} \item{slStyle}{The style for seqnames (chrN or N or...). Default is UCSC.} } \value{ \link[GenomicRanges]{GRanges} } \description{ Note that the Global Alliance for Genomics and Health API uses a 0-based coordinate system. For more detail, please see GA4GH discussions such as the following: \itemize{ \item\url{https://github.com/ga4gh/schemas/issues/168} \item\url{https://github.com/ga4gh/schemas/issues/121} } } \examples{ variants1 <- searchVariants(converter = variantsToGRanges) summary(variants1) variants2 <- variantsToGRanges(searchVariants()) print(identical(variants1, variants2)) } \seealso{ Other variants converter functions: \code{\link{variantsToVRanges}} }
/man/variantsToGRanges.Rd
permissive
adamstruck/ga4ghr
R
false
true
1,132
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/variants.R \name{variantsToGRanges} \alias{variantsToGRanges} \title{Convert variants to GRanges.} \usage{ variantsToGRanges(variants, oneBasedCoord = TRUE, slStyle = "UCSC") } \arguments{ \item{variants}{A list of R objects corresponding to the JSON objects returned by the GA4GH Variants API.} \item{oneBasedCoord}{Convert genomic positions to 1-based coordinates.} \item{slStyle}{The style for seqnames (chrN or N or...). Default is UCSC.} } \value{ \link[GenomicRanges]{GRanges} } \description{ Note that the Global Alliance for Genomics and Health API uses a 0-based coordinate system. For more detail, please see GA4GH discussions such as the following: \itemize{ \item\url{https://github.com/ga4gh/schemas/issues/168} \item\url{https://github.com/ga4gh/schemas/issues/121} } } \examples{ variants1 <- searchVariants(converter = variantsToGRanges) summary(variants1) variants2 <- variantsToGRanges(searchVariants()) print(identical(variants1, variants2)) } \seealso{ Other variants converter functions: \code{\link{variantsToVRanges}} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_effort.R \name{calc_fishing_days_trip} \alias{calc_fishing_days_trip} \title{Calculate fishing days for a fishing trip.} \usage{ calc_fishing_days_trip(trip, .int = FALSE) } \arguments{ \item{trip}{Data.frame of the trip data} } \value{ A data.frame with the fishing days by gear, fishing area, economic zone and rectangle. } \description{ Calculate fishing days for a single fishing trip using data in the format described in the package vignette \emph{checking_data}. } \details{ The input is a single fishing trip. The format of the data should be checked by \code{\link{check_format}} before calling this function (see the package vignette \emph{checking_data} for more details). Fishing days is reported at the gear (type and mesh size), fishing area, economic zone and rectangle level. Passive and active gears are treated separately. For active gears, each fishing date has 1 fishing day that is spread equally over the active gears. For passive gears, each use of a passive gear is one fishing day, i.e. on fishing date can have several passive fishing days simultaneously. See the vignette \emph{calculating_fishing_effort} for more details. This function is called by \code{\link{calc_fishing_effort}}. } \examples{ trip1 <- data.frame( eunr_id = "my_boat", loa = 2000, gt = 70, kw = 400, trip_id = "trip1", # 4 day trip depdate = "20140718", deptime = "0615", retdate = "20140721", rettime = "1615", # Only fish on 2 of those fishdate = c("20140719", "20140719", "20140719", "20140719", "20140720", "20140720", "20140720"), gear = c("OTB","OTB","OTB","GN","OTB","GN","FPO"), gear_mesh_size = c(80,80,80,50,80,50,0), fishing_area = "27.4.B", economic_zone = "EU", rectangle = c("39F0","40F0","41F0","41F0","41F0","41F0","41F0"), stringsAsFactors = FALSE ) fd <- calc_fishing_days_trip(trip1) } \seealso{ See \code{\link{calc_fishing_effort}}. 
See the package vignette \emph{checking_data} for data preparation and the vignette \emph{calculating_fishing_effort} for the calculation details. }
/man/calc_fishing_days_trip.Rd
no_license
ChrKo1/fecR2.0
R
false
true
2,126
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_effort.R \name{calc_fishing_days_trip} \alias{calc_fishing_days_trip} \title{Calculate fishing days for a fishing trip.} \usage{ calc_fishing_days_trip(trip, .int = FALSE) } \arguments{ \item{trip}{Data.frame of the trip data} } \value{ A data.frame with the fishing days by gear, fishing area, economic zone and rectangle. } \description{ Calculate fishing days for a single fishing trip using data in the format described in the package vignette \emph{checking_data}. } \details{ The input is a single fishing trip. The format of the data should be checked by \code{\link{check_format}} before calling this function (see the package vignette \emph{checking_data} for more details). Fishing days is reported at the gear (type and mesh size), fishing area, economic zone and rectangle level. Passive and active gears are treated separately. For active gears, each fishing date has 1 fishing day that is spread equally over the active gears. For passive gears, each use of a passive gear is one fishing day, i.e. on fishing date can have several passive fishing days simultaneously. See the vignette \emph{calculating_fishing_effort} for more details. This function is called by \code{\link{calc_fishing_effort}}. } \examples{ trip1 <- data.frame( eunr_id = "my_boat", loa = 2000, gt = 70, kw = 400, trip_id = "trip1", # 4 day trip depdate = "20140718", deptime = "0615", retdate = "20140721", rettime = "1615", # Only fish on 2 of those fishdate = c("20140719", "20140719", "20140719", "20140719", "20140720", "20140720", "20140720"), gear = c("OTB","OTB","OTB","GN","OTB","GN","FPO"), gear_mesh_size = c(80,80,80,50,80,50,0), fishing_area = "27.4.B", economic_zone = "EU", rectangle = c("39F0","40F0","41F0","41F0","41F0","41F0","41F0"), stringsAsFactors = FALSE ) fd <- calc_fishing_days_trip(trip1) } \seealso{ See \code{\link{calc_fishing_effort}}. 
See the package vignette \emph{checking_data} for data preparation and the vignette \emph{calculating_fishing_effort} for the calculation details. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/EP_Name_Search.R \name{EP_Name_Search} \alias{EP_Name_Search} \title{Name Search Scrapper} \usage{ EP_Name_Search(Name, include_db = F) } \arguments{ \item{Name}{A string of the desired player's name} \item{include_db}{Boolean to determine if you also want to return birthdate information alongside the links} } \value{ A vector of player eliteprospects URLs with the same name } \description{ Returns a list of URLs of the players from a league stats URL. }
/man/EP_Name_Search.Rd
no_license
palmerimatthew/EPScraper
R
false
true
538
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/EP_Name_Search.R \name{EP_Name_Search} \alias{EP_Name_Search} \title{Name Search Scrapper} \usage{ EP_Name_Search(Name, include_db = F) } \arguments{ \item{Name}{A string of the desired player's name} \item{include_db}{Boolean to determine if you also want to return birthdate information alongside the links} } \value{ A vector of player eliteprospects URLs with the same name } \description{ Returns a list of URLs of the players from a league stats URL. }
rm(list = ls()) # code by Patrick Thompson # April 2021 # patrick.thompson@dfo-mpo.gc.ca #load packages##### library(BASMasterSample) library(rgdal) library(sf) library(tidyverse) library(data.table) library(raster) library(cowplot) library(ggspatial) source('./code/functions/HSS.r') #change plot theme#### source("./code/functions/plot_theme.R") theme_set(plt_theme) #load research area#### research_area <- readOGR("./spatial_data/calvert_research_area.gpkg") research_area <- st_as_sf(research_area, wtk = geometry) area_plot <- ggplot()+ geom_sf(data = research_area, fill = NA) #read in habitat lines habitat_line_features <- readOGR("./spatial_data/habitat_line_features.gpkg") habitat_line_features <- st_as_sf(habitat_line_features) #read in habitat polygons habitat_polygons <- readOGR("./spatial_data/habitat_line_polyRug_legbuff.gpkg") habitat_polygons <- st_as_sf(habitat_polygons) plot(habitat_polygons) #read in legacy sites legacy <- readOGR("./spatial_data/hakai_legacy_sites.gpkg") legacy <- st_as_sf(legacy) legacy$type <- "legacy" #function to produce HIP BAS for a habitat #habitats is the habitat that you are selecting sites for #priority habitats are habitats that would be preferentially sampled in a given halton box #this excludes any halton box with those habitats from consideration so that only boxes without priority habitats are selected habitat_HIP <- function(habitats, samples, priority_habitats = NA){ #create Halton boxes##### #get bounding box for BC # MARINE MS, need to add the seed to it for it to work. 
bb <- getBB() attr(bb, "seed") <- getSeed() all_boxes <- point2Frame(pts = habitat_polygons %>% filter(habitat %in% habitats), bb = bb, size = 100) if(!is.na(priority_habitats)){ all_boxes_priority <- point2Frame(pts = habitat_polygons %>% filter(habitat %in% priority_habitats), bb = bb, size = 100) all_boxes <- all_boxes %>% filter(!(HaltonIndex %in% all_boxes_priority$HaltonIndex)) } coords <- st_coordinates(st_centroid(all_boxes)) bb.tmp <- st_bbox(bb) bbox <- cbind(c(bb.tmp['xmin'], bb.tmp['ymin']), c(bb.tmp['xmax'], bb.tmp['ymax'])) HSS.pts <- getHipSample(X = coords[,1], Y = coords[,2], index = all_boxes$HaltonIndex, N = samples*2, bb = bbox, base = c(2,3), quiet = TRUE, Ps1 = 0:1, Ps2 = 0:2, hipS1 = 0:1, hipS2 = c(0,2,1)) # Changed the order for fun n.boxes <- length(table(HSS.pts$HIPIndex)) # Chooses a sample from all boxes. Small wrapper function. pts.new <- getSamples(HSS.pts, n = samples) # Bounding box to clip the HIP boxes to. bb.tmp <- st_bbox(research_area) bbox2 <- cbind(c(bb.tmp['xmin'], bb.tmp['ymin']), c(bb.tmp['xmax'], bb.tmp['ymax'])) HIPBoxes <- st_as_sf(getHIPBoxes(hip = HSS.pts, bb = bbox2, n = n.boxes, base = c(2,3))) HIPBoxes <- st_set_crs(HIPBoxes, value = st_crs(bb)) plot(HIPBoxes) selected_boxes <- all_boxes %>% filter(HaltonIndex %in% pts.new$index) selected_boxes_points <- st_centroid(selected_boxes) area_plot+ geom_sf(data = habitat_line_features, size = 0.3) + geom_sf(data = selected_boxes_points, color = 2, size = 3) selected_boxes <- st_intersection(habitat_polygons %>% filter(habitat %in% habitats), selected_boxes) return(selected_boxes) } bb <- getBB() attr(bb, "seed") <- getSeed() #get samples per habitat#### unclassified_sites <- habitat_HIP(habitats = "unclassified", samples = 30, priority_habitats = c("giant_kelp", "bull_kelp", "seagrass")) #deeper_sites <- masterSample(shp = habitat_polygons %>% filter(habitat %in% c("low_rugosity", "high_rugosity")), N = c("low_rugosity" = 20, "high_rugosity" = 40), stratum = "habitat", bb 
= bb) #deeper_sites$habitat <- c(rep("low_rugosity", 20), rep("high_rugosity", 40)) #these next two are if you want to use halton boxes for the low and high rugosity sites low_rugosity_sites <- habitat_HIP(habitats = "low_rugosity", samples = 20, priority_habitats = c("giant_kelp", "bull_kelp", "seagrass", "unclassified", "high_rugosity")) high_rugosity_sites <- habitat_HIP(habitats = "high_rugosity", samples = 40, priority_habitats = c("giant_kelp", "bull_kelp", "seagrass", "unclassified")) habitat_sites <- habitat_HIP(habitats = c("giant_kelp", "bull_kelp", "seagrass"), samples = 68) HIP_sites <- bind_rows(unclassified_sites, habitat_sites, high_rugosity_sites, low_rugosity_sites) %>% #add low and high rugosity sites here if you want to use the halton box method for them group_by(HaltonIndex) %>% mutate(count = n()) %>% arrange(desc(count), HaltonIndex) selected_HIP <- st_centroid(HIP_sites) selected_HIP$type <- "HIP" #t1 <- selected_HIP %>% # group_by(HaltonIndex) %>% # summarise_each(funs(list(unique(habitat)))) box_count <- HIP_sites %>% as.data.frame() %>% dplyr::select(HaltonIndex, count) %>% unique() table(box_count$count) table(all_sites$habitat) #note this includes overlapping sites for classified habitat so this number will be lower in reality all_sites <- bind_rows(selected_HIP, legacy) area_plot+ geom_sf(data = habitat_line_features, size = 0.3) + geom_sf(data = all_sites, aes(fill = factor(count)), size = 3, pch = 21)+ scale_fill_brewer(palette = "Set1", name = "habitats\nin box")+ facet_wrap(~habitat) ggsave("./figures/BAS_general_1.pdf", height = 8, width = 12) area_plot+ geom_sf(data = habitat_line_features, size = 0.3) + geom_sf(data = all_sites, aes(fill = factor(count)), size = 3, pch = 21)+ scale_fill_brewer(palette = "Set1", name = "habitats\nin box") ggsave("./figures/BAS_general_all.pdf", height = 8, width = 12)
/code/generate_HIP_Hakai_eDNA_BMM.R
no_license
plthompson/Hakai_eDNA_BAS
R
false
false
5,622
r
rm(list = ls()) # code by Patrick Thompson # April 2021 # patrick.thompson@dfo-mpo.gc.ca #load packages##### library(BASMasterSample) library(rgdal) library(sf) library(tidyverse) library(data.table) library(raster) library(cowplot) library(ggspatial) source('./code/functions/HSS.r') #change plot theme#### source("./code/functions/plot_theme.R") theme_set(plt_theme) #load research area#### research_area <- readOGR("./spatial_data/calvert_research_area.gpkg") research_area <- st_as_sf(research_area, wtk = geometry) area_plot <- ggplot()+ geom_sf(data = research_area, fill = NA) #read in habitat lines habitat_line_features <- readOGR("./spatial_data/habitat_line_features.gpkg") habitat_line_features <- st_as_sf(habitat_line_features) #read in habitat polygons habitat_polygons <- readOGR("./spatial_data/habitat_line_polyRug_legbuff.gpkg") habitat_polygons <- st_as_sf(habitat_polygons) plot(habitat_polygons) #read in legacy sites legacy <- readOGR("./spatial_data/hakai_legacy_sites.gpkg") legacy <- st_as_sf(legacy) legacy$type <- "legacy" #function to produce HIP BAS for a habitat #habitats is the habitat that you are selecting sites for #priority habitats are habitats that would be preferentially sampled in a given halton box #this excludes any halton box with those habitats from consideration so that only boxes without priority habitats are selected habitat_HIP <- function(habitats, samples, priority_habitats = NA){ #create Halton boxes##### #get bounding box for BC # MARINE MS, need to add the seed to it for it to work. 
bb <- getBB() attr(bb, "seed") <- getSeed() all_boxes <- point2Frame(pts = habitat_polygons %>% filter(habitat %in% habitats), bb = bb, size = 100) if(!is.na(priority_habitats)){ all_boxes_priority <- point2Frame(pts = habitat_polygons %>% filter(habitat %in% priority_habitats), bb = bb, size = 100) all_boxes <- all_boxes %>% filter(!(HaltonIndex %in% all_boxes_priority$HaltonIndex)) } coords <- st_coordinates(st_centroid(all_boxes)) bb.tmp <- st_bbox(bb) bbox <- cbind(c(bb.tmp['xmin'], bb.tmp['ymin']), c(bb.tmp['xmax'], bb.tmp['ymax'])) HSS.pts <- getHipSample(X = coords[,1], Y = coords[,2], index = all_boxes$HaltonIndex, N = samples*2, bb = bbox, base = c(2,3), quiet = TRUE, Ps1 = 0:1, Ps2 = 0:2, hipS1 = 0:1, hipS2 = c(0,2,1)) # Changed the order for fun n.boxes <- length(table(HSS.pts$HIPIndex)) # Chooses a sample from all boxes. Small wrapper function. pts.new <- getSamples(HSS.pts, n = samples) # Bounding box to clip the HIP boxes to. bb.tmp <- st_bbox(research_area) bbox2 <- cbind(c(bb.tmp['xmin'], bb.tmp['ymin']), c(bb.tmp['xmax'], bb.tmp['ymax'])) HIPBoxes <- st_as_sf(getHIPBoxes(hip = HSS.pts, bb = bbox2, n = n.boxes, base = c(2,3))) HIPBoxes <- st_set_crs(HIPBoxes, value = st_crs(bb)) plot(HIPBoxes) selected_boxes <- all_boxes %>% filter(HaltonIndex %in% pts.new$index) selected_boxes_points <- st_centroid(selected_boxes) area_plot+ geom_sf(data = habitat_line_features, size = 0.3) + geom_sf(data = selected_boxes_points, color = 2, size = 3) selected_boxes <- st_intersection(habitat_polygons %>% filter(habitat %in% habitats), selected_boxes) return(selected_boxes) } bb <- getBB() attr(bb, "seed") <- getSeed() #get samples per habitat#### unclassified_sites <- habitat_HIP(habitats = "unclassified", samples = 30, priority_habitats = c("giant_kelp", "bull_kelp", "seagrass")) #deeper_sites <- masterSample(shp = habitat_polygons %>% filter(habitat %in% c("low_rugosity", "high_rugosity")), N = c("low_rugosity" = 20, "high_rugosity" = 40), stratum = "habitat", bb 
= bb) #deeper_sites$habitat <- c(rep("low_rugosity", 20), rep("high_rugosity", 40)) #these next two are if you want to use halton boxes for the low and high rugosity sites low_rugosity_sites <- habitat_HIP(habitats = "low_rugosity", samples = 20, priority_habitats = c("giant_kelp", "bull_kelp", "seagrass", "unclassified", "high_rugosity")) high_rugosity_sites <- habitat_HIP(habitats = "high_rugosity", samples = 40, priority_habitats = c("giant_kelp", "bull_kelp", "seagrass", "unclassified")) habitat_sites <- habitat_HIP(habitats = c("giant_kelp", "bull_kelp", "seagrass"), samples = 68) HIP_sites <- bind_rows(unclassified_sites, habitat_sites, high_rugosity_sites, low_rugosity_sites) %>% #add low and high rugosity sites here if you want to use the halton box method for them group_by(HaltonIndex) %>% mutate(count = n()) %>% arrange(desc(count), HaltonIndex) selected_HIP <- st_centroid(HIP_sites) selected_HIP$type <- "HIP" #t1 <- selected_HIP %>% # group_by(HaltonIndex) %>% # summarise_each(funs(list(unique(habitat)))) box_count <- HIP_sites %>% as.data.frame() %>% dplyr::select(HaltonIndex, count) %>% unique() table(box_count$count) table(all_sites$habitat) #note this includes overlapping sites for classified habitat so this number will be lower in reality all_sites <- bind_rows(selected_HIP, legacy) area_plot+ geom_sf(data = habitat_line_features, size = 0.3) + geom_sf(data = all_sites, aes(fill = factor(count)), size = 3, pch = 21)+ scale_fill_brewer(palette = "Set1", name = "habitats\nin box")+ facet_wrap(~habitat) ggsave("./figures/BAS_general_1.pdf", height = 8, width = 12) area_plot+ geom_sf(data = habitat_line_features, size = 0.3) + geom_sf(data = all_sites, aes(fill = factor(count)), size = 3, pch = 21)+ scale_fill_brewer(palette = "Set1", name = "habitats\nin box") ggsave("./figures/BAS_general_all.pdf", height = 8, width = 12)
% Auto-generated: do not edit by hand \name{''MessageLog} \alias{''MessageLog} \title{MessageLog component} \description{ } \usage{ ''MessageLog(id=NULL, log=NULL, trigServLogUpdate=NULL) } \arguments{ \item{id}{Character. } \item{log}{List of characters. } \item{trigServLogUpdate}{Logical. } } \value{named list of JSON elements corresponding to React.js properties and their values}
/man/''MessageLog.Rd
no_license
CodingJinxx/DashServerStatePattern
R
false
false
396
rd
% Auto-generated: do not edit by hand \name{''MessageLog} \alias{''MessageLog} \title{MessageLog component} \description{ } \usage{ ''MessageLog(id=NULL, log=NULL, trigServLogUpdate=NULL) } \arguments{ \item{id}{Character. } \item{log}{List of characters. } \item{trigServLogUpdate}{Logical. } } \value{named list of JSON elements corresponding to React.js properties and their values}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary_methods.R \name{summary} \alias{summary} \alias{summary.onephase} \alias{summary.twophase} \alias{summary.threephase} \title{Summarizing Global and Small-Area Estimation Results} \usage{ \method{summary}{onephase}(object, coefs = FALSE, ...) \method{summary}{twophase}(object, coefs = FALSE, ...) \method{summary}{threephase}(object, coefs = FALSE, ...) } \arguments{ \item{object}{object of class \code{onephase}, \code{twophase} or \code{threephase}, containing estimation results of the respective estimation method.} \item{coefs}{of type "\code{\link[base]{logical}}". If set to \code{TRUE}, also gives the regression coefficients of \code{\link{twophase}} and \code{\link{threephase}} estimations. Defaults to \code{FALSE}.} \item{...}{additional arguments, so far ignored.} } \description{ Summarizing Global and Small-Area Estimation Results }
/man/summary.Rd
no_license
cran/forestinventory
R
false
true
969
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary_methods.R \name{summary} \alias{summary} \alias{summary.onephase} \alias{summary.twophase} \alias{summary.threephase} \title{Summarizing Global and Small-Area Estimation Results} \usage{ \method{summary}{onephase}(object, coefs = FALSE, ...) \method{summary}{twophase}(object, coefs = FALSE, ...) \method{summary}{threephase}(object, coefs = FALSE, ...) } \arguments{ \item{object}{object of class \code{onephase}, \code{twophase} or \code{threephase}, containing estimation results of the respective estimation method.} \item{coefs}{of type "\code{\link[base]{logical}}". If set to \code{TRUE}, also gives the regression coefficients of \code{\link{twophase}} and \code{\link{threephase}} estimations. Defaults to \code{FALSE}.} \item{...}{additional arguments, so far ignored.} } \description{ Summarizing Global and Small-Area Estimation Results }
# load library, data, and create dependencies library(qtl2) iron <- read_cross2( system.file("extdata", "iron.zip", package="qtl2") ) map <- insert_pseudomarkers(map=iron$gmap, step=1) pr <- calc_genoprob(cross=iron, map=map, error_prob=0.002) # chromosome 2 coefficients - additive and dominant effects c2effB_pg <- scan1coef(pr[,"2"], iron$pheno[,"liver"], kinship_loco[["2"]], contrasts=cbind(mu=c(1,1,1), a=c(-1, 0, 1), d=c(-0.5, 1, -0.5))) # output figure png(filename = "./fig/chr2_effects_pg_add_dom.png") par(mar=c(4.1, 4.1, 1.1, 2.6), las=1) plot(c2effB_pg, map["2"], columns=2:3, col=col) last_coef <- unclass(c2effB_pg)[nrow(c2effB_pg),2:3] for(i in seq(along=last_coef)) axis(side=4, at=last_coef[i], names(last_coef)[i], tick=FALSE, col.axis=col[i]) dev.off()
/code/chr2_effects_pg_add_dom.R
permissive
smcclatchy/mapping
R
false
false
799
r
# load library, data, and create dependencies library(qtl2) iron <- read_cross2( system.file("extdata", "iron.zip", package="qtl2") ) map <- insert_pseudomarkers(map=iron$gmap, step=1) pr <- calc_genoprob(cross=iron, map=map, error_prob=0.002) # chromosome 2 coefficients - additive and dominant effects c2effB_pg <- scan1coef(pr[,"2"], iron$pheno[,"liver"], kinship_loco[["2"]], contrasts=cbind(mu=c(1,1,1), a=c(-1, 0, 1), d=c(-0.5, 1, -0.5))) # output figure png(filename = "./fig/chr2_effects_pg_add_dom.png") par(mar=c(4.1, 4.1, 1.1, 2.6), las=1) plot(c2effB_pg, map["2"], columns=2:3, col=col) last_coef <- unclass(c2effB_pg)[nrow(c2effB_pg),2:3] for(i in seq(along=last_coef)) axis(side=4, at=last_coef[i], names(last_coef)[i], tick=FALSE, col.axis=col[i]) dev.off()
# @file SimpleAbxBetterChoice.R # # Copyright 2018 Observational Health Data Sciences and Informatics # # This file is part of SimpleAbxBetterChoice # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #' SimpleAbxBetterChoice #' #' @docType package #' @name SimpleAbxBetterChoice #' @import DatabaseConnector #' @importFrom magrittr %>% NULL
/SimpleAbxBetterChoice_IP/R/AbxBetterChoice.R
no_license
ABMI/AbxBetterChoice
R
false
false
839
r
# @file SimpleAbxBetterChoice.R # # Copyright 2018 Observational Health Data Sciences and Informatics # # This file is part of SimpleAbxBetterChoice # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #' SimpleAbxBetterChoice #' #' @docType package #' @name SimpleAbxBetterChoice #' @import DatabaseConnector #' @importFrom magrittr %>% NULL
combine_animation_frames <- function(gif_file, animation_cfg, task_names=NULL, intro_config, n_outro) { # run imageMagick convert to build a gif if(is.null(task_names)) task_names <- '*' png_files <- paste(sprintf('6_visualize/tmp/gif_frame_%s.png', task_names), collapse=' ') tmp_dir <- './6_visualize/tmp/magick' if(!dir.exists(tmp_dir)) dir.create(tmp_dir) magick_command <- sprintf( 'convert -define registry:temporary-path=%s -limit memory 24GiB -delay %d -loop 0 %s %s', tmp_dir, animation_cfg$frame_delay_cs, png_files, gif_file) if(Sys.info()[['sysname']] == "Windows") { magick_command <- sprintf('magick %s', magick_command) } system(magick_command) # simplify the gif with gifsicle - cuts size by about 2/3 stopifnot(task_names == '*') png_dir <- dirname(png_files) png_patt <- basename(png_files) %>% tools::file_path_sans_ext() total_frames <- grepl(dir(png_dir), pattern = png_patt) %>% sum() # how many intro frames? how many storm frames? how many outro frames? intro_delay <- intro_config$frame_delay_cs storm_delay <- animation_cfg$frame_delay_cs outro_delay <- 200 final_delay <- 700 freeze_delay <- 150 # **trash code for now: calc_delays <- function(delay, start_frame, end_frame){ paste(paste(sprintf('-d%s "#', delay), seq(start_frame-1, end_frame-1), sep = '') %>% paste('"', sep = ''), collapse = " ") } intro_delays <- calc_delays(intro_delay, 1, intro_config$n_frames) storm_delays <- calc_delays(storm_delay, intro_config$n_frames+1, total_frames-n_outro-1) # freeze the last storm frame too for as long as we are showing each outro frame: last_storm_delay <- calc_delays(freeze_delay, total_frames-n_outro, total_frames-n_outro) outro_delays <- calc_delays(outro_delay, total_frames-n_outro+1, total_frames-1) final_delay <- calc_delays(final_delay, total_frames, total_frames) gifsicle_command <- sprintf('gifsicle -b -O3 %s %s %s %s %s %s --colors 256', gif_file, intro_delays, storm_delays, last_storm_delay, outro_delays, final_delay) system(gifsicle_command) }
/6_visualize/src/combine_animation_frames.R
no_license
jzwart/vizstorm-GIF
R
false
false
2,089
r
combine_animation_frames <- function(gif_file, animation_cfg, task_names=NULL, intro_config, n_outro) { # run imageMagick convert to build a gif if(is.null(task_names)) task_names <- '*' png_files <- paste(sprintf('6_visualize/tmp/gif_frame_%s.png', task_names), collapse=' ') tmp_dir <- './6_visualize/tmp/magick' if(!dir.exists(tmp_dir)) dir.create(tmp_dir) magick_command <- sprintf( 'convert -define registry:temporary-path=%s -limit memory 24GiB -delay %d -loop 0 %s %s', tmp_dir, animation_cfg$frame_delay_cs, png_files, gif_file) if(Sys.info()[['sysname']] == "Windows") { magick_command <- sprintf('magick %s', magick_command) } system(magick_command) # simplify the gif with gifsicle - cuts size by about 2/3 stopifnot(task_names == '*') png_dir <- dirname(png_files) png_patt <- basename(png_files) %>% tools::file_path_sans_ext() total_frames <- grepl(dir(png_dir), pattern = png_patt) %>% sum() # how many intro frames? how many storm frames? how many outro frames? intro_delay <- intro_config$frame_delay_cs storm_delay <- animation_cfg$frame_delay_cs outro_delay <- 200 final_delay <- 700 freeze_delay <- 150 # **trash code for now: calc_delays <- function(delay, start_frame, end_frame){ paste(paste(sprintf('-d%s "#', delay), seq(start_frame-1, end_frame-1), sep = '') %>% paste('"', sep = ''), collapse = " ") } intro_delays <- calc_delays(intro_delay, 1, intro_config$n_frames) storm_delays <- calc_delays(storm_delay, intro_config$n_frames+1, total_frames-n_outro-1) # freeze the last storm frame too for as long as we are showing each outro frame: last_storm_delay <- calc_delays(freeze_delay, total_frames-n_outro, total_frames-n_outro) outro_delays <- calc_delays(outro_delay, total_frames-n_outro+1, total_frames-1) final_delay <- calc_delays(final_delay, total_frames, total_frames) gifsicle_command <- sprintf('gifsicle -b -O3 %s %s %s %s %s %s --colors 256', gif_file, intro_delays, storm_delays, last_storm_delay, outro_delays, final_delay) system(gifsicle_command) }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SingleSimFit.R
\name{summary.SingleSimFit}
\alias{summary.SingleSimFit}
\title{summary.SingleSimFit}
\arguments{
\item{model.summary}{The model summary from the fit}

\item{treatment.effect}{The estimate of treatment effect from the model fit}

\item{CI.limit}{The confidence interval limit (by default 0.95); call \code{summary(object,CI.limit=x)} to use a CI of \code{x} instead.}

\item{CI}{The confidence interval of the treatment effect}

\item{se}{Estimate for the standard error of the (log) treatment effect}

\item{dispersion}{Estimate for the dispersion parameter, or numeric(0) if a Poisson/quasi-Poisson model is used}

\item{rate.estimate}{Estimate of the event rates from the model: a vector c(control arm, treatment arm)}

\item{pval}{The p value directly from the model fit (this is for the single model fit only, i.e. not using Rubin's formula)}

\item{datastatus}{The status of the SingleSim object to which the fit was applied}

\item{df}{The number of degrees of freedom of the model}

\item{dropout}{The number of dropouts in each arm}

\item{number.subjects}{The number of subjects in each arm}
}
\description{
The summary object for a \code{SingleSimFit} object
}
\details{
A \code{print.summary.SingleSimFit} method has been implemented
}
\examples{
sim <- SimulateComplete(study.time=365,number.subjects=50,
event.rates=c(0.01,0.005),dispersions=0.25)
fit <- Simfit(sim)
summary(fit)
}
\seealso{
\code{\link{SingleSimFit.object}}
}
/man/summary.SingleSimFit.Rd
no_license
scientific-computing-solutions/dejaVu
R
false
true
1,518
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SingleSimFit.R \name{summary.SingleSimFit} \alias{summary.SingleSimFit} \title{summary.SingleSimFit} \arguments{ \item{model.summary}{The model summary from the fit} \item{treatment.effect}{The estimate of treatment effect from the model fit} \item{CI.limit}{The confidence interval limit (by default 0.95), call \code{summary(object,CI.limit=x)} to use CI of \code{x} instead.} \item{CI}{The confidence interval of the treatment effect} \item{se}{Estimate for the standard error of (log) treatment effect} \item{dispersion}{Estimate for the dispersion parameter or numeric(0) if Poisson/quasi-Poisson model used} \item{rate.estimate}{Estimate of the event rates from the model a vector c(control arm, treatment arm)} \item{pval}{The p value directly from the model fit (this is for the single model fit only, i.e. not using Rubin's formula)} \item{datastatus}{The status of SingleSim object to which the fit was applied} \item{df}{The number of degrees of freedom of the model} \item{dropout}{The number of dropouts of each arm} \item{number.subjects}{The number of subjects in each arm} } \description{ The summary object for a \code{SingleSimFit} object } \details{ A \code{print.summary.SingleSimFit} method has been implemented } \examples{ sim <- SimulateComplete(study.time=365,number.subjects=50, event.rates=c(0.01,0.005),dispersions=0.25) fit <- Simfit(sim) summary(fit) } \seealso{ \code{\link{SingleSimFit.object}} }
# Exploratory Data Analysis - Plot 1
# Reads the UCI "Individual household electric power consumption" data set,
# subsets it to 2007-02-01..2007-02-02, and draws a histogram of
# Global Active Power, saved to plot1.png (480 x 480 px).

# specify the column class of data to make read.table faster
classes <- c("character", "character", rep("numeric", 7))

# read data from disk; "?" marks missing values in this data set
# (fixed: the argument was misspelled `stringsAsFactor` and only worked via
# R's partial argument matching; also use FALSE instead of the reassignable F)
tabAll <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                     colClasses = classes, na.strings = "?",
                     stringsAsFactors = FALSE)

# keep only the two days of interest (dates are stored as d/m/yyyy strings)
data <- tabAll[tabAll$Date %in% c("1/2/2007", "2/2/2007"), ]

# plot the histogram with title and x label
hist(data$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", col = "red")

# copy the plot to a PNG file, then close the file device
dev.copy(png, file = "plot1.png", height = 480, width = 480)
dev.off()
/plot1.R
no_license
jiag/ExData_Plotting1
R
false
false
716
r
# set work directory # specify the column class of data to make read.table faster classes<-c("character","character",rep("numeric",7)) # read data from disk tabAll <- read.table("household_power_consumption.txt", sep=";",header = TRUE, colClasses = classes,na.strings="?",stringsAsFactor=F) # grap a subset of data in date 2007-02-01 and 2007-02-02 and save it to variable "data" data<-tabAll[(tabAll$Date=="1/2/2007"|tabAll$Date=="2/2/2007"),] # plot the histogram with title and xlabel hist(data$Global_active_power,main="Global Active Power",xlab="Global Active Power (kilowatts)",col="red") # copy the plot to a PNG file dev.copy(png,file="plot1.png",height=480,width=480) # close the file device dev.off()
#### name: reml_boilerplate2 ####
# Build a metadata-template list for a data frame: one entry per column,
# keyed by the column name, describing how that column should be documented:
#   * character/factor columns -> a named character vector with one "TBA"
#     placeholder per observed level (level names are sanitised with
#     make.names, lower-cased, and dots collapsed to single underscores)
#   * numeric columns          -> the string "number"
#   * date-parsable columns    -> the string "YYYY-MM-DD"
#   * all-NA columns           -> the column's own name
# Columns matching none of the above are omitted from the result.
#
# Improvements over the previous version:
#   * builds the list directly instead of pasting together R source code and
#     running eval(parse(...))
#   * no longer crashes on data frames with a single qualifying column (the
#     old string-joining loop iterated 1:(length - 1), i.e. c(1, 0))
reml_boilerplate2 <- function(.dataframe) {
  unit_metadata <- list()
  for (col_name in names(.dataframe)) {
    column <- .dataframe[[col_name]]
    # treat character columns like factors so their values become levels
    if (is.character(column)) {
      column <- factor(column)
    }
    if (is.factor(column)) {
      vals <- names(table(column))
      # symbols may pollute the level names: sanitise them
      vals <- make.names(vals)
      vals <- tolower(vals)
      vals <- gsub("\\.", "_", vals)
      vals <- gsub("_+", "_", vals)
      unit_metadata[[col_name]] <- setNames(rep("TBA", length(vals)), vals)
    } else if (is.numeric(column)) {
      unit_metadata[[col_name]] <- "number"
    } else if (!all(is.na(as.Date(as.character(na.omit(column)),
                                  origin = "1970-01-01")))) {
      # anything whose non-missing values survive as.Date() counts as a date
      unit_metadata[[col_name]] <- "YYYY-MM-DD"
    } else if (all(is.na(column))) {
      unit_metadata[[col_name]] <- col_name
    }
  }
  unit_metadata
}
#u1 <- reml_boilerplate2(analyte)
#u1
/R/reml_boilerplate2.r
permissive
ivanhanigan/disentangle
R
false
false
1,756
r
#### name:get_vals#### # this is a test #dat$dates <- as.Date(rep("2014-08-23", nrow(dat))) #str(dat) #.dataframe <- analyte #str(analyte) reml_boilerplate2 <- function(.dataframe){ strng <- list() for(i in 1:ncol(.dataframe)){ # i = 6 .variable <- names(.dataframe)[i] #.dataframe[,.variable] if(is.character(.dataframe[ ,.variable])){ .dataframe[,.variable] <- factor(.dataframe[,.variable]) } if(is.factor(.dataframe[,.variable])){ x <- .dataframe[,.variable] vals <- names(table(x)) # symbols may pollute the string to parse vals <- make.names(vals) vals <- tolower(vals) vals <- gsub("\\.","_",vals) vals <- gsub("_+","_",vals) v <- .variable #v strng[[.variable]] <- paste( v, ' = c(', paste(vals, sep = '', collapse = ' = "TBA",') ,' = "TBA")', sep = '') } else if(is.numeric(.dataframe[,.variable])){ v <- .variable strng[[.variable]] <- paste(v,' = "number"',sep='') # strng[[.variable]] <- '"number"' } else if( !all(is.na(as.Date(as.character(na.omit(.dataframe[,.variable])), origin = "1970-01-01"))) ){ v <- .variable strng[[.variable]] <- paste(v,' = "YYYY-MM-DD"',sep='') # strng[[.variable]] <- '"YYYY-MM-DD"' } else if (all(is.na(.dataframe[ ,.variable]))){ v <- .variable strng[[.variable]] <- paste(v,' = "', names(.dataframe)[i], '"', sep='') } } #strng strng2 <- "" for(n in 1:(length(strng)-1)){ strng2 <- paste(strng2, strng[[n]], ",\n") } strng2 <- paste(strng2, strng[[length(strng)]], "\n") #cat(strng2) strng3 <- paste(" unit_metadata = list(",strng2,")", sep = "") #cat(strng3) eval(parse(text = strng3)) #unit_metadata return(unit_metadata) } #u1 <- get_vals(analyte) #u1
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segreg_local.R
\name{localEntropy}
\alias{localEntropy}
\title{Computes the local entropy score for a unit area Ei (diversity).}
\usage{
localEntropy(pop, pop_intensity)
}
\arguments{
\item{pop}{A data frame with id, group, and number of population.}

\item{pop_intensity}{A data frame with id and population intensity for all
groups. Result from \code{\link{popIntensity}}.}
}
\value{
A data frame with id and local indexes of entropy.
}
\description{
Computes the local entropy score for a unit area Ei (diversity).
}
\references{
Iceland (2004). The multigroup entropy index (also known as Theil’s H or the
information theory index). \emph{US Census Bureau}, Retrieved July 31, 2006.

Sousa (2017). Segregation Metrics.
\url{https://github.com/sandrofsousa/Resolution/blob/master/Pysegreg/segregationMetrics.py}.
}
\author{
Beatriz Moura dos Santos
}
/man/localEntropy.Rd
permissive
biamouras/accSeg
R
false
true
946
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/segreg_local.R \name{localEntropy} \alias{localEntropy} \title{Computes the local entropy score for a unit area Ei (diversity).} \usage{ localEntropy(pop, pop_intensity) } \arguments{ \item{pop}{A data frame with id, group, and number of population.} \item{pop_intensity}{A data frame with id and population intensity for all groups. Result from \code{\link{popIntensity}}.} } \value{ A data frame with id and local indexes of entropy. } \description{ Computes the local entropy score for a unit area Ei (diversity). } \references{ Iceland (2004. The multigroup entropy index (also known as Theil’s H or the information theory index). \emph{US Census Bureau}, Retrieved July, 31, 2006. Sousa (2017). Segregation Metrics. \url{https://github.com/sandrofsousa/Resolution/blob/master/Pysegreg/segregationMetrics.py}. } \author{ Beatriz Moura dos Santos }
# Package test runner: executed by R CMD check to run the testthat suite.
library(testthat)
library(mc)

# Run all tests under tests/testthat/ for the "mc" package.
test_check("mc")
/tests/testthat.R
no_license
yuki0425/mc
R
false
false
48
r
library(testthat) library(mc) test_check("mc")
#' @rdname parseMisoAnnotation #' #' @param complexEvents Boolean: should complex events in A3SS and A5SS be #' parsed? #' #' @export #' @examples #' # Load sample files #' folder <- "extdata/eventsAnnotSample/VASTDB/Hsa/TEMPLATES" #' vastToolsOutput <- system.file(folder, package="psichomics") #' #' vast <- parseVastToolsAnnotation(vastToolsOutput) parseVastToolsAnnotation <- function( folder, types=c("ALT3", "ALT5", "COMBI", "IR", "MERGE3m", "MIC", "EXSK", "MULTI"), genome="Hsa", complexEvents=FALSE) { display("Retrieving VAST-TOOLS annotation...") typesRegex <- sprintf("(%s)", paste(types, collapse="|")) typesFile <- list.files(folder, full.names=TRUE, pattern=sprintf( "%s\\.%s\\..*.txt$", genome, typesRegex)) # Remove first file if available typesFile <- grep("\\.1\\.txt", typesFile, invert=TRUE, value=TRUE) names(typesFile) <- gsub(sprintf("^.*%s\\.(.*?)\\..*\\.txt$", genome), "\\1", typesFile) annot <- lapply(typesFile, read.delim, stringsAsFactors = FALSE, comment.char="#", header=TRUE) display("Parsing VAST-TOOLS annotation...") types <- names(annot) skippedExon <- c("COMBI", "MERGE3m", "MIC", "EXSK", "MULTI") parseEvents <- function(i) { type <- types[i] display(type) a <- annot[[i]] if (nrow(a) > 0) { parsed <- parseVastToolsEvent(a) if (!complexEvents) { if (type == "ALT3") { filter <- !is.na(parsed$A1.start) parsed <- parsed[filter, ] parsed <- uniqueBy(parsed, "Chromosome", "Strand", "C1.end", "A1.start", "C2.start") } else if (type == "ALT5") { filter <- !is.na(parsed$A1.end) parsed <- parsed[filter, ] parsed <- uniqueBy(parsed, "Chromosome", "Strand", "C1.end", "A1.end", "C2.start") } else if (type %in% skippedExon) { C1.end <- vapply(parsed$C1.end, length, numeric(1)) > 1 A1.start <- vapply(parsed$A1.start, length, numeric(1)) > 1 A1.end <- vapply(parsed$A1.end, length, numeric(1)) > 1 C2.start <- vapply(parsed$C2.start, length, numeric(1)) > 1 filter <- !(C1.end | A1.start | A1.end | C2.start) parsed <- parsed[filter, ] } } return(parsed) } } 
events <- lapply(seq_along(annot), parseEvents) events <- rbind.fill(events) events <- unique(events) names(events)[match("Gene.symbol", names(events))] <- "Gene" # Remove duplicated skipped exons from multiple event types skipped <- events$Event.type == "SE" uniq <- uniqueBy(events[skipped, ], "Chromosome", "Strand", "C1.end", "A1.start", "A1.end", "C2.start") events <- rbind(events[!skipped, ], uniq) class(events) <- c("ASevents", class(events)) return(events) } #' Parses an alternative splicing event from VAST-TOOLS #' #' @details Junctions are parsed from #' #' @param event Data.frame: VAST-TOOLS event containing gene symbol, event ID, #' length, junctions coordinates, event type and inclusion levels for both #' samples #' #' @note Only supports to parse one event at a time. #' #' @return List with the event attributes (chromosome, strand, event type and #' the position of the exon boundaries) #' @keywords internal #' #' @examples #' event <- read.table(text = #' "NFYA HsaEX0042823 chr6:41046768-41046903 136 chr6:41040823,41046768-41046903,41051785 C2 0 N 0 N" #' ) #' psichomics:::parseVastToolsEvent(event) parseVastToolsEvent <- function(event) { # Create list with event attributes event_attrs <- data.frame("Program" = "VAST-TOOLS", "Gene symbol" = as.character(event[[1]]), "Event ID" = as.character(event[[2]]), stringsAsFactors = FALSE) # By default, assumes things may be parsable as an exon skipping # TODO (NunoA): make sure this is intended... 
event_type <- as.character(event[1, 6]) event_type <- switch(event_type, "IR-C" = "RI", "IR-S" = "RI", "Alt3" = "A3SS", "Alt5" = "A5SS", "SE") event_attrs[["Event.type"]] <- event_type # Split junctions position coord <- as.character(event[[5]]) junctions <- strsplit(coord, ":|,|-|=") # Split multiple acceptors/donors (separated with +) splitJunctions <- function(i) { split <- strsplit(i, "+", fixed=TRUE) if (length(split) < 4) split[[4]] <- character(0) return(split) } junctions <- lapply(junctions, splitJunctions) junctions <- data.matrix(do.call(rbind, junctions)) # Get chromosomes and convert numbers to numeric event_attrs[["Chromosome"]] <- junctions[, 1] nrowJunctions <- nrow(junctions) junctions <- junctions[, 2:ncol(junctions)] junctions <- matrix(lapply(junctions, as.numeric), nrow = nrowJunctions) # Get strand for retained intron if (event_type == "RI") { len <- nchar(coord) strand <- substr(coord, len, len) parsed <- parseVastToolsRI(junctions, strand) } else { parseJunctions <- switch(event_type, "SE" = parseVastToolsSE, "A3SS" = parseVastToolsA3SS, "A5SS" = parseVastToolsA5SS) parsed <- parseJunctions(junctions) } if (ncol(event) > 7) { more_attrs <- data.frame("Inclusion level A" = as.numeric(event[[7]]), "Inclusion level B" = as.numeric(event[[9]]), stringsAsFactors = FALSE) return(cbind(event_attrs, more_attrs, parsed)) } else { return(cbind(event_attrs, parsed)) } } #' Parse junctions of an event from VAST-TOOLS according to event type #' #' @param junctions Data.frame or matrix: exon-exon junctions of alternative #' splicing events (it must have 4 columns) #' #' @details The following event types are available to be parsed: #' \itemize{ #' \item{\bold{SE} (skipped exon)} #' \item{\bold{RI} (retained intron)} #' \item{\bold{A5SS} (alternative 5' splice site)} #' \item{\bold{A3SS} (alternative 3' splice site)} #' } #' #' @seealso \code{\link{parseVastToolsEvent}()} #' #' @return List of parsed junctions #' @keywords internal #' #' @examples #' 
#' junctions <- read.table(text = "41040823 41046768 41046903 41051785")
#' psichomics:::parseVastToolsSE(junctions)
#'
#' # these functions are vectorised!
#' junctions <- read.table(text = "41040823 41046768 41046903 41051785
#' 58864658 58864693 58864294 58864563")
#' psichomics:::parseVastToolsSE(junctions)
parseVastToolsSE <- function (junctions) {
    # Parse VAST-TOOLS skipped-exon (SE) junction coordinates, one event per
    # row of `junctions`. Cells may hold multi-valued list elements, hence the
    # `[[`-extraction of the first item when comparing coordinates.
    # Creates a data frame of parsed junctions filled with NAs
    parsed <- createJunctionsTemplate(nrow(junctions))

    # Strand is plus if the first junction is lower than the last junction
    plus <- sapply(junctions[, 1], "[[", 1) < sapply(junctions[, 4], "[[", 1)
    parsed[["Strand"]] <- ifelse(plus, "+", "-")

    # column 1 is the constitutive C1 end; column 4 the constitutive C2 start
    parsed[["C1.end"]] <- junctions[, 1]
    parsed[["C2.start"]] <- junctions[, 4]
    # Plus strand: columns 2 and 3 are the alternative exon's start and end
    parsed[plus, ][["A1.start"]] <- junctions[plus, 2]
    parsed[plus, ][["A1.end"]] <- junctions[plus, 3]
    # Minus strand: the same columns are read in the opposite orientation
    parsed[!plus, ][["A1.start"]] <- junctions[!plus, 3]
    parsed[!plus, ][["A1.end"]] <- junctions[!plus, 2]
    return(parsed)
}

#' @rdname parseVastToolsSE
#' @param strand Character: positive (+) or negative (-) strand
#'
#' @examples
#'
#' junctions <- read.table(text = "58864658 58864693 58864294 58864563")
#' psichomics:::parseVastToolsRI(junctions, strand = "+")
parseVastToolsRI <- function (junctions, strand) {
    # Parse VAST-TOOLS retained-intron (RI) junction coordinates; unlike SE,
    # the strand is supplied by the caller rather than inferred.
    # Creates a data frame of parsed junctions filled with NAs
    parsed <- createJunctionsTemplate(nrow(junctions))

    plus <- strand == "+"
    parsed[["Strand"]] <- strand
    # Plus strand: columns map directly to C1 start/end and C2 start/end
    parsed[plus, ][["C1.start"]] <- junctions[plus, 1]
    parsed[plus, ][["C1.end"]] <- junctions[plus, 2]
    parsed[plus, ][["C2.start"]] <- junctions[plus, 3]
    parsed[plus, ][["C2.end"]] <- junctions[plus, 4]
    # Minus strand: start/end columns are swapped within each exon pair
    parsed[!plus, ][["C1.start"]] <- junctions[!plus, 2]
    parsed[!plus, ][["C1.end"]] <- junctions[!plus, 1]
    parsed[!plus, ][["C2.start"]] <- junctions[!plus, 4]
    parsed[!plus, ][["C2.end"]] <- junctions[!plus, 3]
    return(parsed)
}

#' @rdname parseVastToolsSE
#'
#' @examples
#'
#' junctions <- rbind(
#'    c(36276385, list(c(36277798,
#' 36277315)), 36277974),
#'    c(7133604, 7133377, list(c(7133474, 7133456)))
#' )
#' psichomics:::parseVastToolsA3SS(junctions)
parseVastToolsA3SS <- function (junctions) {
    # Parse VAST-TOOLS alternative 3' splice site (A3SS) junctions. Column 1
    # holds the constitutive C1 end; columns 2 and 3 hold the alternative
    # acceptor coordinates, and either may be absent (zero-length element).
    # Creates a data frame of parsed junctions filled with NAs
    parsed <- createJunctionsTemplate(nrow(junctions))

    # Check if there aren't junctions missing
    is2Available <- sapply(junctions[,2], length) > 0
    is3Available <- sapply(junctions[,3], length) > 0

    # Strand is plus if the first junction is lower than the other junctions
    available <- ifelse(is3Available, junctions[, 3], junctions[, 2])
    plus <- sapply(junctions[, 1], "[[", 1) < sapply(available, "[[", 1)
    parsed[["Strand"]] <- ifelse(plus, "+", "-")

    parsed[["C1.end"]] <- junctions[, 1]
    # Plus strand
    plus3 <- plus & is3Available
    bigList <- sapply(junctions[, 2], length) > 2 # filter unrecognised events
    parsed[plus & !bigList, ][c("A1.start", "A2.start")] <-
        ldply(junctions[plus & !bigList, 2])
    parsed[plus & bigList, ][["A2.start"]] <- junctions[plus & bigList, 2]
    parsed[plus3, ][["A2.end"]] <- junctions[plus3, 3]
    # Minus strand
    minus2 <- !plus & is2Available
    bigList <- sapply(junctions[, 3], length) > 2 # filter unrecognised events
    parsed[!plus & !bigList, ][c("A1.start", "A2.start")] <-
        ldply(junctions[!plus & !bigList, 3])
    parsed[!plus & bigList, ][["A2.start"]] <- junctions[!plus & bigList, 3]
    parsed[minus2, ][["A2.end"]] <- junctions[minus2, 2]
    return(parsed)
}

#' @rdname parseVastToolsSE
#'
#' @examples
#'
#' junctions <- rbind(
#'    c(74650610, list(c(74650654, 74650658)), 74650982),
#'    c(list(c(49557666, 49557642), 49557746, 49557470))
#' )
#' psichomics:::parseVastToolsA5SS(junctions)
parseVastToolsA5SS <- function (junctions) {
    # Parse VAST-TOOLS alternative 5' splice site (A5SS) junctions; mirror
    # image of parseVastToolsA3SS (column 3 is the constitutive C2 start,
    # columns 1 and 2 hold the alternative donor coordinates).
    # Creates a data frame of parsed junctions filled with NAs
    parsed <- createJunctionsTemplate(nrow(junctions))

    # Check if there aren't junctions missing
    is1Available <- sapply(junctions[,1], length) > 0
    is2Available <- sapply(junctions[,2], length) > 0

    # Strand is plus if the first junction is lower than the other junctions
    available <- ifelse(is2Available, junctions[, 2], junctions[, 1])
    plus <- sapply(available, "[[", 1) < sapply(junctions[, 3], "[[", 1)
    parsed[["Strand"]] <- ifelse(plus, "+", "-")

    parsed[["C2.start"]] <- junctions[, 3]
    # Plus strand
    plus1 <- plus & is1Available
    bigList <- sapply(junctions[, 2], length) > 2 # filter unrecognised events
    parsed[plus1, ][["A2.start"]] <- junctions[plus1, 1]
    parsed[plus & !bigList, ][c("A1.end", "A2.end")] <-
        ldply(junctions[plus & !bigList, 2])
    parsed[plus & bigList, ][["A2.end"]] <- junctions[plus & bigList, 2]
    # Minus strand
    minus2 <- !plus & is2Available
    bigList <- sapply(junctions[, 1], length) > 2 # filter unrecognised events
    parsed[minus2, ][["A2.start"]] <- junctions[minus2, 2]
    parsed[!plus & !bigList, ][c("A1.end", "A2.end")] <-
        ldply(junctions[!plus & !bigList, 1])
    parsed[!plus & bigList, ][["A2.end"]] <- junctions[!plus & bigList, 1]
    return(parsed)
}
/R/events_vastTools.R
no_license
liangdp1984/psichomics
R
false
false
12,115
r
#' @rdname parseMisoAnnotation #' #' @param complexEvents Boolean: should complex events in A3SS and A5SS be #' parsed? #' #' @export #' @examples #' # Load sample files #' folder <- "extdata/eventsAnnotSample/VASTDB/Hsa/TEMPLATES" #' vastToolsOutput <- system.file(folder, package="psichomics") #' #' vast <- parseVastToolsAnnotation(vastToolsOutput) parseVastToolsAnnotation <- function( folder, types=c("ALT3", "ALT5", "COMBI", "IR", "MERGE3m", "MIC", "EXSK", "MULTI"), genome="Hsa", complexEvents=FALSE) { display("Retrieving VAST-TOOLS annotation...") typesRegex <- sprintf("(%s)", paste(types, collapse="|")) typesFile <- list.files(folder, full.names=TRUE, pattern=sprintf( "%s\\.%s\\..*.txt$", genome, typesRegex)) # Remove first file if available typesFile <- grep("\\.1\\.txt", typesFile, invert=TRUE, value=TRUE) names(typesFile) <- gsub(sprintf("^.*%s\\.(.*?)\\..*\\.txt$", genome), "\\1", typesFile) annot <- lapply(typesFile, read.delim, stringsAsFactors = FALSE, comment.char="#", header=TRUE) display("Parsing VAST-TOOLS annotation...") types <- names(annot) skippedExon <- c("COMBI", "MERGE3m", "MIC", "EXSK", "MULTI") parseEvents <- function(i) { type <- types[i] display(type) a <- annot[[i]] if (nrow(a) > 0) { parsed <- parseVastToolsEvent(a) if (!complexEvents) { if (type == "ALT3") { filter <- !is.na(parsed$A1.start) parsed <- parsed[filter, ] parsed <- uniqueBy(parsed, "Chromosome", "Strand", "C1.end", "A1.start", "C2.start") } else if (type == "ALT5") { filter <- !is.na(parsed$A1.end) parsed <- parsed[filter, ] parsed <- uniqueBy(parsed, "Chromosome", "Strand", "C1.end", "A1.end", "C2.start") } else if (type %in% skippedExon) { C1.end <- vapply(parsed$C1.end, length, numeric(1)) > 1 A1.start <- vapply(parsed$A1.start, length, numeric(1)) > 1 A1.end <- vapply(parsed$A1.end, length, numeric(1)) > 1 C2.start <- vapply(parsed$C2.start, length, numeric(1)) > 1 filter <- !(C1.end | A1.start | A1.end | C2.start) parsed <- parsed[filter, ] } } return(parsed) } } 
events <- lapply(seq_along(annot), parseEvents) events <- rbind.fill(events) events <- unique(events) names(events)[match("Gene.symbol", names(events))] <- "Gene" # Remove duplicated skipped exons from multiple event types skipped <- events$Event.type == "SE" uniq <- uniqueBy(events[skipped, ], "Chromosome", "Strand", "C1.end", "A1.start", "A1.end", "C2.start") events <- rbind(events[!skipped, ], uniq) class(events) <- c("ASevents", class(events)) return(events) } #' Parses an alternative splicing event from VAST-TOOLS #' #' @details Junctions are parsed from #' #' @param event Data.frame: VAST-TOOLS event containing gene symbol, event ID, #' length, junctions coordinates, event type and inclusion levels for both #' samples #' #' @note Only supports to parse one event at a time. #' #' @return List with the event attributes (chromosome, strand, event type and #' the position of the exon boundaries) #' @keywords internal #' #' @examples #' event <- read.table(text = #' "NFYA HsaEX0042823 chr6:41046768-41046903 136 chr6:41040823,41046768-41046903,41051785 C2 0 N 0 N" #' ) #' psichomics:::parseVastToolsEvent(event) parseVastToolsEvent <- function(event) { # Create list with event attributes event_attrs <- data.frame("Program" = "VAST-TOOLS", "Gene symbol" = as.character(event[[1]]), "Event ID" = as.character(event[[2]]), stringsAsFactors = FALSE) # By default, assumes things may be parsable as an exon skipping # TODO (NunoA): make sure this is intended... 
event_type <- as.character(event[1, 6]) event_type <- switch(event_type, "IR-C" = "RI", "IR-S" = "RI", "Alt3" = "A3SS", "Alt5" = "A5SS", "SE") event_attrs[["Event.type"]] <- event_type # Split junctions position coord <- as.character(event[[5]]) junctions <- strsplit(coord, ":|,|-|=") # Split multiple acceptors/donors (separated with +) splitJunctions <- function(i) { split <- strsplit(i, "+", fixed=TRUE) if (length(split) < 4) split[[4]] <- character(0) return(split) } junctions <- lapply(junctions, splitJunctions) junctions <- data.matrix(do.call(rbind, junctions)) # Get chromosomes and convert numbers to numeric event_attrs[["Chromosome"]] <- junctions[, 1] nrowJunctions <- nrow(junctions) junctions <- junctions[, 2:ncol(junctions)] junctions <- matrix(lapply(junctions, as.numeric), nrow = nrowJunctions) # Get strand for retained intron if (event_type == "RI") { len <- nchar(coord) strand <- substr(coord, len, len) parsed <- parseVastToolsRI(junctions, strand) } else { parseJunctions <- switch(event_type, "SE" = parseVastToolsSE, "A3SS" = parseVastToolsA3SS, "A5SS" = parseVastToolsA5SS) parsed <- parseJunctions(junctions) } if (ncol(event) > 7) { more_attrs <- data.frame("Inclusion level A" = as.numeric(event[[7]]), "Inclusion level B" = as.numeric(event[[9]]), stringsAsFactors = FALSE) return(cbind(event_attrs, more_attrs, parsed)) } else { return(cbind(event_attrs, parsed)) } } #' Parse junctions of an event from VAST-TOOLS according to event type #' #' @param junctions Data.frame or matrix: exon-exon junctions of alternative #' splicing events (it must have 4 columns) #' #' @details The following event types are available to be parsed: #' \itemize{ #' \item{\bold{SE} (skipped exon)} #' \item{\bold{RI} (retained intron)} #' \item{\bold{A5SS} (alternative 5' splice site)} #' \item{\bold{A3SS} (alternative 3' splice site)} #' } #' #' @seealso \code{\link{parseVastToolsEvent}()} #' #' @return List of parsed junctions #' @keywords internal #' #' @examples #' 
junctions <- read.table(text = "41040823 41046768 41046903 41051785") #' psichomics:::parseVastToolsSE(junctions) #' #' # these functions are vectorised! #' junctions <- read.table(text = "41040823 41046768 41046903 41051785 #' 58864658 58864693 58864294 58864563") #' psichomics:::parseVastToolsSE(junctions) parseVastToolsSE <- function (junctions) { # Creates a data frame of parsed junctions filled with NAs parsed <- createJunctionsTemplate(nrow(junctions)) # Strand is plus if the first junction is lower than the last junction plus <- sapply(junctions[, 1], "[[", 1) < sapply(junctions[, 4], "[[", 1) parsed[["Strand"]] <- ifelse(plus, "+", "-") parsed[["C1.end"]] <- junctions[, 1] parsed[["C2.start"]] <- junctions[, 4] # Plus strand parsed[plus, ][["A1.start"]] <- junctions[plus, 2] parsed[plus, ][["A1.end"]] <- junctions[plus, 3] # Minus strand parsed[!plus, ][["A1.start"]] <- junctions[!plus, 3] parsed[!plus, ][["A1.end"]] <- junctions[!plus, 2] return(parsed) } #' @rdname parseVastToolsSE #' @param strand Character: positive (+) or negative (-) strand #' #' @examples #' #' junctions <- read.table(text = "58864658 58864693 58864294 58864563") #' psichomics:::parseVastToolsRI(junctions, strand = "+") parseVastToolsRI <- function (junctions, strand) { # Creates a data frame of parsed junctions filled with NAs parsed <- createJunctionsTemplate(nrow(junctions)) plus <- strand == "+" parsed[["Strand"]] <- strand # Plus strand parsed[plus, ][["C1.start"]] <- junctions[plus, 1] parsed[plus, ][["C1.end"]] <- junctions[plus, 2] parsed[plus, ][["C2.start"]] <- junctions[plus, 3] parsed[plus, ][["C2.end"]] <- junctions[plus, 4] # Minus strand parsed[!plus, ][["C1.start"]] <- junctions[!plus, 2] parsed[!plus, ][["C1.end"]] <- junctions[!plus, 1] parsed[!plus, ][["C2.start"]] <- junctions[!plus, 4] parsed[!plus, ][["C2.end"]] <- junctions[!plus, 3] return(parsed) } #' @rdname parseVastToolsSE #' #' @examples #' #' junctions <- rbind( #' c(36276385, list(c(36277798, 
36277315)), 36277974), #' c(7133604, 7133377, list(c(7133474, 7133456))) #' ) #' psichomics:::parseVastToolsA3SS(junctions) parseVastToolsA3SS <- function (junctions) { # Creates a data frame of parsed junctions filled with NAs parsed <- createJunctionsTemplate(nrow(junctions)) # Check if there aren't junctions missing is2Available <- sapply(junctions[,2], length) > 0 is3Available <- sapply(junctions[,3], length) > 0 # Strand is plus if the first junction is lower than the other junctions available <- ifelse(is3Available, junctions[, 3], junctions[, 2]) plus <- sapply(junctions[, 1], "[[", 1) < sapply(available, "[[", 1) parsed[["Strand"]] <- ifelse(plus, "+", "-") parsed[["C1.end"]] <- junctions[, 1] # Plus strand plus3 <- plus & is3Available bigList <- sapply(junctions[, 2], length) > 2 # filter unrecognised events parsed[plus & !bigList, ][c("A1.start", "A2.start")] <- ldply(junctions[plus & !bigList, 2]) parsed[plus & bigList, ][["A2.start"]] <- junctions[plus & bigList, 2] parsed[plus3, ][["A2.end"]] <- junctions[plus3, 3] # Minus strand minus2 <- !plus & is2Available bigList <- sapply(junctions[, 3], length) > 2 # filter unrecognised events parsed[!plus & !bigList, ][c("A1.start", "A2.start")] <- ldply(junctions[!plus & !bigList, 3]) parsed[!plus & bigList, ][["A2.start"]] <- junctions[!plus & bigList, 3] parsed[minus2, ][["A2.end"]] <- junctions[minus2, 2] return(parsed) } #' @rdname parseVastToolsSE #' #' @examples #' #' junctions <- rbind( #' c(74650610, list(c(74650654, 74650658)), 74650982), #' c(list(c(49557666, 49557642), 49557746, 49557470)) #' ) #' psichomics:::parseVastToolsA5SS(junctions) parseVastToolsA5SS <- function (junctions) { # Creates a data frame of parsed junctions filled with NAs parsed <- createJunctionsTemplate(nrow(junctions)) # Check if there aren't junctions missing is1Available <- sapply(junctions[,1], length) > 0 is2Available <- sapply(junctions[,2], length) > 0 # Strand is plus if the first junction is lower than the other 
junctions available <- ifelse(is2Available, junctions[, 2], junctions[, 1]) plus <- sapply(available, "[[", 1) < sapply(junctions[, 3], "[[", 1) parsed[["Strand"]] <- ifelse(plus, "+", "-") parsed[["C2.start"]] <- junctions[, 3] # Plus strand plus1 <- plus & is1Available bigList <- sapply(junctions[, 2], length) > 2 # filter unrecognised events parsed[plus1, ][["A2.start"]] <- junctions[plus1, 1] parsed[plus & !bigList, ][c("A1.end", "A2.end")] <- ldply(junctions[plus & !bigList, 2]) parsed[plus & bigList, ][["A2.end"]] <- junctions[plus & bigList, 2] # Minus strand minus2 <- !plus & is2Available bigList <- sapply(junctions[, 1], length) > 2 # filter unrecognised events parsed[minus2, ][["A2.start"]] <- junctions[minus2, 2] parsed[!plus & !bigList, ][c("A1.end", "A2.end")] <- ldply(junctions[!plus & !bigList, 1]) parsed[!plus & bigList, ][["A2.end"]] <- junctions[!plus & bigList, 1] return(parsed) }
# Download and install the RODBC package; skip this line if already installed
install.packages("RODBC")
# Load RODBC
library(RODBC)
# Open a connection object to the Excel workbook
conn <- odbcConnectExcel2007("F:/R语言/RData/2015年度中国城市GDP排名.xlsx")
# Read the worksheet "Table 1" through the connection
mydata <- sqlFetch(conn, "Table 1")
mydata
# Close the connection
odbcClose(conn)

# Install the xlsx package for working with xlsx files directly
# Note: this requires Java on the machine, otherwise the xlsx package fails to load
# read.xlsx(file, n) reads an xlsx file; n is the worksheet index
install.packages("xlsx")
# Load the xlsx package
library(xlsx)
# Read the xlsx file directly
mydata <- read.xlsx("F:/R语言/RData/2015年度中国城市GDP排名.xlsx", 1)
/案例演示/2、创建数据集/9、导入Excel数据.R
no_license
ocxz/RLange
R
false
false
710
r
# 下载并按照RODBC包,已安装则无须执行此代码 install.packages("RODBC") # 加载RODBC library(RODBC) # 获取于Excel的连接对象 conn <- odbcConnectExcel2007("F:/R语言/RData/2015年度中国城市GDP排名.xlsx") # 从连接对象中,读取工作表Table 1 mydata <- sqlFetch(conn, "Table 1") mydata # 关闭连接 odbcClose(conn) # 安装xlsx包,快速操作xlsx文件 # 注意:这个要求机器上装载了java,否则加载不了xlsx包 # 使用read.xlsx(file, n) 函数读取xlsx文件,n代表工作表序列号 install.packages("xlsx") # 加载xlsx包 library(xlsx) # 直接读取xlsx文件 mydata <- read.xlsx("F:/R语言/RData/2015年度中国城市GDP排名.xlsx", 1)
# Homework 2: perceptron learning vs. linear regression on random 2-D data.
# Points live in [-1, 1]^2 with a leading 1 for the intercept term; the target
# function is a random line encoded as the weight vector c(-a, -m, 1), so
# sign(x %*% w) classifies points above/below the line.

# Draw a random line through two uniform points in [-1, 1]^2 and return its
# weight vector c(-a, -m, 1) (intercept a, slope m).
sorteaRecta <- function() {
    x <- matrix(runif(4)*2 - 1, 2, 2)
    p1 <- x[1,]
    p2 <- x[2,]
    m <- (p2[2] - p1[2])/(p2[1] - p1[1])   # slope through the two points
    a <- p1[2] - p1[1]*m                   # intercept
    c(-a, -m, 1)
}

# Draw N uniform points in [-1, 1]^2, prepending a column of 1s (bias term).
sorteaPuntos <- function(N) {
    x <- cbind(1, matrix(runif(2*N, -1, 1), N, 2))
}

# Classify each row of x with weight vector r: returns +1 / -1 / 0.
evaluaPuntos <- function(x, r) {
    sign(x %*% r)
}

# Linear-regression weights via the pseudo-inverse: g = (X'X)^-1 X' y.
calculaG <- function(x, y) {
    xPrima <- t(x)
    xDaga <- solve(xPrima %*% x) %*% xPrima
    xDaga %*% y
}

# Exercise 5: in-sample classification error of linear regression, one value
# per simulation run.
ejercicio5 <- function(Puntos, Simulaciones) {
    salida <- rep(0, Simulaciones)
    for (i in 1:Simulaciones) {
        x <- sorteaPuntos(Puntos)
        r <- sorteaRecta()
        y <- evaluaPuntos(x, r)
        g <- calculaG(x, y)
        yp <- evaluaPuntos(x, g)
        salida[i] <- mean(yp != y)    # fraction misclassified in-sample
    }
    return(salida)
}

# Exercise 6: out-of-sample error — fit on PuntosIn points, evaluate on a
# fresh sample of PuntosOut points drawn from the same target line.
ejercicio6 <- function(PuntosIn, PuntosOut, Simulaciones) {
    salida <- rep(0, Simulaciones)
    for (i in 1:Simulaciones) {
        x <- sorteaPuntos(PuntosIn)
        r <- sorteaRecta()
        y <- evaluaPuntos(x, r)
        g <- calculaG(x, y)

        xOut <- sorteaPuntos(PuntosOut)
        yOut <- evaluaPuntos(xOut, r)
        ypOut <- evaluaPuntos(xOut, g)
        salida[i] <- mean(ypOut != yOut)  # fraction misclassified out-of-sample
    }
    return(salida)
}

# Perceptron learning algorithm, warm-started from weights g (at most 100
# iterations): pick a random misclassified point and update towards its true
# label under f. Returns list(n = iterations used, g = final weights).
perceptron <- function(y, f, g, x) {
    newG <- g
    indices <- 1:length(y)
    for (n in 1:100) {
        yp = evaluaPuntos(x, newG)
        errores <- y != yp
        L = sum(errores)
        if (L == 0) {
            break;                         # converged: no misclassified points
        }
        indiceErrores <- indices[errores]
        indiceAleatorio <- indiceErrores[sample(1:L, 1)]  # random misclassified index
        p <- x[indiceAleatorio, ]
        newG <- newG + evaluaPuntos(p,f)*p                # PLA update with true label
    }
    return(list("n"= n, "g" = newG))
}

# Exercise 7: iterations the perceptron needs when initialised with the
# regression weights, plus the resulting in-sample error (one row per run).
ejercicio7 <- function(PuntosIn, Simulaciones) {
    salida <- matrix(0, Simulaciones, 2)
    for (i in 1:Simulaciones) {
        x <- sorteaPuntos(PuntosIn)
        r <- sorteaRecta()
        y <- evaluaPuntos(x, r)
        g <- calculaG(x, y)
        z <- perceptron(y, r, g, x)
        yp <- evaluaPuntos(x, z$g)
        print(paste("n: ", z$n))
        print(paste("mean: ", mean(yp != y)))
        salida[i, ] <- c(z$n, mean(yp != y))
    }
    return(salida)
}
/homework2.r
no_license
jgsastre/MachineLearning
R
false
false
1,965
r
# Homework 2 -- linear regression for classification and the perceptron
# learning algorithm (PLA) on uniform points in [-1, 1] x [-1, 1].
# A hypothesis is a weight vector w = c(w0, w1, w2); the point
# (1, x1, x2) is labelled sign(c(1, x1, x2) %*% w).

# Draw a random target line through two uniform points in [-1, 1]^2 and
# return it as weights c(-a, -m, 1), where y = m*x + a is the line.
sorteaRecta <- function() {
  x <- matrix(runif(4)*2 - 1, 2, 2)
  p1 <- x[1,]
  p2 <- x[2,]
  # Slope and intercept of the line through p1 and p2.
  m <- (p2[2] - p1[2])/(p2[1] - p1[1])
  a <- p1[2] - p1[1]*m
  c(-a, -m, 1)
}

# Draw N uniform points in [-1, 1]^2 as an N x 3 design matrix whose
# first column is the constant bias term 1.
sorteaPuntos <- function(N) {
  x <- cbind(1, matrix(runif(2*N, -1, 1), N, 2))
}

# Label the rows of x with the weight vector r: sign(x %*% r).
evaluaPuntos <- function(x, r) {
  sign(x %*% r)
}

# Least-squares weights via the pseudo-inverse: (X'X)^-1 X' y.
calculaG <- function(x, y) {
  xPrima <- t(x)
  xDaga <- solve(xPrima %*% x) %*% xPrima
  xDaga %*% y
}

# Exercise 5: in-sample classification error of linear regression used
# as a classifier, for 'Simulaciones' random targets of 'Puntos' points.
ejercicio5 <- function(Puntos, Simulaciones) {
  salida <- rep(0, Simulaciones)
  for (i in 1:Simulaciones) {
    x <- sorteaPuntos(Puntos)
    r <- sorteaRecta()
    y <- evaluaPuntos(x, r)
    g <- calculaG(x, y)
    yp <- evaluaPuntos(x, g)
    salida[i] <- mean(yp != y)
  }
  return(salida)
}

# Exercise 6: out-of-sample error of the regression classifier, measured
# on 'PuntosOut' fresh points per simulation.
ejercicio6 <- function(PuntosIn, PuntosOut, Simulaciones) {
  salida <- rep(0, Simulaciones)
  for (i in 1:Simulaciones) {
    x <- sorteaPuntos(PuntosIn)
    r <- sorteaRecta()
    y <- evaluaPuntos(x, r)
    g <- calculaG(x, y)
    xOut <- sorteaPuntos(PuntosOut)
    yOut <- evaluaPuntos(xOut, r)
    ypOut <- evaluaPuntos(xOut, g)
    salida[i] <- mean(ypOut != yOut)
  }
  return(salida)
}

# Perceptron learning algorithm starting from initial weights g; the
# target weights f supply the true label of the chosen point. Capped at
# 100 updates. Returns list(n = loop iterations used, g = final weights).
perceptron <- function(y, f, g, x) {
  newG <- g
  indices <- 1:length(y)
  for (n in 1:100) {
    yp = evaluaPuntos(x, newG)
    errores <- y != yp
    L = sum(errores)
    if (L == 0) {
      break;
    }
    # Pick one misclassified point uniformly at random.
    indiceErrores <- indices[errores]
    indiceAleatorio <- indiceErrores[sample(1:L, 1)]
    p <- x[indiceAleatorio, ]
    # NOTE(review): evaluaPuntos(p, f) returns a 1x1 matrix; multiplying
    # it elementwise by the length-3 vector p looks non-conformable in R
    # and may error at runtime -- confirm (drop() on the sign would fix it).
    newG <- newG + evaluaPuntos(p,f)*p
  }
  return(list("n"= n, "g" = newG))
}

# Exercise 7: warm-start PLA from the regression weights and record, per
# simulation, the iterations used and the final in-sample error.
ejercicio7 <- function(PuntosIn, Simulaciones) {
  salida <- matrix(0, Simulaciones, 2)
  for (i in 1:Simulaciones) {
    x <- sorteaPuntos(PuntosIn)
    r <- sorteaRecta()
    y <- evaluaPuntos(x, r)
    g <- calculaG(x, y)
    z <- perceptron(y, r, g, x)
    yp <- evaluaPuntos(x, z$g)
    print(paste("n: ", z$n))
    print(paste("mean: ", mean(yp != y)))
    salida[i, ] <- c(z$n, mean(yp != y))
  }
  return(salida)
}
# GDAL Bug - https://github.com/jhollist/elevatr/issues/62
# Minimal reproducible example: build a 0.1-degree grid of cell centres
# over a small bounding box and query their elevations from the AWS
# terrain tiles.
library(sf)
library(elevatr)
library(dplyr)

# Two corner coordinates spanning the area of interest (WGS 84).
corners <- tibble(long = c(-89, -88.5), lat = c(43.5, 44))
corner_sf <- sf::st_as_sf(corners, coords = c("long", "lat"), crs = 4326)

# Regular square grid over the bounding box, keeping only cell centres.
grid_centres <- sf::st_as_sf(
  sf::st_make_grid(corner_sf, square = TRUE, cellsize = 0.1, what = 'centers')
)

# Label each centre, then fetch elevations at zoom level 9.
point_elev <- grid_centres %>%
  mutate(site_id = paste0('r_', row_number())) %>%
  elevatr::get_aws_points(z = 9)
/inst/bug.R
no_license
cran/elevatr
R
false
false
436
r
# GDAL Bug - https://github.com/jhollist/elevatr/issues/62
# Minimal reproducible example: build a 0.1-degree grid of point centres
# over a small bounding box and query their elevations from the AWS
# terrain tiles via elevatr::get_aws_points().
library(sf)
library(elevatr)
library(dplyr)

point_elev <- tibble(long = c(-89, -88.5), lat = c(43.5, 44)) %>%
  # Promote the coordinate table to an sf point layer (WGS 84).
  sf::st_as_sf(coords = c("long", "lat"), crs = 4326) %>%
  # Regular square grid over the bounding box; keep only cell centres.
  sf::st_make_grid(square = TRUE, cellsize = 0.1, what = 'centers') %>%
  sf::st_as_sf() %>%
  # Give each centre a stable identifier.
  mutate(site_id = paste0('r_', row_number())) %>%
  # Fetch elevations at zoom level 9.
  elevatr::get_aws_points(z = 9)
#' seqsetvis: peak set visualization
#'
#' seqsetvis supports visualization of overlapping peak/interval sets.
#' The typical workflow has 2 steps:
#' \enumerate{
#'   \item \code{\link{ssvOverlapIntervalSets}}
#'   \item \code{\link{fetchWindowedBigwig}}
#' }
#' Otherwise refer to the vignettes for worked examples.
"_PACKAGE"
/R/seqsetvis.R
no_license
jgordon3/seqsetvis
R
false
false
217
r
#' seqsetvis: peak set visualization
#'
#' seqsetvis supports visualization of overlapping peak/interval sets.
#' The typical workflow has 2 steps:
#' \enumerate{
#'   \item \code{\link{ssvOverlapIntervalSets}}
#'   \item \code{\link{fetchWindowedBigwig}}
#' }
#' Otherwise refer to the vignettes for worked examples.
"_PACKAGE"
# Load assertive: its assert_* helpers stop execution with an error when
# a variable does not satisfy the stated condition.
library(assertive)

all_positive <- c(1, 2, 3, 4, 5)
has_negative <- c(1, 2, 3, 4, -5)

# Passes silently: every element is greater than zero.
assert_all_are_positive(all_positive)
# Throws an error: the final element (-5) is not positive.
assert_all_are_positive(has_negative)
/R/assertive-app-1.R
permissive
FabrizioBF/software-testing
R
false
false
183
r
# assert_* functions from the assertive package throw an error when a
# variable does not satisfy the stated condition.
library(assertive) # assert_* functions to throw errors if variables aren't in the right form

a=c(1,2,3,4,5)
b=c(1,2,3,4,-5)

# Passes silently: every element of a is positive.
assert_all_are_positive(a)
# Stops with an error: b contains -5, which is not positive.
assert_all_are_positive(b)
#Simulate values for 'missing' unobserved speciation times, conditioned
#on the observed speciation times in x, under a constant-rate
#birth-death model with speciation rate lambda and extinction rate mu.
#told / tyoung bound the window for the simulated times (0 means that
#end is unbounded). Returns the combined observed + simulated times,
#sorted oldest first.
#NOTE(review): relies on intp1(), which is defined elsewhere in the
#package -- this function cannot run standalone.
corsim<-function(x,lambda,mu,missing,told=0,tyoung=0){
  #Work with times sorted from oldest to youngest.
  x<-sort(x,decreasing=TRUE)
  #make vector ranks and times
  n<-length(x)+1
  if (told == 0){
    #Fraction implied by the number of missing times (presumably the
    #sampling probability rho -- confirm against the package docs).
    rho<- 1-missing/(missing+n)
    origin<-0
    #sample origin conditioned on n and x_1
    while (x[1]>origin){
      r <- runif(1,0,1)
      if (lambda>mu) {
        origin <- log((-lambda * rho - lambda * r^(1/n) + mu * r^(1/n) + lambda * rho * r^(1/n))/(lambda * rho * (-1 + r^(1/n)))) / (lambda - mu)
      } else {
        #Branch taken when lambda <= mu (presumably the critical-case
        #limit form -- confirm).
        origin<- -(r^(1/n)/(lambda *(-1 + r^(1/n)* rho)))
      }
    }
    if (tyoung==0){
      ranks<-0:(length(x)+1)
      times<-c(origin,x,0)
    } else {
      #Drop observed times younger than the lower bound tyoung.
      missyoung<-length(which(x<tyoung))
      times<-c(0,x[1:(length(x)-missyoung)],tyoung)
      ranks<-1:length(times)
      ranks<-ranks-1
    }
  } else {
    #Bounded case: simulated times must fall between told and tyoung.
    missold<-length(which(x>told))
    missyoung<-length(which(x<tyoung))
    if (missold<length(x)){
      times<-c(told,x[(missold+1):(length(x)-missyoung)],tyoung)
    } else {
      times<-c(told,tyoung)
    }
    ranks<-1:length(times)
    ranks<-ranks+missold-1
  }
  #after times[i] we have ranks[i]+1 lineages
  while(missing>0){
    #distrranks[i]: prob insert between ranks[i] and ranks[i+1]
    if (length(ranks)>2){
      distrranks<-vector()
      for (i in 2:length(ranks)){
        temp <- ranks[i] * (intp1(times[i-1],lambda,mu) - intp1(times[i],lambda,mu))
        distrranks<-c(distrranks,temp)
      }
      #Normalise the interval weights and accumulate them into a CDF.
      distrranks<-distrranks/sum(distrranks)
      for (i in 2:length(distrranks)){distrranks[i]<-distrranks[i]+distrranks[i-1]}
      r <- runif(1,0,1)
      addrank<-min(which(distrranks>r))
      # addrank=k means adding between ranks[k] and ranks[k+1] in time
      # means adding to ranks[k+1] lineages
    } else {addrank<-1}
    #Invert the conditional CDF to draw the new speciation time inside
    #the chosen interval.
    r <- runif(1,0,1)
    const<-intp1(times[addrank],lambda,mu) - intp1(times[(addrank+1)],lambda,mu)
    temp<- intp1(times[(addrank+1)],lambda,mu)/const
    xnew<- 1/(mu-lambda)*log((1-(r+temp)*const*lambda)/(1-(r+temp)*const*mu))
    x<-c(x,xnew)
    x<-sort(x,decreasing=TRUE)
    missing<-missing-1
  }
  x
}
/R/corsim.R
no_license
tanja819/TreeSim
R
false
false
1,837
r
#Simulate values for 'missing' unobserved speciation times, conditioned
#on the observed speciation times in x, under a constant-rate
#birth-death model with speciation rate lambda and extinction rate mu.
#told / tyoung bound the window for the simulated times (0 means that
#end is unbounded). Returns the combined observed + simulated times,
#sorted oldest first.
#NOTE(review): relies on intp1(), which is defined elsewhere in the
#package -- this function cannot run standalone.
corsim<-function(x,lambda,mu,missing,told=0,tyoung=0){
  #Work with times sorted from oldest to youngest.
  x<-sort(x,decreasing=TRUE)
  #make vector ranks and times
  n<-length(x)+1
  if (told == 0){
    #Fraction implied by the number of missing times (presumably the
    #sampling probability rho -- confirm against the package docs).
    rho<- 1-missing/(missing+n)
    origin<-0
    #sample origin conditioned on n and x_1
    while (x[1]>origin){
      r <- runif(1,0,1)
      if (lambda>mu) {
        origin <- log((-lambda * rho - lambda * r^(1/n) + mu * r^(1/n) + lambda * rho * r^(1/n))/(lambda * rho * (-1 + r^(1/n)))) / (lambda - mu)
      } else {
        #Branch taken when lambda <= mu (presumably the critical-case
        #limit form -- confirm).
        origin<- -(r^(1/n)/(lambda *(-1 + r^(1/n)* rho)))
      }
    }
    if (tyoung==0){
      ranks<-0:(length(x)+1)
      times<-c(origin,x,0)
    } else {
      #Drop observed times younger than the lower bound tyoung.
      missyoung<-length(which(x<tyoung))
      times<-c(0,x[1:(length(x)-missyoung)],tyoung)
      ranks<-1:length(times)
      ranks<-ranks-1
    }
  } else {
    #Bounded case: simulated times must fall between told and tyoung.
    missold<-length(which(x>told))
    missyoung<-length(which(x<tyoung))
    if (missold<length(x)){
      times<-c(told,x[(missold+1):(length(x)-missyoung)],tyoung)
    } else {
      times<-c(told,tyoung)
    }
    ranks<-1:length(times)
    ranks<-ranks+missold-1
  }
  #after times[i] we have ranks[i]+1 lineages
  while(missing>0){
    #distrranks[i]: prob insert between ranks[i] and ranks[i+1]
    if (length(ranks)>2){
      distrranks<-vector()
      for (i in 2:length(ranks)){
        temp <- ranks[i] * (intp1(times[i-1],lambda,mu) - intp1(times[i],lambda,mu))
        distrranks<-c(distrranks,temp)
      }
      #Normalise the interval weights and accumulate them into a CDF.
      distrranks<-distrranks/sum(distrranks)
      for (i in 2:length(distrranks)){distrranks[i]<-distrranks[i]+distrranks[i-1]}
      r <- runif(1,0,1)
      addrank<-min(which(distrranks>r))
      # addrank=k means adding between ranks[k] and ranks[k+1] in time
      # means adding to ranks[k+1] lineages
    } else {addrank<-1}
    #Invert the conditional CDF to draw the new speciation time inside
    #the chosen interval.
    r <- runif(1,0,1)
    const<-intp1(times[addrank],lambda,mu) - intp1(times[(addrank+1)],lambda,mu)
    temp<- intp1(times[(addrank+1)],lambda,mu)/const
    xnew<- 1/(mu-lambda)*log((1-(r+temp)*const*lambda)/(1-(r+temp)*const*mu))
    x<-c(x,xnew)
    x<-sort(x,decreasing=TRUE)
    missing<-missing-1
  }
  x
}
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ComparatorUnderSampling.R
\name{sampleComparator}
\alias{sampleComparator}
\title{Sample the comparator group down}
\usage{
sampleComparator(cohortMethodData, comparatorToTreatedRatio = 2)
}
\arguments{
\item{cohortMethodData}{The original cohortMethodData.}

\item{comparatorToTreatedRatio}{The ratio between comparator and treated group.}
}
\value{
An object of type \code{cohortMethodData} with the sampled populations.
}
\description{
Sample the comparator group down
}
\details{
When the comparator group is extremely large, it may be more efficient to only
use a sample to fit the propensity model. This function creates a new
cohortMethodData object where the comparator group is sampled down to a size
relative to the treated group.
}
/man/sampleComparator.Rd
permissive
tdbennett/CohortMethod
R
false
false
829
rd
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ComparatorUnderSampling.R
\name{sampleComparator}
\alias{sampleComparator}
\title{Sample the comparator group down}
\usage{
sampleComparator(cohortMethodData, comparatorToTreatedRatio = 2)
}
\arguments{
\item{cohortMethodData}{The original cohortMethodData.}

\item{comparatorToTreatedRatio}{The ratio between comparator and treated group.}
}
\value{
An object of type \code{cohortMethodData} with the sampled populations.
}
\description{
Sample the comparator group down
}
\details{
When the comparator group is extremely large, it may be more efficient to only
use a sample to fit the propensity model. This function creates a new
cohortMethodData object where the comparator group is sampled down to a size
relative to the treated group.
}
#==================
#AssignDevTypes.R
#==================
#
#<doc>
#
## AssignDevTypes Module
#### November 6, 2018
#
#This module assigns households to development types: Urban (located within an urbanized area boundary) and Rural (located outside of an urbanized area boundary).
#
### Model Parameter Estimation
#
#This module has no parameters. Households are assigned to development types based on input assumptions on the proportions of housing units that are urban by Bzone and housing type.
#
### How the Module Works
#
#The user specifies the proportion of housing units that are *Urban* (located within an urbanized area boundary) by housing type (SF, MF, GQ) and Bzone. Each household is randomly assigned as *Urban* or *Rural* based on its housing type and Bzone and the urban/rural proportions of housing units of that housing type in that Bzone.
#
#</doc>


#=================================
#Packages used in code development
#=================================
#Uncomment following lines during code development. Recomment when done.
# library(visioneval)


#=============================================
#SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS
#=============================================
#This module has no parameters. Households are assigned to development types
#based on input assumptions on the proportions of housing units that are urban
#by Bzone and housing type.


#================================================
#SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS
#================================================

#Define the data specifications
#------------------------------
AssignDevTypesSpecifications <- list(
  #Level of geography module is applied at
  RunBy = "Region",
  #Specify new tables to be created by Inp if any
  #Specify new tables to be created by Set if any
  #Specify input data
  Inp = items(
    item(
      NAME = items(
        "PropUrbanSFDU",
        "PropUrbanMFDU",
        "PropUrbanGQDU"),
      FILE = "bzone_urban_du_proportions.csv",
      TABLE = "Bzone",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "NA",
      NAVALUE = -1,
      SIZE = 0,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      UNLIKELY = "",
      TOTAL = "",
      DESCRIPTION = items(
        "Proportion of single family dwelling units located within the urban portion of the zone",
        "Proportion of multi-family dwelling units located within the urban portion of the zone",
        "Proportion of group quarters accommodations located within the urban portion of the zone"
      )
    )
  ),
  #Specify data to be loaded from data store
  Get = items(
    item(
      NAME = "Marea",
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      PROHIBIT = "",
      ISELEMENTOF = ""
    ),
    item(
      NAME = "Bzone",
      TABLE = "Bzone",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      PROHIBIT = "",
      ISELEMENTOF = ""
    ),
    item(
      NAME = "Marea",
      TABLE = "Bzone",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      PROHIBIT = "",
      ISELEMENTOF = ""
    ),
    item(
      NAME = items(
        "PropUrbanSFDU",
        "PropUrbanMFDU",
        "PropUrbanGQDU"),
      TABLE = "Bzone",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "NA",
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = ""
    ),
    item(
      NAME = "HhId",
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      PROHIBIT = "",
      ISELEMENTOF = ""
    ),
    item(
      NAME = "HouseType",
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "category",
      PROHIBIT = "",
      ISELEMENTOF = c("SF", "MF", "GQ")
    ),
    item(
      NAME = "Bzone",
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      PROHIBIT = "",
      ISELEMENTOF = ""
    ),
    item(
      NAME = "HhSize",
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      PROHIBIT = c("NA", "<= 0"),
      ISELEMENTOF = ""
    ),
    item(
      NAME = "Income",
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "currency",
      UNITS = "USD.2010",
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = ""
    )
  ),
  #Specify data to be saved in the data store
  Set = items(
    item(
      NAME = "DevType",
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "category",
      NAVALUE = "NA",
      PROHIBIT = "NA",
      ISELEMENTOF = c("Urban", "Rural"),
      SIZE = 5,
      DESCRIPTION = "Development type (Urban or Rural) of the place where the household resides"
    ),
    item(
      NAME = "Marea",
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      NAVALUE = "NA",
      PROHIBIT = "",
      ISELEMENTOF = "",
      DESCRIPTION = "Name of metropolitan area (Marea) that household is in or NA if none"
    ),
    item(
      NAME = "UrbanPop",
      TABLE = "Bzone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Urbanized area population in the Bzone"
    ),
    item(
      NAME = "RuralPop",
      TABLE = "Bzone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Rural (i.e. non-urbanized area) population in the Bzone"
    ),
    item(
      NAME = "UrbanPop",
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Urbanized area population in the Marea (metropolitan area)"
    ),
    item(
      NAME = "RuralPop",
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Rural (i.e. non-urbanized area) population in the Marea (metropolitan area)"
    ),
    item(
      NAME = "UrbanIncome",
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "currency",
      UNITS = "USD.2010",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Total household income of the urbanized area population in the Marea (metropolitan area)"
    ),
    item(
      NAME = "RuralIncome",
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "currency",
      UNITS = "USD.2010",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Total household income of the rural (i.e. non-urbanized area) population in the Marea (metropolitan area)"
    )
  )
)

#Save the data specifications list
#---------------------------------
#' Specifications list for AssignDevTypes module
#'
#' A list containing specifications for the AssignDevTypes module.
#'
#' @format A list containing 4 components:
#' \describe{
#'  \item{RunBy}{the level of geography that the module is run at}
#'  \item{Inp}{scenario input data to be loaded into the datastore for this
#'  module}
#'  \item{Get}{module inputs to be read from the datastore}
#'  \item{Set}{module outputs to be written to the datastore}
#' }
#' @source AssignDevTypes.R script.
"AssignDevTypesSpecifications"
usethis::use_data(AssignDevTypesSpecifications, overwrite = TRUE)


#=======================================================
#SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL
#=======================================================
#This function assigns a development type - Urban and Rural - to each household
#based on the household's housing type and Bzone and the proportion of the
#housing units of the housing type in the Bzone that are located in the urban
#area.

#Development code: load a test input list (L) for interactive work.
# TestDat_ <- testModule(
#   ModuleName = "AssignDevTypes",
#   LoadDatastore = TRUE,
#   SaveDatastore = TRUE,
#   DoRun = FALSE
# )
# L <- TestDat_$L

#Main module function that assigns a development type to each household
#----------------------------------------------------------------------
#' Main module function to assign a development type to each household.
#'
#' \code{AssignDevTypes} assigns a development type to each household.
#'
#' This function assigns a development type to each household based on the
#' household housing type and Bzone and input assumptions about the proportion
#' of housing units by housing type and Bzone that are urban.
#'
#' @param L A list containing the components listed in the Get specifications
#' for the module.
#' @return A list containing the components specified in the Set
#' specifications for the module.
#' @name AssignDevTypes
#' @import visioneval stats
#' @export
AssignDevTypes <- function(L) {
  #Set up
  #------
  #Fix seed as synthesis involves sampling
  set.seed(L$G$Seed)
  #Calculate the number of households
  NumHh <- length(L$Year$Household[[1]])
  #Define a vector of development types
  Dt <- c("Urban", "Rural")
  #Define a vector of housing types
  Ht <- c("SF", "MF", "GQ")
  #Define a vector of Bzones
  Bz <- L$Year$Bzone$Bzone
  #Define a vector of Mareas
  Ma <- L$Year$Marea$Marea

  #Assign development types
  #------------------------
  #Create matrix of urban proportions by Bzone and housing type
  PropNames <- c("PropUrbanSFDU", "PropUrbanMFDU", "PropUrbanGQDU")
  UrbanProp_BzHt <- as.matrix(data.frame(L$Year$Bzone[PropNames]))
  rm(PropNames)
  colnames(UrbanProp_BzHt) <- Ht
  rownames(UrbanProp_BzHt) <- Bz
  #Identify urban probability for each household: a two-column
  #(row name, column name) matrix index looks up each household's
  #Bzone/housing-type urban share in one vectorized subscript
  UrbanProb_ <- UrbanProp_BzHt[cbind(L$Year$Household$Bzone, L$Year$Household$HouseType)]
  #Sample to identify development type: households default to Rural and
  #are flipped to Urban with probability UrbanProb_
  DevType_ <- rep("Rural", NumHh)
  DevType_[runif(NumHh) <= UrbanProb_] <- "Urban"
  #Identify Marea for each household from its Bzone
  Marea_ <- L$Year$Bzone$Marea[(match(L$Year$Household$Bzone, L$Year$Bzone$Bzone))]

  #Calculate urban and rural population by Bzone
  #---------------------------------------------
  Pop_BzDt <- tapply(L$Year$Household$HhSize, list(L$Year$Household$Bzone, DevType_), sum)
  #Replace the NA produced by tapply for empty Bzone/DevType cells
  Pop_BzDt[is.na(Pop_BzDt)] <- 0

  #Calculate urban and rural population and total household income by Marea
  #------------------------------------------------------------------------
  Pop_MaDt <- tapply(L$Year$Household$HhSize, list(Marea_, DevType_), sum)
  Pop_MaDt[is.na(Pop_MaDt)] <- 0
  Income_MaDt <- tapply(L$Year$Household$Income, list(Marea_, DevType_), sum)
  Income_MaDt[is.na(Income_MaDt)] <- 0

  #Return list of results
  #----------------------
  Out_ls <- initDataList()
  Out_ls$Year$Household$DevType <- DevType_
  Out_ls$Year$Household$Marea <- Marea_
  #Record the string width needed to store the Marea names (NA excluded)
  attributes(Out_ls$Year$Household$Marea)$SIZE <- max(nchar(Marea_[!is.na(Marea_)]))
  Out_ls$Year$Bzone <- list(
    UrbanPop = unname(Pop_BzDt[Bz,"Urban"]),
    RuralPop = unname(Pop_BzDt[Bz,"Rural"])
  )
  Out_ls$Year$Marea <- list(
    UrbanPop = unname(Pop_MaDt[Ma,"Urban"]),
    RuralPop = unname(Pop_MaDt[Ma,"Rural"]),
    UrbanIncome = unname(Income_MaDt[Ma,"Urban"]),
    RuralIncome = unname(Income_MaDt[Ma,"Rural"])
  )
  Out_ls
}


#===============================================================
#SECTION 4: MODULE DOCUMENTATION AND AUXILIARY DEVELOPMENT CODE
#===============================================================
#Run module automatic documentation
#----------------------------------
documentModule("AssignDevTypes")

#Test code to check specifications, loading inputs, and whether datastore
#contains data needed to run module. Return input list (L) to use for developing
#module functions
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
#   ModuleName = "AssignDevTypes",
#   LoadDatastore = TRUE,
#   SaveDatastore = TRUE,
#   DoRun = FALSE
# )
# L <- TestDat_$L

#Test code to check everything including running the module and checking whether
#the outputs are consistent with the 'Set' specifications
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
#   ModuleName = "AssignDevTypes",
#   LoadDatastore = TRUE,
#   SaveDatastore = TRUE,
#   DoRun = TRUE
# )
/sources/modules/VELandUse/R/AssignDevTypes.R
permissive
rickdonnelly/VisionEval-Dev
R
false
false
12,554
r
#================== #AssignDevTypes.R #================== # #<doc> # ## AssignDevTypes Module #### November 6, 2018 # #This module assigns households to development types: Urban (located within an urbanized area boundary) and Rural (located outside of an urbanized area boundary). # ### Model Parameter Estimation # #This module has no parameters. Households are assigned to development types based on input assumptions on the proportions of housing units that are urban by Bzone and housing type. # ### How the Module Works # #The user specifies the proportion of housing units that are *Urban* (located within an urbanized area boundary) by housing type (SF, MF, GQ) and Bzone. Each household is randomly assigned as *Urban* or *Rural* based on its housing type and Bzone and the urban/rural proportions of housing units of that housing type in that Bzone. # #</doc> #================================= #Packages used in code development #================================= #Uncomment following lines during code development. Recomment when done. # library(visioneval) #============================================= #SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS #============================================= #This module has no parameters. Households are assigned to development types #based on input assumptions on the proportions of housing units that are urban #by Bzone and housing type. 
#================================================ #SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS #================================================ #Define the data specifications #------------------------------ AssignDevTypesSpecifications <- list( #Level of geography module is applied at RunBy = "Region", #Specify new tables to be created by Inp if any #Specify new tables to be created by Set if any #Specify input data Inp = items( item( NAME = items( "PropUrbanSFDU", "PropUrbanMFDU", "PropUrbanGQDU"), FILE = "bzone_urban_du_proportions.csv", TABLE = "Bzone", GROUP = "Year", TYPE = "double", UNITS = "NA", NAVALUE = -1, SIZE = 0, PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "", UNLIKELY = "", TOTAL = "", DESCRIPTION = items( "Proportion of single family dwelling units located within the urban portion of the zone", "Proportion of multi-family dwelling units located within the urban portion of the zone", "Proportion of group quarters accommodations located within the urban portion of the zone" ) ) ), #Specify data to be loaded from data store Get = items( item( NAME = "Marea", TABLE = "Marea", GROUP = "Year", TYPE = "character", UNITS = "ID", PROHIBIT = "", ISELEMENTOF = "" ), item( NAME = "Bzone", TABLE = "Bzone", GROUP = "Year", TYPE = "character", UNITS = "ID", PROHIBIT = "", ISELEMENTOF = "" ), item( NAME = "Marea", TABLE = "Bzone", GROUP = "Year", TYPE = "character", UNITS = "ID", PROHIBIT = "", ISELEMENTOF = "" ), item( NAME = items( "PropUrbanSFDU", "PropUrbanMFDU", "PropUrbanGQDU"), TABLE = "Bzone", GROUP = "Year", TYPE = "double", UNITS = "NA", PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "" ), item( NAME = "HhId", TABLE = "Household", GROUP = "Year", TYPE = "character", UNITS = "ID", PROHIBIT = "", ISELEMENTOF = "" ), item( NAME = "HouseType", TABLE = "Household", GROUP = "Year", TYPE = "character", UNITS = "category", PROHIBIT = "", ISELEMENTOF = c("SF", "MF", "GQ") ), item( NAME = "Bzone", TABLE = "Household", GROUP = "Year", TYPE = "character", UNITS = "ID", 
PROHIBIT = "", ISELEMENTOF = "" ), item( NAME = "HhSize", TABLE = "Household", GROUP = "Year", TYPE = "people", UNITS = "PRSN", PROHIBIT = c("NA", "<= 0"), ISELEMENTOF = "" ), item( NAME = "Income", TABLE = "Household", GROUP = "Year", TYPE = "currency", UNITS = "USD.2010", PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "" ) ), #Specify data to saved in the data store Set = items( item( NAME = "DevType", TABLE = "Household", GROUP = "Year", TYPE = "character", UNITS = "category", NAVALUE = "NA", PROHIBIT = "NA", ISELEMENTOF = c("Urban", "Rural"), SIZE = 5, DESCRIPTION = "Development type (Urban or Rural) of the place where the household resides" ), item( NAME = "Marea", TABLE = "Household", GROUP = "Year", TYPE = "character", UNITS = "ID", NAVALUE = "NA", PROHIBIT = "", ISELEMENTOF = "", DESCRIPTION = "Name of metropolitan area (Marea) that household is in or NA if none" ), item( NAME = "UrbanPop", TABLE = "Bzone", GROUP = "Year", TYPE = "people", UNITS = "PRSN", NAVALUE = -1, PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "", SIZE = 0, DESCRIPTION = "Urbanized area population in the Bzone" ), item( NAME = "RuralPop", TABLE = "Bzone", GROUP = "Year", TYPE = "people", UNITS = "PRSN", NAVALUE = -1, PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "", SIZE = 0, DESCRIPTION = "Rural (i.e. non-urbanized area) population in the Bzone" ), item( NAME = "UrbanPop", TABLE = "Marea", GROUP = "Year", TYPE = "people", UNITS = "PRSN", NAVALUE = -1, PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "", SIZE = 0, DESCRIPTION = "Urbanized area population in the Marea (metropolitan area)" ), item( NAME = "RuralPop", TABLE = "Marea", GROUP = "Year", TYPE = "people", UNITS = "PRSN", NAVALUE = -1, PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "", SIZE = 0, DESCRIPTION = "Rural (i.e. 
non-urbanized area) population in the Marea (metropolitan area)" ), item( NAME = "UrbanIncome", TABLE = "Marea", GROUP = "Year", TYPE = "currency", UNITS = "USD.2010", NAVALUE = -1, PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "", SIZE = 0, DESCRIPTION = "Total household income of the urbanized area population in the Marea (metropolitan area)" ), item( NAME = "RuralIncome", TABLE = "Marea", GROUP = "Year", TYPE = "currency", UNITS = "USD.2010", NAVALUE = -1, PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "", SIZE = 0, DESCRIPTION = "Total household income of the rural (i.e. non-urbanized area) population in the Marea (metropolitan area)" ) ) ) #Save the data specifications list #--------------------------------- #' Specifications list for AssignDevTypes module #' #' A list containing specifications for the AssignDevTypes module. #' #' @format A list containing 4 components: #' \describe{ #' \item{RunBy}{the level of geography that the module is run at} #' \item{Inp}{scenario input data to be loaded into the datastore for this #' module} #' \item{Get}{module inputs to be read from the datastore} #' \item{Set}{module outputs to be written to the datastore} #' } #' @source AssignDevTypes.R script. "AssignDevTypesSpecifications" usethis::use_data(AssignDevTypesSpecifications, overwrite = TRUE) #======================================================= #SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL #======================================================= #This function assigns a development type - Urban and Rural - to each household #based on the household's housing type and Bzone and the proportion of the #housing units of the housing type in the Bzone that are located in the urban #area. 
# TestDat_ <- testModule(
#   ModuleName = "AssignDevTypes",
#   LoadDatastore = TRUE,
#   SaveDatastore = TRUE,
#   DoRun = FALSE
# )
# L <- TestDat_$L

#Main module function that assigns a development type to each household
#----------------------------------------------------------------------
#' Main module function to assign a development type to each household.
#'
#' \code{AssignDevTypes} assigns a development type to each household.
#'
#' This function assigns a development type to each household based on the
#' household housing type and Bzone and input assumptions about the proportion
#' of housing units by housing type and Bzone that are urban. Each household is
#' randomly drawn as "Urban" with probability equal to the urban share of
#' dwelling units of its housing type in its Bzone, otherwise it is "Rural".
#' Urban/rural population totals by Bzone and Marea, and total household income
#' by Marea and development type, are tabulated as module outputs.
#'
#' @param L A list containing the components listed in the Get specifications
#' for the module.
#' @return A list containing the components specified in the Set
#' specifications for the module.
#' @name AssignDevTypes
#' @import visioneval stats
#' @export
AssignDevTypes <- function(L) {
  #Set up
  #------
  #Fix seed as synthesis involves sampling (makes the runif() draw below
  #reproducible for a given model-run seed)
  set.seed(L$G$Seed)
  #Calculate the number of households (length of any household-level dataset)
  NumHh <- length(L$Year$Household[[1]])
  #Define a vector of development types
  Dt <- c("Urban", "Rural")
  #Define a vector of housing types (order must match the PropUrban* columns
  #bound into the matrix below)
  Ht <- c("SF", "MF", "GQ")
  #Define a vector of Bzones
  Bz <- L$Year$Bzone$Bzone
  #Define a vector of Mareas
  Ma <- L$Year$Marea$Marea

  #Assign development types
  #------------------------
  #Create matrix of urban proportions by Bzone and housing type
  PropNames <- c("PropUrbanSFDU", "PropUrbanMFDU", "PropUrbanGQDU")
  UrbanProp_BzHt <- as.matrix(data.frame(L$Year$Bzone[PropNames]))
  rm(PropNames)
  colnames(UrbanProp_BzHt) <- Ht
  rownames(UrbanProp_BzHt) <- Bz
  #Identify urban probability for each household by indexing the proportion
  #matrix with (Bzone, HouseType) name pairs
  UrbanProb_ <-
    UrbanProp_BzHt[cbind(L$Year$Household$Bzone, L$Year$Household$HouseType)]
  #Sample to identify development type: household is "Urban" with probability
  #UrbanProb_, otherwise keeps the "Rural" default
  DevType_ <- rep("Rural", NumHh)
  DevType_[runif(NumHh) <= UrbanProb_] <- "Urban"
  #Identify Marea of each household by looking up its Bzone's Marea
  Marea_ <-
    L$Year$Bzone$Marea[(match(L$Year$Household$Bzone, L$Year$Bzone$Bzone))]
  #Calculate urban and rural population by Bzone
  #---------------------------------------------
  Pop_BzDt <-
    tapply(L$Year$Household$HhSize, list(L$Year$Household$Bzone, DevType_), sum)
  #tapply yields NA for empty Bzone/DevType combinations; report those as 0
  Pop_BzDt[is.na(Pop_BzDt)] <- 0
  #Calculate urban and rural population and total household income by Marea
  #------------------------------------------------------------------------
  Pop_MaDt <- tapply(L$Year$Household$HhSize, list(Marea_, DevType_), sum)
  Pop_MaDt[is.na(Pop_MaDt)] <- 0
  Income_MaDt <- tapply(L$Year$Household$Income, list(Marea_, DevType_), sum)
  Income_MaDt[is.na(Income_MaDt)] <- 0

  #Return list of results
  #----------------------
  Out_ls <- initDataList()
  Out_ls$Year$Household$DevType <- DevType_
  Out_ls$Year$Household$Marea <- Marea_
  #Record the maximum Marea name length so the datastore can size the field
  attributes(Out_ls$Year$Household$Marea)$SIZE <-
    max(nchar(Marea_[!is.na(Marea_)]))
  Out_ls$Year$Bzone <-
    list(
      UrbanPop = unname(Pop_BzDt[Bz,"Urban"]),
      RuralPop = unname(Pop_BzDt[Bz,"Rural"])
    )
  Out_ls$Year$Marea <-
    list(
      UrbanPop = unname(Pop_MaDt[Ma,"Urban"]),
      RuralPop = unname(Pop_MaDt[Ma,"Rural"]),
      UrbanIncome = unname(Income_MaDt[Ma,"Urban"]),
      RuralIncome = unname(Income_MaDt[Ma,"Rural"])
    )
  Out_ls
}

#===============================================================
#SECTION 4: MODULE DOCUMENTATION AND AUXILLIARY DEVELOPMENT CODE
#===============================================================
#Run module automatic documentation
#----------------------------------
documentModule("AssignDevTypes")

#Test code to check specifications, loading inputs, and whether datastore
#contains data needed to run module.
Return input list (L) to use for developing #module functions #------------------------------------------------------------------------------- # TestDat_ <- testModule( # ModuleName = "AssignDevTypes", # LoadDatastore = TRUE, # SaveDatastore = TRUE, # DoRun = FALSE # ) # L <- TestDat_$L #Test code to check everything including running the module and checking whether #the outputs are consistent with the 'Set' specifications #------------------------------------------------------------------------------- # TestDat_ <- testModule( # ModuleName = "AssignDevTypes", # LoadDatastore = TRUE, # SaveDatastore = TRUE, # DoRun = TRUE # )
# Build a small demo data frame of daily weather observations.
day_names  <- c('mon', 'tue', 'wed', 'thur', 'fri')  # weekday labels
temps      <- c(22.2, 21, 23, 24.3, 25)              # daily temperatures
rain_flags <- c(TRUE, TRUE, FALSE, FALSE, TRUE)      # did it rain that day?

# Assemble the vectors into a data frame; column names are given explicitly
# so the resulting frame is identical to data.frame(days, temp, rain).
df <- data.frame(days = day_names, temp = temps, rain = rain_flags)

df        # auto-print the data frame
str(df)   # inspect column types and dimensions

library(dplyr)
/bringing files together.R
no_license
Dynamicideas/newme
R
false
false
239
r
# creating a data frame days <- c('mon','tue','wed','thur','fri') temp <- c(22.2,21,23,24.3,25) rain <- c(TRUE,TRUE,FALSE,FALSE,TRUE) #pass in the vectors : df <- data.frame(days,temp,rain) df str(df) library(dplyr)
#' make a grouping list
#'
#' Description tables with grouping require a list of logical index vectors
#' to group on. Often this is equivalent to categorization according to a
#' grouping variable; if so, the needed 'glist' can be created by supplying
#' either that variable itself or its name within the data source 'ref'.
#' @title make glist
#' @param x object; either a vector to be factorized, or (if \code{ref} is a
#'     data frame) the name of a column in \code{ref}
#' @param ref reference, a data frame to get \code{x} from (if character) else a
#'     data frame or vector to compare with lengthwise
#' @param max.levels if the number of groups exceeds this level, stop.
#' @export
make_glist <- function(x, ref = NULL, max.levels = 25){
  # Resolve/validate x against the reference, when one is given
  if (!is.null(ref)) {
    if (is.data.frame(ref)) {
      if (is.character(x)) {
        x <- ref[[x]]
      } else if (length(x) != nrow(ref)) {
        stop("[make_glist] 'x' not a fit for the reference")
      }
    } else if (length(x) != length(ref)) {
      stop("[make_glist] 'x' not equal in length to reference")
    }
  }
  fac <- as.factor(x)
  # Guard against accidentally grouping on a near-continuous variable
  if (length(levels(fac)) > max.levels) {
    stop("[make_glist] the number of levels exceed 'max.levels'")
  }
  # One logical index vector per factor level, named by the level
  groups <- list()
  for (lvl in levels(fac)) {
    groups[[lvl]] <- fac == lvl
  }
  groups
}

#' factorize a glist
#'
#' Reverse-engineer a categorical variable from a glist, when possible:
#' every observation must belong to exactly one group.
#' @param glist a list of logical index vectors
#' @param as.factor return a factor object?
#' @param reverse.levels levels in reversed glist order?
#' @export
factorize_glist <- function(glist, as.factor = FALSE, reverse.levels = FALSE){
  ind <- as.data.frame(glist)
  sums <- rowSums(ind)
  # Each row must sum to exactly 1 (one group per observation, no NA)
  if (any(is.na(sums)) | any(stats::na.omit(sums) != 1)) {
    text1 <- paste0("[descripteur::factorize_glist]: The grouping in glist",
                    " is not equivalent to a categorical variable")
    # Overlap-free (but incomplete) groupings get a hint appended
    ss <- all(rowSums(ind, na.rm = TRUE) <= 1)
    text2 <- if (ss) "\n -- But there may be a natural subset that is!\n" else NULL
    stop(paste0(text1, text2))
  }
  # Replace TRUE entries by the group name, FALSE by the empty string,
  # then collapse each row into its single label
  lab <- ind
  for (j in seq_along(ind)) {
    lab[[j]] <- ifelse(ind[[j]], names(ind)[j], "")
  }
  res <- apply(X = lab, MARGIN = 1, FUN = paste0, collapse = "")
  if (!as.factor) {
    return(res)
  }
  lev <- if (reverse.levels) rev(names(ind)) else names(ind)
  factor(res, levels = lev)
}
/R/glist-functions.R
no_license
renlund/descripteur
R
false
false
2,486
r
#' make a grouping list #' #' description tables with grouping requires a list of indexes to #' group on. Often this should be equivalent to categorization #' according to some grouping variable. If so, the 'glist' needed #' can be created by either specifying that variable or the name #' of that variable in data source 'ref'. #' @title make glist #' @param x object #' @param ref reference, a data frame to get \code{x} from (if character) else a #' data frame or vector to compare with lengthwise #' @param max.levels if the number of groups exceed this level stop. #' @export make_glist <- function(x, ref = NULL, max.levels = 25){ if(!is.null(ref)){ if(is.data.frame(ref)){ if(is.character(x)){ x <- ref[[x]] } else { if(length(x) != nrow(ref)){ stop("[make_glist] 'x' not a fit for the reference") } } } else { if(length(x) != length(ref)){ stop("[make_glist] 'x' not equal in length to reference") } } } y <- as.factor(x) if(length(levels(y)) > max.levels){ stop("[make_glist] the number of levels exceed 'max.levels'") } g <- as.list(NULL) for(k in levels(y)){ g[[k]] <- y == k } g } #' factorize a glist #' #' reverse-engineer a categorical variable from glist, if possible #' @param glist a list of indices #' @param as.factor return a factor object? #' @param reverse.levels levels in order of glist? 
#' @export factorize_glist <- function(glist, as.factor = FALSE, reverse.levels = FALSE){ g <- as.data.frame(glist) rS <- rowSums(g) if(any(is.na(rS)) | any(stats::na.omit(rS) != 1)){ text1 <- paste0("[descripteur::factorize_glist]: The grouping in glist", " is not equivalent to a categorical variable") ss <- all(rowSums(g, na.rm = TRUE) <= 1) text2 <- if(ss){ "\n -- But there may be a natural subset that is!\n" } else NULL stop(paste0(text1, text2)) } else { r <- g for(k in seq_along(g)){ r[[k]] <- ifelse(g[[k]], names(g)[k], "") } ret <- apply(X = r, MARGIN = 1, FUN = paste0, collapse = "") if(as.factor){ lev <- if(reverse.levels) rev(names(g)) else names(g) factor(ret, levels = lev) } else { ret } } }
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.22623810986356e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L) result <- do.call(myTAI:::cpp_bootMatrix,testlist) str(result)
/myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615768201-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
1,804
r
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.22623810986356e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L) result <- do.call(myTAI:::cpp_bootMatrix,testlist) str(result)
/classifier_hyperparameter_gridsearch.R
no_license
jequihua/short_text_classification
R
false
false
4,575
r
# mlr learner wrapping GPfit::GP_fit (Gaussian process regression).
# Three S3 hooks: registration, training, and prediction.

#' @export
makeRLearner.regr.gpfit = function(){
  makeRLearnerRegr(
    cl = "regr.gpfit",
    package = "GPfit",
    par.set = makeParamSet(
      # correlation-structure control vector forwarded to GPfit::GP_fit
      makeNumericVectorLearnerParam(id = "control", len = 3, lower = c(1, 1, 1)),
      # nugget threshold; bounded here to [10, 25] with GPfit's default of 20
      makeNumericLearnerParam(id = "nug_thres", default = 20, lower = 10, upper = 25),
      makeLogicalLearnerParam(id = "trace", default = FALSE, tunable = FALSE),
      makeIntegerLearnerParam(id = "maxit", default = 100, lower = 0),
      makeUntypedLearnerParam(id = "optim_start", tunable = FALSE),
      # whether to rescale each feature to [0, 1] before fitting (see note)
      makeLogicalLearnerParam(id = "scale", default = TRUE)
    ),
    properties = c("numerics"),
    name = "Gaussian Process Model fitting",
    short.name = "gpfit",
    note = "As the optimization routine assumes that the inputs are scaled to the unit hypercube [0,1]^d, the input gets scaled for each variable by default. If this is not wanted, scale = FALSE has to be set."
  )
}

# Train hook: drops constant features, optionally rescales each remaining
# feature to [0, 1] (GP_fit assumes inputs in the unit hypercube), and stashes
# the per-feature bounds so prediction can replay the identical transform.
#' @export
trainLearner.regr.gpfit = function(.learner, .task, .subset, scale = TRUE, ...) {
  d = getTaskData(.task, .subset, target.extra = TRUE)
  # per-feature min/max over the training subset; max == min => constant
  low = apply(d$data, 2, min)
  high = apply(d$data, 2, max)
  not.const = colnames(d$data)[high != low]
  if (scale) {
    # NOTE(review): if only one non-constant column remains, d$data[,not.const]
    # drops to a vector and apply() over MARGIN 2 would fail -- TODO confirm
    # whether callers guarantee at least two informative features.
    d$data[,not.const] = apply(d$data[,not.const], 2, function(x)
      x = (x - min(x)) / (max(x) - min(x)))
    res = GPfit::GP_fit(d$data[, not.const], d$target, ...)
    # remember scaling state and bounds for predictLearner
    res = attachTrainingInfo(res, list(scaled = TRUE, not.const = not.const,
                                       high = high, low = low))
    return(res)
  } else {
    res = GPfit::GP_fit(d$data[, not.const], d$target, ...)
    res = attachTrainingInfo(res, list(scaled = FALSE, not.const = not.const))
    return(res)
  }
}

# Predict hook: replays the training-time scaling using the *stored training
# bounds* (not the new data's own range), restricts to the features kept at
# training time, and returns the GP posterior mean predictions (Y_hat).
#' @export
predictLearner.regr.gpfit = function(.learner, .model, .newdata, ...) {
  tr.info = getTrainingInfo(.model)
  if (tr.info$scaled) {
    for (col.name in tr.info$not.const) {
      .newdata[,col.name] = (.newdata[,col.name] - tr.info$low[col.name]) /
        (tr.info$high[col.name] - tr.info$low[col.name])
    }
  }
  predict(.model$learner.model, xnew = .newdata[, tr.info$not.const])$Y_hat
}
/R/RLearner_regr_gpfit.R
no_license
Dotterbart/mlr
R
false
false
2,069
r
#' @export makeRLearner.regr.gpfit = function(){ makeRLearnerRegr( cl = "regr.gpfit", package = "GPfit", par.set = makeParamSet( makeNumericVectorLearnerParam(id = "control", len = 3, lower = c(1, 1, 1)), makeNumericLearnerParam(id = "nug_thres", default = 20, lower = 10, upper = 25), makeLogicalLearnerParam(id = "trace", default = FALSE, tunable = FALSE), makeIntegerLearnerParam(id = "maxit", default = 100, lower = 0), makeUntypedLearnerParam(id = "optim_start", tunable = FALSE), makeLogicalLearnerParam(id = "scale", default = TRUE) ), properties = c("numerics"), name = "Gaussian Process Model fitting", short.name = "gpfit", note = "As the optimization routine assumes that the inputs are scaled to the unit hypercube [0,1]^d, the input gets scaled for each variable by default. If this is not wanted, scale = FALSE has to be set." ) } #' @export trainLearner.regr.gpfit = function(.learner, .task, .subset, scale = TRUE, ...) { d = getTaskData(.task, .subset, target.extra = TRUE) low = apply(d$data, 2, min) high = apply(d$data, 2, max) not.const = colnames(d$data)[high != low] if (scale) { d$data[,not.const] = apply(d$data[,not.const], 2, function(x) x = (x - min(x)) / (max(x) - min(x))) res = GPfit::GP_fit(d$data[, not.const], d$target, ...) res = attachTrainingInfo(res, list(scaled = TRUE, not.const = not.const, high = high, low = low)) return(res) } else { res = GPfit::GP_fit(d$data[, not.const], d$target, ...) res = attachTrainingInfo(res, list(scaled = FALSE, not.const = not.const)) return(res) } } #' @export predictLearner.regr.gpfit = function(.learner, .model, .newdata, ...) { tr.info = getTrainingInfo(.model) if (tr.info$scaled) { for (col.name in tr.info$not.const) { .newdata[,col.name] = (.newdata[,col.name] - tr.info$low[col.name]) / (tr.info$high[col.name] - tr.info$low[col.name]) } } predict(.model$learner.model, xnew = .newdata[, tr.info$not.const])$Y_hat }
## ---------------------------------------------------------------------------
## Monthly stable-isotope series (d18O, d-excess) for rain and fog samples,
## combined with monthly air temperature (ta200) and fog amount, plotted per
## research plot and written to a combined three-panel PNG.
## ---------------------------------------------------------------------------
library(ggplot2)
library(lubridate)
library(reshape)
library(gridExtra)
library(RColorBrewer)
# BUG FIX: the original additionally called require(RcolorBrewer), which fails
# (package names are case-sensitive); RColorBrewer is already attached above.
# BUG FIX: brewer.pal() palette names are case-sensitive too: "BrBG", not "brBG"
col <- brewer.pal(8, "BrBG")

# set working directory (setwd() returns the previous wd invisibly)
wd <- setwd("C:/Users/IOtte/Desktop/training/")

### load data
iso <- read.csv2("iso_calc_copy.csv", header = TRUE)
ta200 <- read.csv("C:/Users/IOtte/Desktop/plot_air_temperatur/iso_ta200_monthly.csv",
                  header = TRUE)

## Sort temperature data: wide -> long, then build a "YYYY-MM" date key
ta200 <- melt(ta200)
colnames(ta200) <- c("plotID", "date", "ta200")
# year and month sit at fixed character positions of the original column labels
ta200$year <- substr(ta200$date, 26, 29)
ta200$mon <- substr(ta200$date, 31, 32)
ta200 <- ta200[, -2]                       # drop the raw label column
ta200$date <- paste(ta200$year, ta200$mon, sep = "-")
ta200 <- ta200[, -3]                       # drop helper column 'year'
ta200 <- ta200[, -3]                       # drop helper column 'mon'

## Aggregate iso plot data to monthly values
# monthly means of d18-16, dD_H and d-excess per date/plot/type/elevation
iso.mns <- aggregate(cbind(iso$d18_16, iso$dD_H, iso$d.excess),
                     by = list(substr(iso$date_sample, 1, 7),
                               iso[, 4], iso[, 5], iso[, 6]),
                     FUN = "mean", na.rm = TRUE)
colnames(iso.mns) <- c("date", "plotID", "type", "elevation",
                       "d18_16", "dD_H", "dexcess")

# monthly sums of amount_mm per date/plot/type/elevation
amnt.smm <- aggregate(iso$amount_mm,
                      by = list(substr(iso$date_sample, 1, 7),
                                iso[, 4], iso[, 5], iso[, 6]),
                      FUN = "sum", na.rm = TRUE)
colnames(amnt.smm) <- c("date", "plotID", "type", "elevation", "amount")

# merge monthly means with monthly sums, then add the temperature series
iso.mnth <- merge(iso.mns, amnt.smm)
iso.ta200 <- merge(iso.mnth, ta200)

## subsetting for better facility of instruction
#type <- lapply(types, function(i){
#  sub <- subset(iso, iso$type == i)
#})

### build plots for presentation, each plot separately
# plot colours / legend entries for rain (8 plots) and fog (7 plots) samples
col.id.rn <- c("#3288bd", "#66c2a5", "#abdda4", "#e6f598",
               "#fee08b", "#fdae61", "#f46d43", "#d53e4f")
leg.rn <- c("fer0", "fpd0", "fpo0", "foc0", "foc6", "flm1", "hom4", "sav5")
col.id.fg <- c("#3288bd", "#66c2a5", "#abdda4", "#e6f598",
               "#fee08b", "#fdae61", "#f46d43")
leg.fg <- c("fer0", "fpd0", "fpo0", "foc0", "foc6", "flm1", "nkw1")

# shared month labels for the x axis (Nov of year 1 through Nov of year 3);
# hoisted here to avoid repeating the same 25-element vector in every plot
mon.labs <- c("11", "12", "01", "02", "03", "04", "05", "06", "07", "08",
              "09", "10", "11", "12", "01", "02", "03", "04", "05", "06",
              "07", "08", "09", "10", "11")

## d18O (rain samples)
# BUG FIX: this object was assigned as 'iso.dD' but referenced below as
# 'iso.d18', so arrangeGrob() failed on an undefined object.  Since the plot
# shows d18_16 (d18O), it is named 'iso.d18' throughout and the y-axis label
# corrected from delta-D to delta-18-O.
iso.d18 <-
  ggplot(subset(iso.ta200, iso.ta200[, 3] == "rain"),
         aes(x = date, y = d18_16, group = plotID, colour = plotID)) +
  geom_line() +
  scale_color_manual(values = col.id.rn, limits = leg.rn, name = "Plot ID SP1") +
  ylab(expression(delta^{18}*O ~ "\u2030")) +
  xlab("") +
  scale_x_discrete(labels = mon.labs) +
  theme(
    panel.grid.major = element_line(color = "lightgray", size = 0.01),
    panel.background = element_rect(fill = NA),
    panel.border = element_rect(color = "gray", fill = NA))

## d-excess (rain samples); y axis reversed as in the original
iso.dexcess <-
  ggplot(subset(iso.ta200, iso.ta200[, 3] == "rain"),
         aes(x = date, y = dexcess, group = plotID, colour = plotID)) +
  geom_line() +
  scale_color_manual(values = col.id.rn, limits = leg.rn, name = "Plot ID SP1") +
  ylab(expression(dexcess ~ "\u2030")) +
  xlab("") +
  scale_y_reverse() +
  scale_x_discrete(labels = mon.labs) +
  theme(
    panel.grid.major = element_line(color = "lightgray", size = 0.01),
    panel.background = element_rect(fill = NA),
    panel.border = element_rect(color = "gray", fill = NA))

## air temperature (rain samples)
# ENCODING FIX: the axis label read "ta200 [??C]" due to a mangled degree sign
iso.ta.200 <-
  ggplot(subset(iso.ta200, iso.ta200[, 3] == "rain"),
         aes(x = date, y = ta200, group = plotID, colour = plotID)) +
  geom_line() +
  scale_color_manual(values = col.id.rn, limits = leg.rn, name = "Plot ID SP1") +
  ylab("ta200 [\u00b0C]") +
  xlab("") +
  scale_x_discrete(labels = mon.labs) +
  theme(
    panel.grid.major = element_line(color = "lightgray", size = 0.01),
    panel.background = element_rect(fill = NA),
    panel.border = element_rect(color = "gray", fill = NA))

## fog amount
amount <-
  ggplot(subset(iso.ta200, iso.ta200[, 3] == "fog"),
         aes(x = date, y = amount, group = plotID, colour = plotID)) +
  geom_line() +
  scale_color_manual(values = col.id.fg, limits = leg.fg, name = "Plot ID SP1") +
  ylab("fog [mm]") +
  xlab("") +
  scale_x_discrete(labels = mon.labs) +
  theme(
    panel.grid.major = element_line(color = "lightgray", size = 0.01),
    panel.background = element_rect(fill = NA),
    panel.border = element_rect(color = "gray", fill = NA))

## wind direction
## afterwards merging

# stack the d18O, d-excess and fog-amount panels into one three-row figure
d18.dex.amn.fg <- arrangeGrob(iso.d18, iso.dexcess, amount, ncol = 1, nrow = 3)

# write the combined figure to disk
png("out/iso_d18_dex_amn_fg.png", width = 20, height = 30, units = "cm",
    res = 300, pointsize = 15)
print(d18.dex.amn.fg)
dev.off()
no_license
environmentalinformatics-marburg/magic
R
false
false
5,704
r
library(ggplot2) library(lubridate) library(reshape) library(gridExtra) library(RColorBrewer) require(RcolorBrewer) col <- brewer.pal(8, "brBG") # set working directory wd <- setwd("C:/Users/IOtte/Desktop/training/") ### load data iso <- read.csv2("iso_calc_copy.csv", header = T) ta200 <- read.csv("C:/Users/IOtte/Desktop/plot_air_temperatur/iso_ta200_monthly.csv", header = TRUE) ## Sort temperature data ta200 <- melt(ta200) colnames(ta200) <- c("plotID", "date", "ta200") ta200$year <- substr(ta200$date, 26,29) ta200$mon <- substr(ta200$date, 31,32) ta200 <- ta200[, -2] ta200$date <- paste(ta200$year, ta200$mon, sep = "-") ta200 <- ta200[, -3] ta200 <- ta200[, -3] ## Aggregate iso plot data to monthly mean values # build monthly mean values of d18-16, dD_H & dexcess iso.mns <- aggregate(cbind(iso$d18_16, iso$dD_H, iso$d.excess), by = list(substr(iso$date_sample, 1, 7), iso[, 4], iso[, 5], iso[, 6]), FUN = "mean", na.rm = TRUE) colnames(iso.mns) <- c("date", "plotID", "type", "elevation","d18_16", "dD_H", "dexcess") # build monthly sums of amount_mm amnt.smm <- aggregate(iso$amount_mm, by = list(substr(iso$date_sample, 1, 7), iso[, 4], iso[, 5], iso[, 6]), FUN = "sum", na.rm = TRUE) colnames(amnt.smm) <- c("date", "plotID", "type", "elevation", "amount") # merge monthly mean of d18-16 & dD_H and monthly sums of amount_mm iso.mnth <- merge(iso.mns, amnt.smm) ## Merge iso.mns and ta200 to iso.ta200 iso.ta200 <- merge(iso.mnth, ta200) ## subsetting for better facility of instruction #type <- lapply(types, function(i){ # sub <- subset(iso, iso$type == i) #}) ### build plot for presentation ### each plot seperately col.id.rn <- c("#3288bd", "#66c2a5", "#abdda4", "#e6f598", "#fee08b", "#fdae61", "#f46d43", "#d53e4f") leg.rn <- c("fer0", "fpd0", "fpo0", "foc0", "foc6", "flm1", "hom4", "sav5") col.id.fg <- c("#3288bd", "#66c2a5", "#abdda4", "#e6f598", "#fee08b", "#fdae61", "#f46d43") leg.fg <- c("fer0", "fpd0", "fpo0", "foc0", "foc6", "flm1", "nkw1") ## d18O iso.dD <- 
ggplot(subset(iso.ta200, iso.ta200[, 3] == "rain"), aes(x = date, y = d18_16, group = plotID, colour = plotID)) + geom_line() + scale_color_manual(values = col.id.rn, limits = leg.rn, name = "Plot ID SP1") + ylab( expression(delta^{2}*D ~ "\u2030")) + xlab("") + scale_x_discrete(labels = c("11", "12", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11")) + theme( panel.grid.major = element_line(color = "lightgray", size = 0.01), panel.background = element_rect(fill = NA), panel.border = element_rect(color = "gray", fill = NA)) ## dexcess iso.dexcess <- ggplot(subset(iso.ta200, iso.ta200[, 3] == "rain"), aes(x = date, y = dexcess, group = plotID, colour = plotID)) + geom_line() + scale_color_manual(values = col.id.rn, limits = leg.rn, name = "Plot ID SP1") + ylab( expression(dexcess ~ "\u2030")) + xlab("") + scale_y_reverse() + scale_x_discrete(labels = c("11", "12", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11")) + theme( panel.grid.major = element_line(color = "lightgray", size = 0.01), panel.background = element_rect(fill = NA), panel.border = element_rect(color = "gray", fill = NA)) ## temperature iso.ta.200 <- ggplot(subset(iso.ta200, iso.ta200[, 3] == "rain"), aes(x = date, y = ta200, group = plotID, colour = plotID)) + geom_line() + scale_color_manual(values = col.id.rn, limits = leg.rn, name = "Plot ID SP1") + ylab("ta200 [??C]") + xlab("") + scale_x_discrete(labels = c("11", "12", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11")) + theme( panel.grid.major = element_line(color = "lightgray", size = 0.01), panel.background = element_rect(fill = NA), panel.border = element_rect(color = "gray", fill = NA)) ## amount amount <- ggplot(subset(iso.ta200, iso.ta200[, 3] == "fog"), aes(x = date, y = amount, 
group = plotID, colour = plotID)) + geom_line() + scale_color_manual(values = col.id.fg, limits = leg.fg, name = "Plot ID SP1") + ylab("fog [mm]") + xlab("") + scale_x_discrete(labels = c("11", "12", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11")) + theme( panel.grid.major = element_line(color = "lightgray", size = 0.01), panel.background = element_rect(fill = NA), panel.border = element_rect(color = "gray", fill = NA)) ## wind direction ## afterwards merging d18.dex.amn.fg <- arrangeGrob(iso.d18, iso.dexcess, amount, ncol = 1, nrow = 3) # print "iso.mns.mnth.amnt.18O" png("out/iso_d18_dex_amn_fg.png", width = 20, height = 30, units = "cm", res = 300, pointsize = 15) print(d18.dex.amn.fg) dev.off()
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{ggfluctuation2}
\alias{ggfluctuation2}
\title{Fluctuation plot}
\usage{
ggfluctuation2(table_data, floor = 0, ceiling = max(table_data$freq,
  na.rm = TRUE))
}
\arguments{
\item{table_data}{a table of values, or a data frame with three columns,
the last column being frequency}

\item{floor}{don't display cells smaller than this value}

\item{ceiling}{maximum value to compare against}
}
\description{
Create a fluctuation plot.
}
\details{
A fluctuation diagram is a graphical representation of a contingency
table. This function currently only supports 2D contingency tables.
The function was adapted from experimental functions within ggplot2
developed by Hadley Wickham.
}
\examples{
data(movies, package = "ggplot2")
ggfluctuation2(table(movies$Action, movies$Comedy))
ggfluctuation2(table(movies$Action, movies$mpaa))
ggfluctuation2(table(movies[,c("Action", "mpaa")]))
ggfluctuation2(table(warpbreaks$breaks, warpbreaks$tension))
}
\author{
Hadley Wickham \email{h.wickham@gmail.com}, Barret Schloerke
\email{schloerke@gmail.com}
}
\keyword{hplot}
/man/ggfluctuation2.Rd
no_license
pekkakohonen/ggally
R
false
false
1,107
rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{ggfluctuation2} \alias{ggfluctuation2} \title{Fluctuation plot} \usage{ ggfluctuation2(table_data, floor = 0, ceiling = max(table_data$freq, na.rm = TRUE)) } \arguments{ \item{table_data}{a table of values, or a data frame with three columns, the last column being frequency} \item{floor}{don't display cells smaller than this value} \item{ceiling}{max value to compare to} } \description{ Create a fluctuation plot. } \details{ A fluctutation diagram is a graphical representation of a contingency table. This fuction currently only supports 2D contingency tables. The function was adopted from experiemntal functions within GGplot2 developed by Hadley Wickham. } \examples{ data(movies, package = "ggplot2") ggfluctuation2(table(movies$Action, movies$Comedy)) ggfluctuation2(table(movies$Action, movies$mpaa)) ggfluctuation2(table(movies[,c("Action", "mpaa")])) ggfluctuation2(table(warpbreaks$breaks, warpbreaks$tension)) } \author{ Hadley Wickham \email{h.wickham@gmail.com}, Barret Schloerke \email{schloerke@gmail.com} } \keyword{hplot}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EmmixWire.R
\name{wj2.permuted}
\alias{wj2.permuted}
\title{The null distribution}
\usage{
wj2.permuted(data, ret, nB = 99, contrast = NULL, seed = 1234)
}
\arguments{
\item{data}{The dataset, an n by m numeric matrix, where n is the number of
observations and m the dimension of the data}

\item{ret}{The list returned by the function \code{emmixwire}}

\item{nB}{The number of permutations}

\item{contrast}{A two- or three-dimensional vector giving the contrast(s)
for the class differences}

\item{seed}{Random seed for the permutations.}
}
\value{
An n by nB matrix with its columns as the statistic Wj for each permutation.
}
\description{
This function calculates the null distribution of the weighted contrast W_j.
}
\details{
The number of classes of samples is either two or three; the default contrast
for two classes is c(1, -1), and for three classes c(1, 0, -1).
}
\examples{
data(hedenlc)
dat<-hedenlc[seq_len(100),] #for speed
set.seed(123456)
ret <-emmixwire(dat, g=3, ncov=3, nvcov=1, n1=7, n2=8,
debug=1, itmax=20, epsilon=1e-5)
###calculate the W_j
wj <- scores.wire(ret, contrast=c(1, -1))
### the null distribution of W_j
wj0 <- wj2.permuted(dat, ret, nB=19)
### the p-values of W_j
pv <- pvalue.wire(wj, wj0)
}
\seealso{
\code{\link{emmixwire}} \code{\link{scores.wire}}.
}
\keyword{cluster}
\keyword{datasets}
/man/wj2.permuted.Rd
no_license
andrewthomasjones/EMMIXcontrasts
R
false
true
1,400
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/EmmixWire.R \name{wj2.permuted} \alias{wj2.permuted} \title{The null distribution} \usage{ wj2.permuted(data, ret, nB = 99, contrast = NULL, seed = 1234) } \arguments{ \item{data}{The dataset, an n by m numeric matrix, where n is number of observations and m the dimension of data} \item{ret}{The return list of function emmixwire} \item{nB}{The number of permutations} \item{contrast}{A two- or three- dimensional vector the contrast(s) for the class differences} \item{seed}{random seed for the permutations.} } \value{ An n by nB matrix with its columns as the statistic Wj for each permutation. } \description{ This function caculates the null distribution of the weighted contrast W_j. } \details{ The number of classes of samples is either two or three, and the default contrast for two classes is c(1, -1), and three classes c(1, 0, -1). } \examples{ data(hedenlc) dat<-hedenlc[seq_len(100),] #for speed set.seed(123456) ret <-emmixwire(dat, g=3, ncov=3, nvcov=1, n1=7, n2=8, debug=1, itmax=20, epsilon=1e-5) ###calculate the W_j wj <- scores.wire(ret, contrast=c(1, -1)) ### the null distribution of W_j wj0 <- wj2.permuted(dat, ret, nB=19) ### the p-values of W_j pv <- pvalue.wire(wj, wj0) } \seealso{ \code{\link{emmixwire}} \code{\link{scores.wire}}. } \keyword{cluster} \keyword{datasets}
# --- Network layouts with igraph -------------------------------------------
# NOTE(review): installing packages from inside a script is a side effect;
# kept for parity with the original, but consider removing it.
install.packages("igraph")
library(igraph)

# Create data: a random sparse 20x20 adjacency matrix (~20% edge density)
data <- matrix(sample(0:1, 400, replace=TRUE, prob=c(0.8,0.2)), nrow=20)
network <- graph_from_adjacency_matrix(data, mode='undirected', diag=FALSE)

# When plotting, we can use different layouts (2x2 panel of the same graph):
par(mfrow=c(2,2), mar=c(1,1,1,1))
plot(network, layout=layout.sphere, main="sphere")
plot(network, layout=layout.circle, main="circle")
plot(network, layout=layout.random, main="random")
plot(network, layout=layout.fruchterman.reingold, main="fruchterman.reingold")

# --- Animated gapminder chart with gganimate -------------------------------
library(gapminder)
# Load libraries:
library(ggplot2)
library(gganimate)

# Make a ggplot and animate over year: one frame per year.
# BUG FIX: the gganim object must be assigned (and rendered) explicitly.
# The original built the plot without assigning it and then called
# anim_save(), which defaults to last_animation(); in a non-interactive
# session nothing had been animated yet, so anim_save() errored -- this is
# what the original (Korean) comment "why does an error appear?" was about.
anim <- ggplot(gapminder, aes(gdpPercap, lifeExp, size = pop, color = continent)) +
  geom_point() +
  scale_x_log10() +
  theme_bw() +
  # gganimate specific bits:
  labs(title = 'Year: {frame_time}', x = 'GDP per capita', y = 'life expectancy') +
  transition_time(year) +
  ease_aes('linear')

# Render the animation and save it as a gif
anim_save("271-ggplot2-animated-gif-chart-with-gganimate1.gif",
          animation = animate(anim))
/Statistical Calculation Method/Class/SCM_0316(ggplot gallery).R
no_license
Lee-Eun-Ju/stat_r_practice
R
false
false
1,045
r
install.packages("igraph") library(igraph) # Create data data <- matrix(sample(0:1, 400, replace=TRUE, prob=c(0.8,0.2)), nrow=20) network <- graph_from_adjacency_matrix(data , mode='undirected', diag=F ) # When ploting, we can use different layouts: par(mfrow=c(2,2), mar=c(1,1,1,1)) plot(network, layout=layout.sphere, main="sphere") plot(network, layout=layout.circle, main="circle") plot(network, layout=layout.random, main="random") plot(network, layout=layout.fruchterman.reingold, main="fruchterman.reingold") library(gapminder) # Charge libraries: library(ggplot2) library(gganimate) # Make a ggplot, but add frame=year: one image per year ggplot(gapminder, aes(gdpPercap, lifeExp, size = pop, color = continent)) + geom_point() + scale_x_log10() + theme_bw() + # gganimate specific bits: labs(title = 'Year: {frame_time}', x = 'GDP per capita', y = 'life expectancy') + transition_time(year) + ease_aes('linear') # Save at gif:왜error가 나타날까 anim_save("271-ggplot2-animated-gif-chart-with-gganimate1.gif")
\name{plot.Diffepoce}
\alias{plot.Diffepoce}
\title{Plot difference of EPOCE estimators between two joint frailty models.}
\description{
Plots the values of the difference of two Cross-Validated Prognosis Observed
Loss (CVPOL) estimators computed with two joint frailty models. Confidence
intervals can be displayed.
}
\usage{
\method{plot}{Diffepoce}(x, conf.bands=TRUE, Xlab = "Time",
Ylab = "EPOCE difference" , ...)
}
\arguments{
  \item{x}{An object inheriting from \code{Diffepoce} class.}
  \item{conf.bands}{Logical value. Determines whether confidence intervals
   will be plotted. The default is TRUE.}
  \item{Xlab}{Label of x-axis. Default is '"Time"'}
  \item{Ylab}{Label of y-axis. Default is '"EPOCE difference"'}
  \item{\dots}{Other unused arguments.}
}
\value{
Prints one plot with one curve and its confidence interval.
}
\seealso{
 \code{\link{Diffepoce}}
}
\keyword{file}
/man/plot.Diffepoce.Rd
no_license
aminKMT/frailtypack
R
false
false
910
rd
\name{plot.Diffepoce} \alias{plot.Diffepoce} \title{Plot difference of EPOCE estimators between two joint frailty models.} \description{ Plots values of the difference of two Cross-Validated Prognosis Observed Loss (CVPOL) computed with two joint frailty models. Confidence intervals are allowed. } \usage{ \method{plot}{Diffepoce}(x, conf.bands=TRUE, Xlab = "Time", Ylab = "EPOCE difference" , ...) } \arguments{ \item{x}{An object inheriting from \code{Diffepoce} class.} \item{conf.bands}{Logical value. Determines whether confidence intervals will be plotted. The default is FALSE.} \item{Xlab}{Label of x-axis. Default is '"Time"'} \item{Ylab}{Label of y-axis. Default is '"EPOCE difference"'} \item{\dots}{Other unused arguments.} } \value{ Print one plot with one curve and its confidence interval. } \seealso{ \code{\link{Diffepoce}} } \keyword{file}
# WES TCGA matrix ################## #TCGA:in-Pan cancer Procescessing ################## require(ggplot2) require('ggpubr') require(grid) require(gridExtra) require(cowplot) colorType_Set='Set1' setwd('/Users/sinhas8/Project_Chromotrypsis/') tcga=read.csv('/Users/sinhas8/Project_Chromotrypsis/Results_New/Nov_28/Supp_Table3.csv') range01 <- function(x){(x-min(x))/(max(x)-min(x))} scaling_cancerType<-function(quan1=gi, quan2=hist){ unlist(lapply(split(quan1, quan2), function(x) range01(x))) } tcga$Normalized_gi=scaling_cancerType(tcga$CNV_Burden, tcga$info_tcga.hist) tcga$Normalized_hrd.loh=scaling_cancerType(tcga$HRD_by_LOH, tcga$info_tcga.hist) tcga$Normalized_hrd.LST=scaling_cancerType(tcga$HRD_by_LST, tcga$info_tcga.hist) tcga$Normalized_hrd.AIL=scaling_cancerType(tcga$HRD_by_AIL, tcga$info_tcga.hist) tcga$Normalized_Chromothripsis_Presence=scaling_cancerType(tcga$CHTP_Canonical_Definition_Presence, tcga$info_tcga.hist) colnames(tcga)[6]='race' levels(tcga$race)[2]=c('EA') colnames(tcga)[7]='hist' lung_can=tcga$hist=='LUSC' ################## #TCGA:Mutation :: GEne counts ################## prob_wdMut=readRDS('/Users/sinhas8/Downloads/TCGA_withMut.RDS') MutLoad_NOG=apply(prob_wdMut$Mut, 2, sum) tcga$mutLoad=MutLoad_NOG[match(tcga$info_tcga.Sample_Name, names(MutLoad_NOG))] tcga=tcga[!is.na(tcga$mutLoad),] tcga$scaled_mutLoad=scaling_cancerType(tcga$mutLoad, tcga$hist) tcga$scaled_mutLoad=range01(tcga$mutLoad) tiff('/Users/sinhas8/Project_Chromotrypsis/prep_final_figures/Mut_Load_Feb7.tif', width = 1800, height = 600) ggplot(tcga, aes(x=as.character(hist),y=scaled_mutLoad, fill=race))+ geom_boxplot(data=tcga, aes(fill=race))+ labs(title="MutLoad in 23 cancer types (TCGA)",x="Race", y = "Scaled MutLoad")+ facet_grid( ~info_tcga.Tissue_Type + info_tcga.CellofOrigin, scales = "free", space = "free_x")+ theme_classic(base_size = 25)+ stat_compare_means(method = "wilcox.test", label = "p", label.x = 1.5, label.y=rep(c(0.8,1.0), length(levels(tcga$hist))/2, 0.8), size 
= 6)+ # guides(fill=FALSE)+ scale_fill_brewer(palette=colorType_Set) dev.off() length(prob_wdMut$types) Mut_LUSC=prob_wdMut$Mut[,prob_wdMut$types=='LUSC'] write.csv(Mut_LUSC, '/Users/sinhas8/data_For_Brid/LUSC/Mut_LUSC.csv') metadata=data.frame(sample_ID=colnames(prob_wdMut$Mut)[prob_wdMut$types=='LUSC'], race=prob_wdMut$race[prob_wdMut$types=='LUSC'], hist=prob_wdMut$types[prob_wdMut$types=='LUSC'], stage=prob_wdMut$stage[prob_wdMut$types=='LUSC'], age=prob_wdMut$age[prob_wdMut$types=='LUSC'], sex=prob_wdMut$sex[prob_wdMut$types=='LUSC']) write.csv(metadata, '/Users/sinhas8/data_For_Brid/LUSC/metadata_LUSC.csv') ################## #TCGA:Mutation :: GEne counts ##################
/write_LUSCmatrix_forBrid.R
no_license
qindan2008/Scripts_MolCharAAvsEA
R
false
false
2,854
r
# WES TCGA matrix ################## #TCGA:in-Pan cancer Procescessing ################## require(ggplot2) require('ggpubr') require(grid) require(gridExtra) require(cowplot) colorType_Set='Set1' setwd('/Users/sinhas8/Project_Chromotrypsis/') tcga=read.csv('/Users/sinhas8/Project_Chromotrypsis/Results_New/Nov_28/Supp_Table3.csv') range01 <- function(x){(x-min(x))/(max(x)-min(x))} scaling_cancerType<-function(quan1=gi, quan2=hist){ unlist(lapply(split(quan1, quan2), function(x) range01(x))) } tcga$Normalized_gi=scaling_cancerType(tcga$CNV_Burden, tcga$info_tcga.hist) tcga$Normalized_hrd.loh=scaling_cancerType(tcga$HRD_by_LOH, tcga$info_tcga.hist) tcga$Normalized_hrd.LST=scaling_cancerType(tcga$HRD_by_LST, tcga$info_tcga.hist) tcga$Normalized_hrd.AIL=scaling_cancerType(tcga$HRD_by_AIL, tcga$info_tcga.hist) tcga$Normalized_Chromothripsis_Presence=scaling_cancerType(tcga$CHTP_Canonical_Definition_Presence, tcga$info_tcga.hist) colnames(tcga)[6]='race' levels(tcga$race)[2]=c('EA') colnames(tcga)[7]='hist' lung_can=tcga$hist=='LUSC' ################## #TCGA:Mutation :: GEne counts ################## prob_wdMut=readRDS('/Users/sinhas8/Downloads/TCGA_withMut.RDS') MutLoad_NOG=apply(prob_wdMut$Mut, 2, sum) tcga$mutLoad=MutLoad_NOG[match(tcga$info_tcga.Sample_Name, names(MutLoad_NOG))] tcga=tcga[!is.na(tcga$mutLoad),] tcga$scaled_mutLoad=scaling_cancerType(tcga$mutLoad, tcga$hist) tcga$scaled_mutLoad=range01(tcga$mutLoad) tiff('/Users/sinhas8/Project_Chromotrypsis/prep_final_figures/Mut_Load_Feb7.tif', width = 1800, height = 600) ggplot(tcga, aes(x=as.character(hist),y=scaled_mutLoad, fill=race))+ geom_boxplot(data=tcga, aes(fill=race))+ labs(title="MutLoad in 23 cancer types (TCGA)",x="Race", y = "Scaled MutLoad")+ facet_grid( ~info_tcga.Tissue_Type + info_tcga.CellofOrigin, scales = "free", space = "free_x")+ theme_classic(base_size = 25)+ stat_compare_means(method = "wilcox.test", label = "p", label.x = 1.5, label.y=rep(c(0.8,1.0), length(levels(tcga$hist))/2, 0.8), size 
= 6)+ # guides(fill=FALSE)+ scale_fill_brewer(palette=colorType_Set) dev.off() length(prob_wdMut$types) Mut_LUSC=prob_wdMut$Mut[,prob_wdMut$types=='LUSC'] write.csv(Mut_LUSC, '/Users/sinhas8/data_For_Brid/LUSC/Mut_LUSC.csv') metadata=data.frame(sample_ID=colnames(prob_wdMut$Mut)[prob_wdMut$types=='LUSC'], race=prob_wdMut$race[prob_wdMut$types=='LUSC'], hist=prob_wdMut$types[prob_wdMut$types=='LUSC'], stage=prob_wdMut$stage[prob_wdMut$types=='LUSC'], age=prob_wdMut$age[prob_wdMut$types=='LUSC'], sex=prob_wdMut$sex[prob_wdMut$types=='LUSC']) write.csv(metadata, '/Users/sinhas8/data_For_Brid/LUSC/metadata_LUSC.csv') ################## #TCGA:Mutation :: GEne counts ##################
NEI <- readRDS("summarySCC_PM25.rds") SCC <- readRDS("Source_Classification_Code.rds") # How have emissions from motor vehicle sources changed from 1999–2008 in Baltimore City? NEIBalCity <- NEI[NEI$fips=="24510", ] SCCMotor <- SCC[grepl("On-Road", SCC$EI.Sector, ignore.case=FALSE),] NEIMotorBalCity <- subset(NEIBalCity, SCC %in% SCCMotor$SCC) NEIMotorBalCityTotal <- aggregate(NEIMotorBalCity$Emissions ~ NEIMotorBalCity$year,FUN=sum) colnames(NEIMotorBalCityTotal) <- c("Year","TotalEmissions") png(filename="plot5.png", units="px") plot(NEIMotorBalCityTotal$Year, NEIMotorBalCityTotal$TotalEmissions, type="l", xlab="Year", ylab="Total Emissions in tons", main="PM2.5 from motor vehicle sources in Baltimore City") dev.off()
/plot5.R
no_license
lyuehh/exdata-004
R
false
false
734
r
NEI <- readRDS("summarySCC_PM25.rds") SCC <- readRDS("Source_Classification_Code.rds") # How have emissions from motor vehicle sources changed from 1999–2008 in Baltimore City? NEIBalCity <- NEI[NEI$fips=="24510", ] SCCMotor <- SCC[grepl("On-Road", SCC$EI.Sector, ignore.case=FALSE),] NEIMotorBalCity <- subset(NEIBalCity, SCC %in% SCCMotor$SCC) NEIMotorBalCityTotal <- aggregate(NEIMotorBalCity$Emissions ~ NEIMotorBalCity$year,FUN=sum) colnames(NEIMotorBalCityTotal) <- c("Year","TotalEmissions") png(filename="plot5.png", units="px") plot(NEIMotorBalCityTotal$Year, NEIMotorBalCityTotal$TotalEmissions, type="l", xlab="Year", ylab="Total Emissions in tons", main="PM2.5 from motor vehicle sources in Baltimore City") dev.off()
#### PhD: P2 Fitting CV #### ## Function: Fits Estimated WTP ## Author: Dr Peter King (p.m.king@kent.ac.uk) ## Last change: 13/07/2022 ## TODO: setup RENV #------------------------------ # Replication Information: #### # Selected output of 'sessionInfo()' #------------------------------ # R version 4.1.3 (2022-03-10) # Platform: x86_64-w64-mingw32/x64 (64-bit) # Running under: Windows 10 x64 (build 19043) # [1] LC_COLLATE=English_United Kingdom.1252 LC_CTYPE=English_United Kingdom.1252 # [1] MASS_7.3-56 compiler_4.1.3 tools_4.1.3 renv_0.15.4 ## Any issues installing packages try: # Sys.setenv(RENV_DOWNLOAD_METHOD="libcurl") # Sys.setenv(RENV_DOWNLOAD_FILE_METHOD=getOption("download.file.method")) # renv::snapshot() rm(list=ls()) library(magrittr) library(dplyr) library(reshape2) library(ggplot2) library(ggridges) library(tidyr) library(DCchoice) #------------------------------ # Section 1: Import Data #### #------------------------------ ## This is the latest version of the data: FullSurvey2 <- data.frame(read.csv("FullSurvey2.csv")) #------------------------------------------------------------------------------------- # Section 2: Estimate Q1 WTP With Covariates #### #------------------------------------------------------------------------------------- ## Estimation: Q1_SBDCModel_Covariates <- sbchoice( Q6ResearchResponse ~ Q1Gender + Q2Age + Q3Distance+ Q16BP + Q18Charity +Q6ResearchCertainty+ Q21Experts + IncomeDummy + Q20Consequentiality| Q6Bid, data = FullSurvey2,dist="normal") Q1_SBDCModel_Covariates_WTP <- krCI(Q1_SBDCModel_Covariates) summary(Q1_SBDCModel_Covariates) #------------------------------------------------------------------------------------- # Section 3: Estimate Q2 WTP With Covariates #### #------------------------------------------------------------------------------------- ## Estimation: Q2_SBDCModel_Covariates <- sbchoice( Q7TreatmentResponse ~ Q1Gender + Q2Age + Q3Distance + Q16BP + Q18Charity + Q7TreatmentCertainty+ Q21Experts + IncomeDummy 
+ Q20Consequentiality | Q7Bid, data = FullSurvey2, dist = "normal" ) Q2_SBDCModel_Covariates_WTP <- krCI(Q2_SBDCModel_Covariates) summary(Q2_SBDCModel_Covariates) #--------------------------------------- # Section 4: Fit Q1 WTP per Respondent #### #--------------------------------------- ## Initialise vector with value zeroes FullSurvey2$Q1WTPFitted <- rep(0,nrow(FullSurvey2)) ## Used to do in apply but now in function ### Estimate mean WTP for each person for (i in 1:nrow(FullSurvey2)) { FullSurvey2$Q1WTPFitted[i] <- krCI( Q1_SBDCModel_Covariates, individual = data.frame( Q1Gender = FullSurvey2$Q1Gender[i], Q2Age = FullSurvey2$Q2Age[i], Q3Distance = FullSurvey2$Q3Distance[i], Q16BP = FullSurvey2$Q16BP[i], Q18Charity = FullSurvey2$Q18Charity[i], Q6ResearchCertainty = FullSurvey2$Q6ResearchCertainty[i], Q21Experts = FullSurvey2$Q21Experts[i], IncomeDummy = FullSurvey2$IncomeDummy[i], Q20Consequentiality = FullSurvey2$Q20Consequentiality[i]))$out[1,1] } FullSurvey2$Q1WTPFitted #--------------------------------------- # Section 5: Fit Q2 WTP per Respondent #### #--------------------------------------- ## Initialise vector with value zeroes FullSurvey2$Q2WTPFitted <- rep(0,nrow(FullSurvey2)) ## Used to do in apply but now in function ### Estimate mean WTP for each person for (i in 1:nrow(FullSurvey2)) { FullSurvey2$Q2WTPFitted[i] <- krCI( Q2_SBDCModel_Covariates, individual = data.frame( Q1Gender = FullSurvey2$Q1Gender[i], Q2Age = FullSurvey2$Q2Age[i], Q3Distance = FullSurvey2$Q3Distance[i], Q16BP = FullSurvey2$Q16BP[i], Q18Charity = FullSurvey2$Q18Charity[i], Q7TreatmentCertainty = FullSurvey2$Q7TreatmentCertainty[i], Q21Experts = FullSurvey2$Q21Experts[i], IncomeDummy = FullSurvey2$IncomeDummy[i], Q20Consequentiality = FullSurvey2$Q20Consequentiality[i]))$out[1,1] } FullSurvey2$Q2WTPFitted #--------------------------------------- # Section 6: Export WTP #### #--------------------------------------- 
saveRDS(FullSurvey2$Q1WTPFitted,"Q1_SBDCModel_Covariates_FittedWTP.rds") saveRDS(FullSurvey2$Q2WTPFitted,"Q2_SBDCModel_Covariates_FittedWTP.rds") # End Of Script ---------------------------------------------------
/Microplastics_FitWTP.R
no_license
pmpk20/PhDPilotSurvey
R
false
false
4,566
r
#### PhD: P2 Fitting CV #### ## Function: Fits Estimated WTP ## Author: Dr Peter King (p.m.king@kent.ac.uk) ## Last change: 13/07/2022 ## TODO: setup RENV #------------------------------ # Replication Information: #### # Selected output of 'sessionInfo()' #------------------------------ # R version 4.1.3 (2022-03-10) # Platform: x86_64-w64-mingw32/x64 (64-bit) # Running under: Windows 10 x64 (build 19043) # [1] LC_COLLATE=English_United Kingdom.1252 LC_CTYPE=English_United Kingdom.1252 # [1] MASS_7.3-56 compiler_4.1.3 tools_4.1.3 renv_0.15.4 ## Any issues installing packages try: # Sys.setenv(RENV_DOWNLOAD_METHOD="libcurl") # Sys.setenv(RENV_DOWNLOAD_FILE_METHOD=getOption("download.file.method")) # renv::snapshot() rm(list=ls()) library(magrittr) library(dplyr) library(reshape2) library(ggplot2) library(ggridges) library(tidyr) library(DCchoice) #------------------------------ # Section 1: Import Data #### #------------------------------ ## This is the latest version of the data: FullSurvey2 <- data.frame(read.csv("FullSurvey2.csv")) #------------------------------------------------------------------------------------- # Section 2: Estimate Q1 WTP With Covariates #### #------------------------------------------------------------------------------------- ## Estimation: Q1_SBDCModel_Covariates <- sbchoice( Q6ResearchResponse ~ Q1Gender + Q2Age + Q3Distance+ Q16BP + Q18Charity +Q6ResearchCertainty+ Q21Experts + IncomeDummy + Q20Consequentiality| Q6Bid, data = FullSurvey2,dist="normal") Q1_SBDCModel_Covariates_WTP <- krCI(Q1_SBDCModel_Covariates) summary(Q1_SBDCModel_Covariates) #------------------------------------------------------------------------------------- # Section 3: Estimate Q2 WTP With Covariates #### #------------------------------------------------------------------------------------- ## Estimation: Q2_SBDCModel_Covariates <- sbchoice( Q7TreatmentResponse ~ Q1Gender + Q2Age + Q3Distance + Q16BP + Q18Charity + Q7TreatmentCertainty+ Q21Experts + IncomeDummy 
+ Q20Consequentiality | Q7Bid, data = FullSurvey2, dist = "normal" ) Q2_SBDCModel_Covariates_WTP <- krCI(Q2_SBDCModel_Covariates) summary(Q2_SBDCModel_Covariates) #--------------------------------------- # Section 4: Fit Q1 WTP per Respondent #### #--------------------------------------- ## Initialise vector with value zeroes FullSurvey2$Q1WTPFitted <- rep(0,nrow(FullSurvey2)) ## Used to do in apply but now in function ### Estimate mean WTP for each person for (i in 1:nrow(FullSurvey2)) { FullSurvey2$Q1WTPFitted[i] <- krCI( Q1_SBDCModel_Covariates, individual = data.frame( Q1Gender = FullSurvey2$Q1Gender[i], Q2Age = FullSurvey2$Q2Age[i], Q3Distance = FullSurvey2$Q3Distance[i], Q16BP = FullSurvey2$Q16BP[i], Q18Charity = FullSurvey2$Q18Charity[i], Q6ResearchCertainty = FullSurvey2$Q6ResearchCertainty[i], Q21Experts = FullSurvey2$Q21Experts[i], IncomeDummy = FullSurvey2$IncomeDummy[i], Q20Consequentiality = FullSurvey2$Q20Consequentiality[i]))$out[1,1] } FullSurvey2$Q1WTPFitted #--------------------------------------- # Section 5: Fit Q2 WTP per Respondent #### #--------------------------------------- ## Initialise vector with value zeroes FullSurvey2$Q2WTPFitted <- rep(0,nrow(FullSurvey2)) ## Used to do in apply but now in function ### Estimate mean WTP for each person for (i in 1:nrow(FullSurvey2)) { FullSurvey2$Q2WTPFitted[i] <- krCI( Q2_SBDCModel_Covariates, individual = data.frame( Q1Gender = FullSurvey2$Q1Gender[i], Q2Age = FullSurvey2$Q2Age[i], Q3Distance = FullSurvey2$Q3Distance[i], Q16BP = FullSurvey2$Q16BP[i], Q18Charity = FullSurvey2$Q18Charity[i], Q7TreatmentCertainty = FullSurvey2$Q7TreatmentCertainty[i], Q21Experts = FullSurvey2$Q21Experts[i], IncomeDummy = FullSurvey2$IncomeDummy[i], Q20Consequentiality = FullSurvey2$Q20Consequentiality[i]))$out[1,1] } FullSurvey2$Q2WTPFitted #--------------------------------------- # Section 6: Export WTP #### #--------------------------------------- 
saveRDS(FullSurvey2$Q1WTPFitted,"Q1_SBDCModel_Covariates_FittedWTP.rds") saveRDS(FullSurvey2$Q2WTPFitted,"Q2_SBDCModel_Covariates_FittedWTP.rds") # End Of Script ---------------------------------------------------
#set your working directory setwd("Downloads/data_science") library(caret) library(doMC) numCores <- detectCores() registerDoMC(cores = numCores/2) setB_last11 = read.csv("setB_last11.csv", header = TRUE) setB_last10 = read.csv("setB_last10.csv", header = TRUE) setB_last17 = read.csv("setB_last17.csv", header = TRUE) # setA_last11 = read.csv("last11.csv", header = TRUE) setA_last10 = read.csv("last10.csv", header = TRUE) setA_last17 = read.csv("last17.csv", header = TRUE) # setA = rbind(setA_last11, setA_last10, setA_last17) setB = rbind(setB_last11, setB_last10, setB_last17) downSetB = downSample(x = setB[, -42], y = as.factor(setB$SepsisLabel)) names(downSetB)[44]<-"SepsisLabel" table(downSetB$SepsisLabel) # train vs test validation <- createDataPartition(setA$SepsisLabel, p = 0.8, list = FALSE) train <- setA[validation,] test <- setA[-validation,] train <- rbind(train, downSetB) table(train$SepsisLabel) train[is.na(train)] <- 0 train_Y = ifelse(train$SepsisLabel==1, "Y", "N") train = subset(train, select=-c(X, EtCO2, SepsisLabel, Patient, Bilirubin_direct, TroponinI, Fibrinogen)) train =data.frame(train, train_Y) test[is.na(test)] <- 0 test_Y = ifelse(test$SepsisLabel==1, "Y", "N") test = subset(test, select=-c(X, EtCO2, SepsisLabel, Patient, Bilirubin_direct, TroponinI, Fibrinogen)) test =data.frame(test, test_Y) predCorr <- cor(train[,-38]) highCorr <- findCorrelation(predCorr, .90) train <- train[-highCorr] prePro_range <- preProcess(train, method = "range") train <- predict(prePro_range, newdata = train) apply(train, 2, FUN = function(x) {c("min"=min(x), "max"=max(x))}) set.seed(123) mtryValues <- c(10, 12, 15) ctrl <- trainControl(method = "LGOCV", summaryFunction = twoClassSummary, classProbs = TRUE, index = list(TrainSet = -validation), savePredictions = TRUE) rfFit <- train(x = train[,-35], y = train$train_Y, method = "rf", ntree = 1000, tuneGrid = data.frame(mtry = mtryValues), importance = TRUE, metric = "ROC", trControl = ctrl) rfFit rfFit$pred <- 
merge(rfFit$pred, rfFit$bestTune) rfFitCM <- confusionMatrix(rfFit, norm = "none") rfFitCM rfFitRoc <- roc(response = rfFit$pred$obs, predictor = rfFit$pred$Y, levels = rev(levels(rfFit$pred$obs))) update(plot(rfFit, ylab = "ROC (Validation Data)")) plot(rfFitRoc, legacy.axes = TRUE) rfImp <- varImp(rfFit, scale = FALSE) rfImp
/final.tree.R
no_license
Sepsis-Machine-Learning/Traditional-ML-Models
R
false
false
2,606
r
#set your working directory setwd("Downloads/data_science") library(caret) library(doMC) numCores <- detectCores() registerDoMC(cores = numCores/2) setB_last11 = read.csv("setB_last11.csv", header = TRUE) setB_last10 = read.csv("setB_last10.csv", header = TRUE) setB_last17 = read.csv("setB_last17.csv", header = TRUE) # setA_last11 = read.csv("last11.csv", header = TRUE) setA_last10 = read.csv("last10.csv", header = TRUE) setA_last17 = read.csv("last17.csv", header = TRUE) # setA = rbind(setA_last11, setA_last10, setA_last17) setB = rbind(setB_last11, setB_last10, setB_last17) downSetB = downSample(x = setB[, -42], y = as.factor(setB$SepsisLabel)) names(downSetB)[44]<-"SepsisLabel" table(downSetB$SepsisLabel) # train vs test validation <- createDataPartition(setA$SepsisLabel, p = 0.8, list = FALSE) train <- setA[validation,] test <- setA[-validation,] train <- rbind(train, downSetB) table(train$SepsisLabel) train[is.na(train)] <- 0 train_Y = ifelse(train$SepsisLabel==1, "Y", "N") train = subset(train, select=-c(X, EtCO2, SepsisLabel, Patient, Bilirubin_direct, TroponinI, Fibrinogen)) train =data.frame(train, train_Y) test[is.na(test)] <- 0 test_Y = ifelse(test$SepsisLabel==1, "Y", "N") test = subset(test, select=-c(X, EtCO2, SepsisLabel, Patient, Bilirubin_direct, TroponinI, Fibrinogen)) test =data.frame(test, test_Y) predCorr <- cor(train[,-38]) highCorr <- findCorrelation(predCorr, .90) train <- train[-highCorr] prePro_range <- preProcess(train, method = "range") train <- predict(prePro_range, newdata = train) apply(train, 2, FUN = function(x) {c("min"=min(x), "max"=max(x))}) set.seed(123) mtryValues <- c(10, 12, 15) ctrl <- trainControl(method = "LGOCV", summaryFunction = twoClassSummary, classProbs = TRUE, index = list(TrainSet = -validation), savePredictions = TRUE) rfFit <- train(x = train[,-35], y = train$train_Y, method = "rf", ntree = 1000, tuneGrid = data.frame(mtry = mtryValues), importance = TRUE, metric = "ROC", trControl = ctrl) rfFit rfFit$pred <- 
merge(rfFit$pred, rfFit$bestTune) rfFitCM <- confusionMatrix(rfFit, norm = "none") rfFitCM rfFitRoc <- roc(response = rfFit$pred$obs, predictor = rfFit$pred$Y, levels = rev(levels(rfFit$pred$obs))) update(plot(rfFit, ylab = "ROC (Validation Data)")) plot(rfFitRoc, legacy.axes = TRUE) rfImp <- varImp(rfFit, scale = FALSE) rfImp
ci.sc.ancova <- function(Psi=NULL, adj.means=NULL, s.anova=NULL, s.ancova=NULL, standardizer="s.ancova", c.weights, n, cov.means, SSwithin.x, conf.level=.95) {options(warn=-1) if(standardizer=="s.anova") { if(is.null(s.anova)) stop("'s.anova' is needed to standardized the contrast (this is the standard deviation of the errors from the ANOVA model)") if(missing(s.ancova)) stop("'s.ancova' is needed to standardized the contrast (even when using 's.anova' as the standardizer; this is the standard deviation of the errors from the ANCOVA model).") } if(standardizer!="s.ancova" & standardizer!="s.anova") stop("The standardizer must be either 's.anova' or 's.ancova'.") if(missing(cov.means)) stop("The mean of the covariate within each group (i.e., the vector 'cov.means') is missing.") if(is.null(Psi) & is.null(adj.means) ) stop("Input either 'Psi' or 'adj.means'") if(!is.null(Psi) & !is.null(adj.means) ) stop("Do not input both 'Psi' and 'adj.means'") if(is.null(Psi)) Psi<- sum(adj.means*c.weights) if(sum(c.weights)!=0) stop("The sum of the coefficients must be zero") if(sum(c.weights[c.weights>0])>1) stop("Please use fractions to specify the contrast weights") J<-length(c.weights) if(length(n)>1) {if (length(n)!=J) stop("'c.weights' and 'n' imply different numbers of groups.")} if(length(n)==1) n<-rep(n, J) if(length(cov.means)!=J) stop("'c.weights' and 'cov.means' imply different numbers of groups.") f.x.numerater<- (sum(c.weights*cov.means))^2 f.x.denominator<- SSwithin.x sample.size.weighted<- sum(c.weights^2 / n ) ratio<- s.ancova/s.anova alpha<-1-conf.level nu<-sum(n)-J-1 if(standardizer=="s.ancova") { psi <- Psi/s.ancova lambda.obs<- psi/sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator)) lambda.limits<-conf.limits.nct(ncp=lambda.obs, df=nu, conf.level=1-alpha) psi.limit.upper<- lambda.limits$Upper.Limit*sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator)) psi.limit.lower<- 
lambda.limits$Lower.Limit*sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator)) } if(standardizer=="s.anova") { psi <- Psi/s.anova lambda.obs<-psi/(ratio*sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator))) lambda.limits<-conf.limits.nct(ncp=lambda.obs, df=nu, conf.level=1-alpha) psi.limit.upper<- lambda.limits$Upper.Limit*ratio*sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator)) psi.limit.lower<- lambda.limits$Lower.Limit*ratio*sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator)) } list(standardizer=standardizer, psi.limit.lower=psi.limit.lower, psi=psi, psi.limit.upper=psi.limit.upper) }
/MBESS/R/ci.sc.ancova.R
no_license
ingted/R-Examples
R
false
false
2,623
r
ci.sc.ancova <- function(Psi=NULL, adj.means=NULL, s.anova=NULL, s.ancova=NULL, standardizer="s.ancova", c.weights, n, cov.means, SSwithin.x, conf.level=.95) {options(warn=-1) if(standardizer=="s.anova") { if(is.null(s.anova)) stop("'s.anova' is needed to standardized the contrast (this is the standard deviation of the errors from the ANOVA model)") if(missing(s.ancova)) stop("'s.ancova' is needed to standardized the contrast (even when using 's.anova' as the standardizer; this is the standard deviation of the errors from the ANCOVA model).") } if(standardizer!="s.ancova" & standardizer!="s.anova") stop("The standardizer must be either 's.anova' or 's.ancova'.") if(missing(cov.means)) stop("The mean of the covariate within each group (i.e., the vector 'cov.means') is missing.") if(is.null(Psi) & is.null(adj.means) ) stop("Input either 'Psi' or 'adj.means'") if(!is.null(Psi) & !is.null(adj.means) ) stop("Do not input both 'Psi' and 'adj.means'") if(is.null(Psi)) Psi<- sum(adj.means*c.weights) if(sum(c.weights)!=0) stop("The sum of the coefficients must be zero") if(sum(c.weights[c.weights>0])>1) stop("Please use fractions to specify the contrast weights") J<-length(c.weights) if(length(n)>1) {if (length(n)!=J) stop("'c.weights' and 'n' imply different numbers of groups.")} if(length(n)==1) n<-rep(n, J) if(length(cov.means)!=J) stop("'c.weights' and 'cov.means' imply different numbers of groups.") f.x.numerater<- (sum(c.weights*cov.means))^2 f.x.denominator<- SSwithin.x sample.size.weighted<- sum(c.weights^2 / n ) ratio<- s.ancova/s.anova alpha<-1-conf.level nu<-sum(n)-J-1 if(standardizer=="s.ancova") { psi <- Psi/s.ancova lambda.obs<- psi/sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator)) lambda.limits<-conf.limits.nct(ncp=lambda.obs, df=nu, conf.level=1-alpha) psi.limit.upper<- lambda.limits$Upper.Limit*sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator)) psi.limit.lower<- 
lambda.limits$Lower.Limit*sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator)) } if(standardizer=="s.anova") { psi <- Psi/s.anova lambda.obs<-psi/(ratio*sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator))) lambda.limits<-conf.limits.nct(ncp=lambda.obs, df=nu, conf.level=1-alpha) psi.limit.upper<- lambda.limits$Upper.Limit*ratio*sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator)) psi.limit.lower<- lambda.limits$Lower.Limit*ratio*sqrt(sample.size.weighted+(f.x.numerater/f.x.denominator)) } list(standardizer=standardizer, psi.limit.lower=psi.limit.lower, psi=psi, psi.limit.upper=psi.limit.upper) }
# Multiple time series, part 1 # In the data chapter we discussed how the form of your data affects how you can plot it. Here, you'll explore that topic in the context of multiple time series. # # The dataset you'll use contains the global capture rates of seven salmon species from 1950 - 2010. # # In your workspace, the following dataset is available: # # fish.species: Each variable (column) is a Salmon Species and each observation (row) is one Year. # To get a multiple time series plot, however, both Year and Species should be in their own column. You need tidy data: one variable per column. Once you have that you can get the plot shown in the viewer by mapping Year to the x aesthetic and Species to the color aesthetic. # # You'll use the gather() function of the tidyr package, which is already loaded for you. # # Instructions # 100 XP # Instructions # 100 XP # Use gather() to move from fish.species to a tidy data frame, fish.tidy. This data frame should have three columns: Year (int), Species (factor) and Capture (int). # gather() takes four arguments: the original data frame (fish.species), the name of the key column (Species), the name of the value column (Capture) and the name of the grouping variable, with a minus in front (-Year). They can all be specified as object names (i.e. no ""). # Check the structure as a starting point str(fish.species) # Use gather to go from fish.species to fish.tidy fish.tidy <- gather(fish.species, Species, Capture, -Year) str(fish.tidy)
/Data Visualization with ggplot2 (Part 1)/Chapter 4-Geometries/12.R
permissive
artileda/Datacamp-Data-Scientist-with-R-2019
R
false
false
1,510
r
# Multiple time series, part 1 # In the data chapter we discussed how the form of your data affects how you can plot it. Here, you'll explore that topic in the context of multiple time series. # # The dataset you'll use contains the global capture rates of seven salmon species from 1950 - 2010. # # In your workspace, the following dataset is available: # # fish.species: Each variable (column) is a Salmon Species and each observation (row) is one Year. # To get a multiple time series plot, however, both Year and Species should be in their own column. You need tidy data: one variable per column. Once you have that you can get the plot shown in the viewer by mapping Year to the x aesthetic and Species to the color aesthetic. # # You'll use the gather() function of the tidyr package, which is already loaded for you. # # Instructions # 100 XP # Instructions # 100 XP # Use gather() to move from fish.species to a tidy data frame, fish.tidy. This data frame should have three columns: Year (int), Species (factor) and Capture (int). # gather() takes four arguments: the original data frame (fish.species), the name of the key column (Species), the name of the value column (Capture) and the name of the grouping variable, with a minus in front (-Year). They can all be specified as object names (i.e. no ""). # Check the structure as a starting point str(fish.species) # Use gather to go from fish.species to fish.tidy fish.tidy <- gather(fish.species, Species, Capture, -Year) str(fish.tidy)
data_sub <- data_sub[sample(nrow(data_sub)),]#shuffle the data dat_sub <- as.matrix(data_sub[,1:m_sub]) y <- as.vector(as.matrix(data_sub[,m_sub + 1])) train <- sample.split(matrix(0, nrow=1 , ncol = nrow(data_sub)), SplitRatio = 0.75) #SVM with radio kernel # Choose the best cost set.seed(1) tune.sigmoid <- tune(svm, y~., data = data_sub[train,], kernel = "radial", ranges = list(cost=c(0.01,0.1,1,10,100), gamma=c(0.01,0.1,1,10,100))) bestmod <- tune.sigmoid$best.model bestmod #Predictions svm.radial.train <- predict(bestmod, data_sub[train,]) mean((svm.radial.train-data_sub$y[train])^2) # Training svm.radial.test <- predict(bestmod, data_sub[!train,]) mean((svm.radial.test-data_sub$y[!train])^2) # Test plot(bestmod,data_sub) #================================================================================= #SVM with linear kernel # Choose the best cost set.seed(1) tune.out <- tune(svm, y~., data = data[train,], kernel = "linear", ranges = list(cost=c(0.01,0.1,1,10,100))) bestmod <- tune.out$best.model bestmod #Predictions svm.linear.train <- predict(bestmod, data[train,]) mean((svm.linear.train - data$y[train])^2) # Training svm.linear.test <- predict(bestmod, data[!train,]) mean((svm.linear.test - data$y[!train])^2) # Test #================================================================================= #SVM with sigmoid kernel # Choose the best cost set.seed(1) tune.sigmoid <- tune(svm, y~., data = data[train,], kernel = "sigmoid", ranges = list(cost=c(0.01,0.1,1,10,100), gamma=c(0.01,0.1,1,10,100))) bestmod <- tune.sigmoid$best.model bestmod #Predictions svm.sigmoid.train <- predict(bestmod, data[train,]) mean((svm.sigmoid.train-data$y[train])^2) # Training svm.sigmoid.test <- predict(bestmod, data[!train,]) mean((svm.sigmoid.test-data$y[!train])^2) # Test
/SVM/SVM_sub.R
no_license
EvanZhu2000/Data-analysis-of-insurance-claims
R
false
false
1,936
r
data_sub <- data_sub[sample(nrow(data_sub)),]#shuffle the data dat_sub <- as.matrix(data_sub[,1:m_sub]) y <- as.vector(as.matrix(data_sub[,m_sub + 1])) train <- sample.split(matrix(0, nrow=1 , ncol = nrow(data_sub)), SplitRatio = 0.75) #SVM with radio kernel # Choose the best cost set.seed(1) tune.sigmoid <- tune(svm, y~., data = data_sub[train,], kernel = "radial", ranges = list(cost=c(0.01,0.1,1,10,100), gamma=c(0.01,0.1,1,10,100))) bestmod <- tune.sigmoid$best.model bestmod #Predictions svm.radial.train <- predict(bestmod, data_sub[train,]) mean((svm.radial.train-data_sub$y[train])^2) # Training svm.radial.test <- predict(bestmod, data_sub[!train,]) mean((svm.radial.test-data_sub$y[!train])^2) # Test plot(bestmod,data_sub) #================================================================================= #SVM with linear kernel # Choose the best cost set.seed(1) tune.out <- tune(svm, y~., data = data[train,], kernel = "linear", ranges = list(cost=c(0.01,0.1,1,10,100))) bestmod <- tune.out$best.model bestmod #Predictions svm.linear.train <- predict(bestmod, data[train,]) mean((svm.linear.train - data$y[train])^2) # Training svm.linear.test <- predict(bestmod, data[!train,]) mean((svm.linear.test - data$y[!train])^2) # Test #================================================================================= #SVM with sigmoid kernel # Choose the best cost set.seed(1) tune.sigmoid <- tune(svm, y~., data = data[train,], kernel = "sigmoid", ranges = list(cost=c(0.01,0.1,1,10,100), gamma=c(0.01,0.1,1,10,100))) bestmod <- tune.sigmoid$best.model bestmod #Predictions svm.sigmoid.train <- predict(bestmod, data[train,]) mean((svm.sigmoid.train-data$y[train])^2) # Training svm.sigmoid.test <- predict(bestmod, data[!train,]) mean((svm.sigmoid.test-data$y[!train])^2) # Test
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# NOTE(review): this file is regenerated by Rcpp; any hand-written comments
# here will be lost on the next compileAttributes() run.

# Thin R wrapper around the compiled C++ routine '_cmpR_ln_dcmp'.
# Presumably the log-density of a CMP-family distribution at x with
# parameters mu and nu -- confirm against the C++ source.
ln_dcmp <- function(x, mu, nu) {
    .Call('_cmpR_ln_dcmp', PACKAGE = 'cmpR', x, mu, nu)
}

# Thin R wrapper around the compiled C++ routine '_cmpR_samp_cmp'.
# Presumably draws n samples given parameters mu and nu -- confirm
# against the C++ source.
samp_cmp <- function(n, mu, nu) {
    .Call('_cmpR_samp_cmp', PACKAGE = 'cmpR', n, mu, nu)
}
/R/RcppExports.R
no_license
dsjohnson/cmpR
R
false
false
313
r
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# NOTE(review): this file is regenerated by Rcpp; any hand-written comments
# here will be lost on the next compileAttributes() run.

# Thin R wrapper around the compiled C++ routine '_cmpR_ln_dcmp'.
# Presumably the log-density of a CMP-family distribution at x with
# parameters mu and nu -- confirm against the C++ source.
ln_dcmp <- function(x, mu, nu) {
    .Call('_cmpR_ln_dcmp', PACKAGE = 'cmpR', x, mu, nu)
}

# Thin R wrapper around the compiled C++ routine '_cmpR_samp_cmp'.
# Presumably draws n samples given parameters mu and nu -- confirm
# against the C++ source.
samp_cmp <- function(n, mu, nu) {
    .Call('_cmpR_samp_cmp', PACKAGE = 'cmpR', n, mu, nu)
}
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/nonLinearSystems.R \name{henon} \alias{henon} \title{Henon map} \usage{ henon(start = runif(min = -0.5, max = 0.5, n = 2), a = 1.4, b = 0.3, n.sample = 5000, n.transient = 500, do.plot = TRUE) } \arguments{ \item{start}{A 2-dimensional vector indicating the starting values for the x and y Henon coordinates. If the starting point is not specified, it is generated randomly.} \item{a}{The \emph{a} parameter. Default: 1.4.} \item{b}{The \emph{b} parameter. Default: 0.3.} \item{n.sample}{Length of the generated time series. Default: 5000 samples.} \item{n.transient}{Number of transient samples that will be discarded. Default: 500 samples.} \item{do.plot}{Logical value. If TRUE (default value), a plot of the generated Henon system is shown.} } \value{ A list with two vectors named \emph{x} and \emph{y} containing the x-components and the y-components of the Henon map, respectively. } \description{ Generates a 2-dimensional time series using the Henon map. } \details{ The Henon map is defined as follows: \deqn{ x_n = 1 - a \cdot x_{n - 1}^2 + y_{n - 1}}{x[n] = 1 - a*x[n - 1]^2 + y[n - 1]} \deqn{ y_n = b \cdot x_{n - 1}}{y[n] = b*x[n - 1].} The default selection for both \emph{a} and \emph{b} parameters (\emph{a}=1.4 and \emph{b}=0.3) is known to produce a deterministic chaotic time series. } \note{ Some initial values may lead to an unstable system that will tend to infinity. } \examples{ \dontrun{ henon.map=henon(n.sample = 1000, n.transient=10,do.plot=TRUE, start=c(-0.006423277,-0.473545134)) # accessing the x coordinate and plotting it plot(ts(henon.map$x)) } } \author{ Constantino A. Garcia } \references{ Strogatz, S.: Nonlinear dynamics and chaos: with applications to physics, biology, chemistry and engineering (Studies in Nonlinearity) } \seealso{ \code{\link{logisticMap}, \link{lorenz}, \link{rossler}, \link{ikedaMap}, \link{cliffordMap}, \link{sinaiMap}, \link{gaussMap}} }
/man/henon.Rd
no_license
suvo-ee16s073/nonlinearTseries
R
false
false
2,016
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/nonLinearSystems.R \name{henon} \alias{henon} \title{Henon map} \usage{ henon(start = runif(min = -0.5, max = 0.5, n = 2), a = 1.4, b = 0.3, n.sample = 5000, n.transient = 500, do.plot = TRUE) } \arguments{ \item{start}{A 2-dimensional vector indicating the starting values for the x and y Henon coordinates. If the starting point is not specified, it is generated randomly.} \item{a}{The \emph{a} parameter. Default: 1.4.} \item{b}{The \emph{b} parameter. Default: 0.3.} \item{n.sample}{Length of the generated time series. Default: 5000 samples.} \item{n.transient}{Number of transient samples that will be discarded. Default: 500 samples.} \item{do.plot}{Logical value. If TRUE (default value), a plot of the generated Henon system is shown.} } \value{ A list with two vectors named \emph{x} and \emph{y} containing the x-components and the y-components of the Henon map, respectively. } \description{ Generates a 2-dimensional time series using the Henon map. } \details{ The Henon map is defined as follows: \deqn{ x_n = 1 - a \cdot x_{n - 1}^2 + y_{n - 1}}{x[n] = 1 - a*x[n - 1]^2 + y[n - 1]} \deqn{ y_n = b \cdot x_{n - 1}}{y[n] = b*x[n - 1].} The default selection for both \emph{a} and \emph{b} parameters (\emph{a}=1.4 and \emph{b}=0.3) is known to produce a deterministic chaotic time series. } \note{ Some initial values may lead to an unstable system that will tend to infinity. } \examples{ \dontrun{ henon.map=henon(n.sample = 1000, n.transient=10,do.plot=TRUE, start=c(-0.006423277,-0.473545134)) # accessing the x coordinate and plotting it plot(ts(henon.map$x)) } } \author{ Constantino A. Garcia } \references{ Strogatz, S.: Nonlinear dynamics and chaos: with applications to physics, biology, chemistry and engineering (Studies in Nonlinearity) } \seealso{ \code{\link{logisticMap}, \link{lorenz}, \link{rossler}, \link{ikedaMap}, \link{cliffordMap}, \link{sinaiMap}, \link{gaussMap}} }
# Lab transcript: linear regression (ISLR chapter 3).
# Uses the Boston housing data (from MASS) and the Carseats data (from ISLR).
# The script is order-dependent: it relies on attach()'d data frames and on
# graphics state (par, open plot windows) set by earlier lines.
library(MASS)
library(ISLR)

### Simple linear regression
names(Boston)
?Boston
?plot
plot(medv~lstat,Boston)
?lm
# Regress median home value (medv) on percent lower-status population (lstat).
fit1=lm(medv~lstat,data=Boston)
fit1
summary(fit1)
abline(fit1,col="red")   # overlay fitted line on the scatterplot above
names(fit1)
confint(fit1)            # confidence intervals for the coefficients
?confint
# Confidence intervals for the mean response at lstat = 5, 10, 15.
predict(fit1,data.frame(lstat=c(5,10,15)),interval="confidence")

### Multiple linear regression
fit2=lm(medv~lstat+age,data=Boston)
summary(fit2)
fit3=lm(medv~.,Boston)   # all remaining columns as predictors
summary(fit3)
par(mfrow=c(2,2))        # 2x2 grid for the four diagnostic plots
plot(fit3)
fit4=update(fit3,~.-age-indus)   # refit fit3 without age and indus
summary(fit4)

### Nonlinear terms and Interactions
fit5=lm(medv~lstat*age,Boston)   # main effects plus lstat:age interaction
summary(fit5)
fit6=lm(medv~lstat +I(lstat^2),Boston); summary(fit6)   # quadratic term via I()
attach(Boston)
par(mfrow=c(1,1))
plot(medv~lstat)
points(lstat,fitted(fit6),col="red",pch=20)   # overlay quadratic fit
fit7=lm(medv~poly(lstat,4))                   # degree-4 orthogonal polynomial
points(lstat,fitted(fit7),col="blue",pch=20)
plot(1:20,1:20,pch=1:20,cex=2)                # reference chart of plotting symbols

###Qualitative predictors
fix(Carseats)            # opens an interactive spreadsheet editor
names(Carseats)
summary(Carseats)
# Sales model with two interaction terms; ShelveLoc is a factor (dummy-coded).
fit1=lm(Sales~.+Income:Advertising+Age:Price,Carseats)
summary(fit1)
contrasts(Carseats$ShelveLoc)   # show the dummy coding R uses for ShelveLoc

###Writing R functions
# Scatterplot of y against x with the least-squares line overlaid.
regplot=function(x,y){
  fit=lm(y~x)
  plot(x,y)
  abline(fit,col="red")
}
attach(Carseats)
regplot(Price,Sales)
# Same helper, forwarding extra graphical arguments to plot() via ... .
regplot=function(x,y,...){
  fit=lm(y~x)
  plot(x,y,...)
  abline(fit,col="red")
}
regplot(Price,Sales,xlab="Price",ylab="Sales",col="blue",pch=20)
/ch3.R
no_license
ravedata/Exercises_Introduction_to_Stat_Learning
R
false
false
1,274
r
# Lab transcript: linear regression (ISLR chapter 3).
# Uses the Boston housing data (from MASS) and the Carseats data (from ISLR).
# The script is order-dependent: it relies on attach()'d data frames and on
# graphics state (par, open plot windows) set by earlier lines.
library(MASS)
library(ISLR)

### Simple linear regression
names(Boston)
?Boston
?plot
plot(medv~lstat,Boston)
?lm
# Regress median home value (medv) on percent lower-status population (lstat).
fit1=lm(medv~lstat,data=Boston)
fit1
summary(fit1)
abline(fit1,col="red")   # overlay fitted line on the scatterplot above
names(fit1)
confint(fit1)            # confidence intervals for the coefficients
?confint
# Confidence intervals for the mean response at lstat = 5, 10, 15.
predict(fit1,data.frame(lstat=c(5,10,15)),interval="confidence")

### Multiple linear regression
fit2=lm(medv~lstat+age,data=Boston)
summary(fit2)
fit3=lm(medv~.,Boston)   # all remaining columns as predictors
summary(fit3)
par(mfrow=c(2,2))        # 2x2 grid for the four diagnostic plots
plot(fit3)
fit4=update(fit3,~.-age-indus)   # refit fit3 without age and indus
summary(fit4)

### Nonlinear terms and Interactions
fit5=lm(medv~lstat*age,Boston)   # main effects plus lstat:age interaction
summary(fit5)
fit6=lm(medv~lstat +I(lstat^2),Boston); summary(fit6)   # quadratic term via I()
attach(Boston)
par(mfrow=c(1,1))
plot(medv~lstat)
points(lstat,fitted(fit6),col="red",pch=20)   # overlay quadratic fit
fit7=lm(medv~poly(lstat,4))                   # degree-4 orthogonal polynomial
points(lstat,fitted(fit7),col="blue",pch=20)
plot(1:20,1:20,pch=1:20,cex=2)                # reference chart of plotting symbols

###Qualitative predictors
fix(Carseats)            # opens an interactive spreadsheet editor
names(Carseats)
summary(Carseats)
# Sales model with two interaction terms; ShelveLoc is a factor (dummy-coded).
fit1=lm(Sales~.+Income:Advertising+Age:Price,Carseats)
summary(fit1)
contrasts(Carseats$ShelveLoc)   # show the dummy coding R uses for ShelveLoc

###Writing R functions
# Scatterplot of y against x with the least-squares line overlaid.
regplot=function(x,y){
  fit=lm(y~x)
  plot(x,y)
  abline(fit,col="red")
}
attach(Carseats)
regplot(Price,Sales)
# Same helper, forwarding extra graphical arguments to plot() via ... .
regplot=function(x,y,...){
  fit=lm(y~x)
  plot(x,y,...)
  abline(fit,col="red")
}
regplot(Price,Sales,xlab="Price",ylab="Sales",col="blue",pch=20)
\name{BANOVA.mediation}
\alias{BANOVA.mediation}
\alias{print.BANOVA.mediation}
\title{Mediation analysis based on BANOVA models}
\description{
\code{BANOVA.mediation} conducts mediation/moderated mediation analysis based on various BANOVA models.
}
\usage{
BANOVA.mediation(sol_1, sol_2, xvar, mediator, individual = F)
\method{print}{BANOVA.mediation}(x, ...)
}
\arguments{
  \item{sol_1}{a BANOVA.* model based on an outcome variable, a causal variable, a mediator and possible moderators}
  \item{sol_2}{a BANOVA.Normal model for the mediator which includes the causal variable and moderators}
  \item{xvar}{the causal variable}
  \item{mediator}{the mediator variable}
  \item{individual}{whether to output individual level effects}
  \item{x}{a BANOVA.mediation object}
  \item{\dots}{additional arguments, currently ignored}
}
\details{
A mediation or moderated mediation analysis (Baron and Kenny 1986; Zhao, Lynch and Chen 2010; Zhang, Wedel and Pieters 2008) based on BANOVA models is conducted, in which posterior distributions of the direct effect and indirect effect are calculated based on posterior samples. Means and 95\% credible intervals are reported.
}
\value{
\code{BANOVA.mediation} returns an object of class \code{"BANOVA.mediation"}. 
The returned object is a list containing:
\item{dir_effects}{tables of the direct effect}
\item{individual_direct}{the table of the direct effect at the individual level if individual = T and the causal variable is a within-subject variable}
\item{m1_effects}{tables of the effect of the mediator on the outcome}
\item{m2_effects}{tables of the effect of the causal variable on the mediator}
\item{indir_effects}{tables of the indirect effect}
\item{individual_indirect}{the table of the indirect effect at the individual level if individual = T and the mediator is a within-subject variable}
\item{xvar}{the name of the causal variable}
\item{mediator}{the name of the mediator}
\item{individual}{the value of the argument individual}
}
\references{ 
Baron, R. M. and Kenny, D. A. (1986) \emph{Moderator Mediator Variables Distinction in Social Psychological Research: Conceptual, Strategic, and Statistical Considerations}, Journal of Personality and Social Psychology, Vol. 51, No. 6, pp. 1173-82.

Zhang, J., Wedel,M. and Pieters, R. G.M. (2009) \emph{Sales Effects of Attention to Feature Advertisements: A Bayesian Mediation Analysis}, Journal of Marketing Research, Vol.46, No.5, pp. 669-681.

Ying, Y. and MacKinnon,D. P. (2009) \emph{Bayesian Mediation Analysis}, Psychological Methods, Vol. 14, No.4, pp. 301-322.

Zhao, X., John G. L. and Chen, Q. (2010) \emph{Reconsidering Baron and Kenny: Myths and Truths About Mediation Analysis}, Journal of Consumer Research, Vol.37, No.2, pp. 197-206.

Wedel, M. and Dong, C. (2016) \emph{BANOVA: Bayesian Analysis of Variance for Consumer Research}. Submitted. 
} \examples{ data(condstudy) \donttest{ # use BANOVA.run based on 'Stan' model <- BANOVA.model('Normal') stanmodel <- BANOVA.build(model) out2 <- BANOVA.run(att~cond+pict, ~type, fit = stanmodel, data = condstudy, id = condstudy$id, iter = 500, thin = 1, chains = 2) out3 <- BANOVA.run(pict~cond, ~type, fit = stanmodel, data = condstudy, id = condstudy$id, iter = 500, thin = 1, chains = 2) # (moderated) mediation sol <- BANOVA.mediation(out2, out3, xvar='cond', mediator='pict') print(sol) print(sol$dir_effects) } }
/man/BANOVA.mediation.Rd
no_license
chen4519902/BANOVA_R
R
false
false
3,552
rd
\name{BANOVA.mediation}
\alias{BANOVA.mediation}
\alias{print.BANOVA.mediation}
\title{Mediation analysis based on BANOVA models}
\description{
\code{BANOVA.mediation} conducts mediation/moderated mediation analysis based on various BANOVA models.
}
\usage{
BANOVA.mediation(sol_1, sol_2, xvar, mediator, individual = F)
\method{print}{BANOVA.mediation}(x, ...)
}
\arguments{
  \item{sol_1}{a BANOVA.* model based on an outcome variable, a causal variable, a mediator and possible moderators}
  \item{sol_2}{a BANOVA.Normal model for the mediator which includes the causal variable and moderators}
  \item{xvar}{the causal variable}
  \item{mediator}{the mediator variable}
  \item{individual}{whether to output individual level effects}
  \item{x}{a BANOVA.mediation object}
  \item{\dots}{additional arguments, currently ignored}
}
\details{
A mediation or moderated mediation analysis (Baron and Kenny 1986; Zhao, Lynch and Chen 2010; Zhang, Wedel and Pieters 2008) based on BANOVA models is conducted, in which posterior distributions of the direct effect and indirect effect are calculated based on posterior samples. Means and 95\% credible intervals are reported.
}
\value{
\code{BANOVA.mediation} returns an object of class \code{"BANOVA.mediation"}. 
The returned object is a list containing:
\item{dir_effects}{tables of the direct effect}
\item{individual_direct}{the table of the direct effect at the individual level if individual = T and the causal variable is a within-subject variable}
\item{m1_effects}{tables of the effect of the mediator on the outcome}
\item{m2_effects}{tables of the effect of the causal variable on the mediator}
\item{indir_effects}{tables of the indirect effect}
\item{individual_indirect}{the table of the indirect effect at the individual level if individual = T and the mediator is a within-subject variable}
\item{xvar}{the name of the causal variable}
\item{mediator}{the name of the mediator}
\item{individual}{the value of the argument individual}
}
\references{ 
Baron, R. M. and Kenny, D. A. (1986) \emph{Moderator Mediator Variables Distinction in Social Psychological Research: Conceptual, Strategic, and Statistical Considerations}, Journal of Personality and Social Psychology, Vol. 51, No. 6, pp. 1173-82.

Zhang, J., Wedel,M. and Pieters, R. G.M. (2009) \emph{Sales Effects of Attention to Feature Advertisements: A Bayesian Mediation Analysis}, Journal of Marketing Research, Vol.46, No.5, pp. 669-681.

Ying, Y. and MacKinnon,D. P. (2009) \emph{Bayesian Mediation Analysis}, Psychological Methods, Vol. 14, No.4, pp. 301-322.

Zhao, X., John G. L. and Chen, Q. (2010) \emph{Reconsidering Baron and Kenny: Myths and Truths About Mediation Analysis}, Journal of Consumer Research, Vol.37, No.2, pp. 197-206.

Wedel, M. and Dong, C. (2016) \emph{BANOVA: Bayesian Analysis of Variance for Consumer Research}. Submitted. 
} \examples{ data(condstudy) \donttest{ # use BANOVA.run based on 'Stan' model <- BANOVA.model('Normal') stanmodel <- BANOVA.build(model) out2 <- BANOVA.run(att~cond+pict, ~type, fit = stanmodel, data = condstudy, id = condstudy$id, iter = 500, thin = 1, chains = 2) out3 <- BANOVA.run(pict~cond, ~type, fit = stanmodel, data = condstudy, id = condstudy$id, iter = 500, thin = 1, chains = 2) # (moderated) mediation sol <- BANOVA.mediation(out2, out3, xvar='cond', mediator='pict') print(sol) print(sol$dir_effects) } }
# ARIMA forecasting experiments on a toy hourly "sensor" series.
# Requires the 'forecast' package (auto.arima, forecast); ggplot2 and tseries
# are attached for interactive use.
library(ggplot2)
library(forecast)
library(tseries)

# --- 1. One day of data: 24-step-ahead forecast via predict() ----------------
value <- c(1.2, 1.7, 1.6, 1.2, 1.6, 1.3, 1.5, 1.9, 5.4, 4.2, 5.5, 6,
           5.6, 6.2, 6.8, 7.1, 7.1, 5.8, 0, 5.2, 4.6, 3.6, 3, 3.8,
           3.1, 3.4, 2, 3.1, 3.2, 1.6, 0.6, 3.3, 4.9, 6.5, 5.3, 3.5,
           5.3, 7.2, 7.4, 7.3, 7.2, 4, 6.1, 4.3, 4, 2.4, 0.4, 2.4)
sensor <- ts(value, frequency = 24)  # 24 observations per cycle (hourly data, daily seasonality)
fit <- auto.arima(sensor)
LH.pred <- predict(fit, n.ahead = 24)  # point forecasts for the next day
plot(sensor, ylim = c(0, 10), xlim = c(0, 5), type = "o", lwd = 1)  # lwd is numeric (was the string "1")
lines(LH.pred$pred, col = "red", type = "o", lwd = 1)
grid()

# --- 2. Two days of data: forecast() with the default horizon ----------------
value <- c(1.2, 1.7, 1.6, 1.2, 1.6, 1.3, 1.5, 1.9, 5.4, 4.2, 5.5, 6.0,
           5.6, 6.2, 6.8, 7.1, 7.1, 5.8, 0.0, 5.2, 4.6, 3.6, 3.0, 3.8,
           3.1, 3.4, 2.0, 3.1, 3.2, 1.6, 0.6, 3.3, 4.9, 6.5, 5.3, 3.5,
           5.3, 7.2, 7.4, 7.3, 7.2, 4.0, 6.1, 4.3, 4.0, 2.4, 0.4, 2.4,
           1.2, 1.7, 1.6, 1.2, 1.6, 1.3, 1.5, 1.9, 5.4, 4.2, 5.5, 6.0,
           5.6, 6.2, 6.8, 7.1, 7.1, 5.8, 0.0, 5.2, 4.6, 3.6, 3.0, 3.8,
           3.1, 3.4, 2.0, 3.1, 3.2, 1.6, 0.6, 3.3, 4.9, 6.5, 5.3, 3.5,
           5.3, 7.2, 7.4, 7.3, 7.2, 4.0, 6.1, 4.3, 4.0, 2.4, 0.4, 2.4)
sensor <- ts(value, frequency = 24)  # consider adding a start so you get nicer labelling on your chart
fit <- auto.arima(sensor)
fcast <- forecast(fit)
plot(fcast)
grid()
fcast

# --- 3. Same data: manually specified ARIMA(4,1,9), 30-step horizon ----------
# (The duplicated library(forecast) calls, the identical re-definition of
# value/sensor, and an auto.arima() refit whose result was never used have
# been removed -- the refit was the most expensive call in the script.)
fit2 <- arima(sensor, order = c(4, 1, 9))
fcast <- forecast(fit2, h = 30)
plot(fcast)
grid()
fcast
/prova.R
no_license
karafede/forecast_ARIMA
R
false
false
1,993
r
# ARIMA forecasting experiments on a toy hourly "sensor" series.
# Requires the 'forecast' package (auto.arima, forecast); ggplot2 and tseries
# are attached for interactive use.
library(ggplot2)
library(forecast)
library(tseries)

# --- 1. One day of data: 24-step-ahead forecast via predict() ----------------
value <- c(1.2, 1.7, 1.6, 1.2, 1.6, 1.3, 1.5, 1.9, 5.4, 4.2, 5.5, 6,
           5.6, 6.2, 6.8, 7.1, 7.1, 5.8, 0, 5.2, 4.6, 3.6, 3, 3.8,
           3.1, 3.4, 2, 3.1, 3.2, 1.6, 0.6, 3.3, 4.9, 6.5, 5.3, 3.5,
           5.3, 7.2, 7.4, 7.3, 7.2, 4, 6.1, 4.3, 4, 2.4, 0.4, 2.4)
sensor <- ts(value, frequency = 24)  # 24 observations per cycle (hourly data, daily seasonality)
fit <- auto.arima(sensor)
LH.pred <- predict(fit, n.ahead = 24)  # point forecasts for the next day
plot(sensor, ylim = c(0, 10), xlim = c(0, 5), type = "o", lwd = 1)  # lwd is numeric (was the string "1")
lines(LH.pred$pred, col = "red", type = "o", lwd = 1)
grid()

# --- 2. Two days of data: forecast() with the default horizon ----------------
value <- c(1.2, 1.7, 1.6, 1.2, 1.6, 1.3, 1.5, 1.9, 5.4, 4.2, 5.5, 6.0,
           5.6, 6.2, 6.8, 7.1, 7.1, 5.8, 0.0, 5.2, 4.6, 3.6, 3.0, 3.8,
           3.1, 3.4, 2.0, 3.1, 3.2, 1.6, 0.6, 3.3, 4.9, 6.5, 5.3, 3.5,
           5.3, 7.2, 7.4, 7.3, 7.2, 4.0, 6.1, 4.3, 4.0, 2.4, 0.4, 2.4,
           1.2, 1.7, 1.6, 1.2, 1.6, 1.3, 1.5, 1.9, 5.4, 4.2, 5.5, 6.0,
           5.6, 6.2, 6.8, 7.1, 7.1, 5.8, 0.0, 5.2, 4.6, 3.6, 3.0, 3.8,
           3.1, 3.4, 2.0, 3.1, 3.2, 1.6, 0.6, 3.3, 4.9, 6.5, 5.3, 3.5,
           5.3, 7.2, 7.4, 7.3, 7.2, 4.0, 6.1, 4.3, 4.0, 2.4, 0.4, 2.4)
sensor <- ts(value, frequency = 24)  # consider adding a start so you get nicer labelling on your chart
fit <- auto.arima(sensor)
fcast <- forecast(fit)
plot(fcast)
grid()
fcast

# --- 3. Same data: manually specified ARIMA(4,1,9), 30-step horizon ----------
# (The duplicated library(forecast) calls, the identical re-definition of
# value/sensor, and an auto.arima() refit whose result was never used have
# been removed -- the refit was the most expensive call in the script.)
fit2 <- arima(sensor, order = c(4, 1, 9))
fcast <- forecast(fit2, h = 30)
plot(fcast)
grid()
fcast
###################################################################
## Cross-validation
###################################################################
## fect.cv: selects the tuning parameter for fixed-effects counterfactual
## estimators by k-fold cross-validation -- the factor number r for the
## interactive fixed effects ("ife") model and/or the penalty lambda for
## matrix completion ("mc") -- then refits the best model and summarizes the
## ATT, dynamic (switch-on/switch-off) effects, and optional cohort effects.
## Relies on package-internal helpers (initialFit, cv.sample, inter_fe_ub,
## inter_fe_mc) defined elsewhere in the package.
## Fixed in this revision: "Facotr" -> "Factor" in two user-facing messages;
## several comment typos; the "10%" comment now matches the 5% threshold the
## code actually uses.
fect.cv <- function(Y, # Outcome variable, (T*N) matrix
                    X, # Explanatory variables: (T*N*p) array
                    D, # Indicator for treated unit (tr==1)
                    I,
                    II,
                    T.on,
                    T.off = NULL,
                    method = "ife",
                    criterion = "mspe",
                    k = 5, # CV time
                    cv.prop = 0.1,
                    cv.treat = TRUE,
                    cv.nobs = 3,
                    r = 0, # initial number of factors considered if CV==1
                    r.end,
                    nlambda = 10,
                    lambda = NULL,
                    force,
                    hasRevs = 1,
                    tol, # tolerance level
                    norm.para = NULL,
                    group.level = NULL,
                    group = NULL) {

    ##-------------------------------##
    ## Parsing data
    ##-------------------------------##
    placebo.pos <- na.pos <- NULL

    ## unit id and time
    TT <- dim(Y)[1]
    N <- dim(Y)[2]
    if (is.null(X) == FALSE) {
        p <- dim(X)[3]
    } else {
        p <- 0
        X <- array(0, dim = c(1, 1, 0))
    }

    ## replicate data
    YY <- Y
    YY[which(II == 0)] <- 0 ## reset to 0

    t.on <- c(T.on)
    T0.min <- min(apply(II, 2, sum))  # shortest pre-treatment record across units

    ## --------- initial fit using fastplm --------- ##
    data.ini <- matrix(NA, (TT*N), (2 + 1 + p))
    data.ini[, 2] <- rep(1:N, each = TT)   ## unit fe
    data.ini[, 3] <- rep(1:TT, N)          ## time fe
    data.ini[, 1] <- c(Y)                  ## outcome
    if (p > 0) {                           ## covar
        for (i in 1:p) {
            data.ini[, (3 + i)] <- c(X[, , i])
        }
    }
    ## observed Y0 indicator:
    oci <- which(c(II) == 1)
    initialOut <- initialFit(data = data.ini, force = force, oci = oci)
    Y0 <- initialOut$Y0
    beta0 <- initialOut$beta0
    if (p > 0 && sum(is.na(beta0)) > 0) {
        beta0[which(is.na(beta0))] <- 0
    }

    ## ------------- restrictions on candidate hyper parameters ---------- ##
    ## shrink r.end until the model is identified given the observed cells
    obs.con <- (sum(II) - r.end * (N + TT) + r.end^2 - p) <= 0
    if (obs.con) {
        while((sum(II) - r.end * (N + TT) + r.end^2 - p) <= 0) {
            r.end <- r.end - 1
        }
    }
    if (r.end >= T0.min) {
        if (method %in% c("both", "ife")) {
            cat("Factor number should not be greater than ", T0.min-1, "\n", sep = "")
        }
        r.end <- T0.min-1
    } else {
        if (obs.con) {
            if (method %in% c("both", "ife")) {
                cat("Factor number should not be greater than ", r.end, "\n", sep = "")
            }
        }
    }

    ##-------------------------------##
    ## ----------- Main Algorithm ----------- ##
    ##-------------------------------##
    validX <- 1 ## assume no multi-collinearity
    CV.out.ife <- CV.out.mc <- NULL

    ##----------------------------------------------------##
    ##         Cross-validation of r and lambda           ##
    ##----------------------------------------------------##
    r.max <- min(TT, r.end)
    r.cv <- 0 ## initial value

    if (method %in% c("ife", "both") && r.max == 0) {
        r.cv <- 0
        cat("Cross validation cannot be performed since available pre-treatment records of treated units are too few. So set r.cv = 0.\n ")
        est.best <- inter_fe_ub(YY, Y0, X, II, beta0, 0, force = force, tol)
    } else {
        r.old <- r ## save the minimal number of factors
        cat("Cross-validating ...","\n")

        ## ------------- initialize ------------ ##
        cv.pos <- which(t.on<=0)        # pre-treatment cells used for MSPTATT/MSE
        t.on.cv <- t.on[cv.pos]
        count.on.cv <- as.numeric(table(t.on.cv))
        rm.count <- floor(sum(II)*cv.prop)   # cells held out per fold
        cv.count <- sum(II) - rm.count       # cells kept per fold

        ociCV <- matrix(NA, cv.count, k)          ## store indicator
        rmCV <- matrix(NA, rm.count, k)           ## removed indicator
        Y0CV <- array(NA, dim = c(TT, N, k))      ## store initial Y0
        if (p > 0) {
            beta0CV <- array(NA, dim = c(p, 1, k))
        } else {
            beta0CV <- array(0, dim = c(1, 0, k)) ## store initial beta0
        }

        ## draw the k hold-out folds, rejecting draws that empty a row/column
        for (i in 1:k) {
            cv.n <- 0
            repeat{
                cv.n <- cv.n + 1
                cv.id <- cv.sample(II, D, rm.count, cv.nobs, cv.treat)
                II.cv <- II
                II.cv[cv.id] <- 0
                con1 <- sum(apply(II.cv, 1, sum) >= 1) == TT
                con2 <- sum(apply(II.cv, 2, sum) >= 1) == N
                if (con1 & con2) {
                    break
                }
                if (cv.n > 100) {
                    stop("Some units have too few pre-treatment observations. Try to remove them.")
                }
            }
            rmCV[,i] <- cv.id
            ocicv <- setdiff(oci, cv.id)
            ociCV[,i] <- ocicv

            initialOutCv <- initialFit(data = data.ini, force = force, oci = ocicv)
            Y0CV[,,i] <- initialOutCv$Y0

            if (p > 0) {
                beta0cv <- initialOutCv$beta0
                if (sum(is.na(beta0cv)) > 0) {
                    beta0cv[which(is.na(beta0cv))] <- 0
                }
                beta0CV[,,i] <- beta0cv
            }
        }

        ## --------------------------------------------- ##
        ## ---------------- cross validation for ife model ------------------ ##
        ## --------------------------------------------- ##
        if (method %in% c("ife", "both")) {
            r.pc <- est.pc.best <- MSPE.best <- MSPE.pc.best <- NULL

            ## NOTE(review): matrix layout checks criterion == "PC" (upper case)
            ## but the write/print branches below check "pc" (lower case) --
            ## confirm which spelling callers actually pass.
            if (criterion == "PC") {
                CV.out.ife <- matrix(NA, (r.max - r.old + 1), 6)
                colnames(CV.out.ife) <- c("r", "sigma2", "IC", "PC", "MSPTATT", "MSE")
            } else {
                CV.out.ife <- matrix(NA, (r.max - r.old + 1), 7)
                colnames(CV.out.ife) <- c("r", "sigma2", "IC", "PC", "MSPE", "MSPTATT", "MSE")
            }
            CV.out.ife[,"r"] <- c(r.old:r.max)
            CV.out.ife[,"PC"] <- CV.out.ife[,"MSPE"] <- 1e20

            for (i in 1:dim(CV.out.ife)[1]) { ## cross-validation loop starts
                ## inter FE based on control, before & after
                r <- CV.out.ife[i, "r"]
                if (criterion %in% c("mspe", "both")) {
                    SSE <- 0
                    for (ii in 1:k) {
                        II.cv <- II
                        II.cv[rmCV[,ii]] <- 0
                        YY.cv <- YY
                        YY.cv[rmCV[,ii]] <- 0
                        est.cv.fit <- inter_fe_ub(YY.cv, as.matrix(Y0CV[,,ii]), X, II.cv, as.matrix(beta0CV[,,ii]), r, force, tol)$fit
                        SSE <- SSE + sum((YY[rmCV[,ii]]-est.cv.fit[rmCV[,ii]])^2)
                    }
                    MSPE <- SSE/(k*(sum(II) - cv.count))
                }

                est.cv <- inter_fe_ub(YY, Y0, X, II, beta0, r, force, tol) ## overall
                sigma2 <- est.cv$sigma2
                IC <- est.cv$IC
                PC <- est.cv$PC

                ## pre-treatment placebo diagnostics
                eff.v.cv <- c(Y - est.cv$fit)[cv.pos]
                meff <- as.numeric(tapply(eff.v.cv, t.on.cv, mean))
                MSPTATT <- sum(meff^2*count.on.cv)/sum(count.on.cv)
                MSE <- sum(eff.v.cv^2)/length(eff.v.cv)

                ## rescale criteria back to the original outcome scale
                if(!is.null(norm.para)) {
                    if (criterion %in% c("mspe", "both")) {
                        MSPE <- MSPE*(norm.para[1]^2)
                    }
                    sigma2 <- sigma2*(norm.para[1]^2)
                    IC <- est.cv$IC - log(est.cv$sigma2) + log(sigma2)
                    PC <- PC*(norm.para[1]^2)
                }

                if (criterion %in% c("mspe", "both")) {
                    ## accept a larger r only on at least 5% MSPE improvement
                    if ((min(CV.out.ife[,"MSPE"]) - MSPE) > 0.05*min(CV.out.ife[,"MSPE"])) {
                        MSPE.best <- MSPE
                        est.best <- est.cv
                        r.cv <- r
                    } else {
                        if (r == r.cv + 1) cat("*")
                    }
                }

                if (PC < min(CV.out.ife[,"PC"])) {
                    if (criterion == "both") {
                        MSPE.pc.best <- MSPE
                    }
                    est.pc.best <- est.cv
                    r.pc <- r
                }

                if (criterion != "pc") {
                    CV.out.ife[i, 2:7] <- c(sigma2, IC, PC, MSPE, MSPTATT, MSE)
                } else {
                    CV.out.ife[i, 2:6] <- c(sigma2, IC, PC, MSPTATT, MSE)
                }

                if (criterion == "pc") {
                    cat("\n r = ",r, "; sigma2 = ", sprintf("%.5f",sigma2), "; IC = ", sprintf("%.5f",IC), "; PC = ", sprintf("%.5f",PC), "; MSPTATT = ", sprintf("%.5f",MSPTATT), "; MSE = ", sprintf("%.5f",MSE), sep="")
                } else {
                    cat("\n r = ",r, "; sigma2 = ", sprintf("%.5f",sigma2), "; IC = ", sprintf("%.5f",IC), "; PC = ", sprintf("%.5f",PC), "; MSPE = ", sprintf("%.5f",MSPE), "; MSPTATT = ", sprintf("%.5f",MSPTATT), "; MSE = ", sprintf("%.5f",MSE), sep="")
                }
            } ## end of the search for r_star

            ## compare MSPE-selected and PC-selected factor numbers
            if (criterion == "both") {
                if (r.cv > r.pc) {
                    cat("\n\n Factor number selected via cross validation may be larger than the true number. Using the PC criterion.\n\n ")
                    r.cv <- r.pc
                    est.best <- est.pc.best
                    MSPE.best <- MSPE.pc.best
                }
                est.best.ife <- est.best
                MSPE.best.ife <- MSPE.best
            } else if (criterion == "pc") {
                est.best.ife <- est.pc.best
                r.cv <- r.pc
            } else {
                est.best.ife <- est.best
                MSPE.best.ife <- MSPE.best
            }

            if (r > (TT-1)) {cat(" (r hits maximum)")}
            cat("\n\n r* = ",r.cv, sep="")
            cat("\n\n")
        }

        ## ------------------------------------- ##
        ## ---------------- cross validation for mc --------------- ##
        ## ------------------------------------- ##
        if (method %in% c("mc", "both")) {
            cat("Matrix completion method...\n")
            eigen.all <- NULL
            if (is.null(lambda) || length(lambda) == 1) {
                ## create the hyper-parameter sequence: log10-spaced from the
                ## largest singular value of the demeaned outcome down to 0
                Y.lambda <- YY - Y0
                Y.lambda[which(II == 0)] <- 0
                eigen.all <- svd( Y.lambda / (TT * N) )$d
                lambda.max <- log10(max(eigen.all))
                lambda <- rep(NA, nlambda)
                lambda.by <- 3/(nlambda - 2)
                for (i in 1:(nlambda - 1)) {
                    lambda[i] <- 10^(lambda.max - (i - 1) * lambda.by)
                }
                lambda[nlambda] <- 0
            } else {
                Y.lambda <- YY - Y0
                Y.lambda[which(II == 0)] <- 0
                eigen.all <- svd( Y.lambda / (TT * N) )$d
            }

            ## store all MSPE
            MSPE.best <- NULL
            CV.out.mc <- matrix(NA, length(lambda), 4)
            colnames(CV.out.mc) <- c("lambda.norm", "MSPE", "MSPTATT", "MSE")
            CV.out.mc[,"lambda.norm"] <- c(lambda/max(eigen.all))
            CV.out.mc[,"MSPE"] <- 1e20

            for (i in 1:length(lambda)) {
                SSE <- 0
                for (ii in 1:k) {
                    II.cv <- II
                    II.cv[rmCV[,ii]] <- 0
                    YY.cv <- YY
                    YY.cv[rmCV[,ii]] <- 0
                    est.cv.fit <- inter_fe_mc(YY.cv, as.matrix(Y0CV[,,ii]), X, II.cv, as.matrix(beta0CV[,,ii]), 1, lambda[i], force, tol)$fit
                    SSE <- SSE + sum((YY[rmCV[,ii]]-est.cv.fit[rmCV[,ii]])^2)
                }
                MSPE <- SSE/(k*(sum(II) - cv.count))

                est.cv <- inter_fe_mc(YY, Y0, X, II, beta0, 1, lambda[i], force, tol) ## overall

                eff.v.cv <- c(Y - est.cv$fit)[cv.pos]
                meff <- as.numeric(tapply(eff.v.cv, t.on.cv, mean))
                MSPTATT <- sum(meff^2*count.on.cv)/sum(count.on.cv)
                MSE <- sum(eff.v.cv^2)/length(eff.v.cv)

                if(!is.null(norm.para)){
                    MSPE <- MSPE*(norm.para[1]^2)
                }

                ## accept a new lambda only on at least 5% MSPE improvement
                if ((min(CV.out.mc[,"MSPE"]) - MSPE) > 0.05*min(CV.out.mc[,"MSPE"])) {
                    est.best <- est.cv
                    lambda.cv <- lambda[i]
                    MSPE.best <- MSPE
                } else {
                    if (i > 1) {
                        if (lambda.cv == lambda[i-1]) cat("*")
                    }
                }
                CV.out.mc[i, 2:4] <- c(MSPE, MSPTATT, MSE)

                cat("\n lambda.norm = ", sprintf("%.5f",lambda[i]/max(eigen.all)),"; MSPE = ", sprintf("%.5f",MSPE), "; MSPTATT = ", sprintf("%.5f",MSPTATT), "; MSE = ", sprintf("%.5f",MSE), sep="")
            }
            est.best.mc <- est.best
            MSPE.best.mc <- MSPE.best
            cat("\n\n lambda.norm* = ",lambda.cv/max(eigen.all), sep="")
            cat("\n\n")
        }
    } ## End of Cross-Validation

    ## pick the winning estimator (and, under "both", the winning method)
    if (method == "ife") {
        est.best <- est.best.ife
        validF <- ifelse(r.cv > 0, 1, 0)
    } else if (method == "mc") {
        est.best <- est.best.mc
        validF <- est.best$validF
    } else {
        if (MSPE.best.ife <= MSPE.best.mc) {
            est.best <- est.best.ife
            validF <- ifelse(r.cv > 0, 1, 0)
            method <- "ife"
        } else {
            est.best <- est.best.mc
            validF <- est.best$validF
            method <- "mc"
        }
        cat("\n\n Recommended method through cross-validation: ", method, sep = "")
        cat("\n\n")
    }
    validX <- est.best$validX

    ##------------------------------##
    ## ----------- Summarize -------------- ##
    ##------------------------------##

    ## 00. run a fect (r = 0) to obtain benchmark residuals
    if (method == "ife") {
        if (r.cv == 0) {
            est.fect <- est.best
        } else {
            est.fect <- inter_fe_ub(YY, Y0, X, II, beta0, 0, force = force, tol)
        }
    } else {
        est.fect <- inter_fe_ub(YY, Y0, X, II, beta0, 0, force = force, tol)
    }

    ##-------------------------------##
    ##   ATT and Counterfactuals     ##
    ##-------------------------------##

    ## first adjust estimates for normalization
    if (!is.null(norm.para)) {
        Y <- Y * norm.para[1]

        if (method == "ife") {
            ## variance of the error term
            sigma2 <- est.best$sigma2 * (norm.para[1]^2)
            IC <- est.best$IC - log(est.best$sigma2) + log(sigma2)
            PC <- est.best$PC * (norm.para[1]^2)
            est.best$sigma2 <- sigma2
            est.best$IC <- IC
            est.best$PC <- PC
        }

        ## output of estimates
        est.best$mu <- est.best$mu * norm.para[1]
        if (method == "ife" && r.cv > 0) {
            est.best$lambda <- est.best$lambda * norm.para[1]
            est.best$VNT <- est.best$VNT * norm.para[1]
        }
        if (force%in%c(1, 3)) {
            est.best$alpha <- est.best$alpha * norm.para[1]
        }
        if (force%in%c(2,3)) {
            est.best$xi <- est.best$xi * norm.para[1]
        }
        est.best$residuals <- est.best$residuals * norm.para[1]
        est.best$fit <- est.best$fit * norm.para[1]
        est.fect$fit <- est.fect$fit * norm.para[1]
        est.fect$sigma2 <- est.fect$sigma2 * norm.para[1]
    }

    ## 0. relevant parameters
    sigma2 <- IC <- PC <- NULL
    if (method == "ife") {
        sigma2 <- est.best$sigma2
        IC <- est.best$IC
        PC <- est.best$PC
    }

    if (p>0) {
        na.pos <- is.nan(est.best$beta)
        beta <- est.best$beta
        if( sum(na.pos) > 0 ) {
            beta[na.pos] <- NA
        }
    } else {
        beta <- NA
    }

    ## 1. estimated att and counterfactuals
    eff <- Y - est.best$fit
    att.avg <- sum(eff * D)/(sum(D))

    ## att.avg.unit: average of per-treated-unit ATTs
    tr.pos <- which(apply(D, 2, sum) > 0)
    att.unit <- sapply(1:length(tr.pos), function(vec){return(sum(eff[, tr.pos[vec]] * D[, tr.pos[vec]]) / sum(D[, tr.pos[vec]]))})
    att.avg.unit <- mean(att.unit)

    eff.equiv <- Y - est.fect$fit
    equiv.att.avg <- sum(eff.equiv * D) / (sum(D))

    ## 2. rmse for treated units' observations under control
    tr <- which(apply(D, 2, sum) > 0)
    tr.co <- which((as.matrix(1 - D[,tr]) * as.matrix(II[,tr])) == 1)
    eff.tr <- as.matrix(eff[,tr])
    v.eff.tr <- eff.tr[tr.co]
    rmse <- sqrt(mean(v.eff.tr^2))

    ## 3. unbalanced output: blank out cells never observed
    if (0%in%I) {
        eff[which(I == 0)] <- NA
        est.best$fit[which(I == 0)] <- NA
    }
    est.best$residuals[which(II == 0)] <- NA

    if (method == "mc") {
        est.best$sigma2 <- mean(c(est.best$residuals[which(II == 1)])^2) ## mean squared error of residuals
    }

    ## 4. dynamic effects by period relative to treatment onset
    t.on <- c(T.on)
    eff.v <- c(eff) ## a vector
    rm.pos1 <- which(is.na(eff.v))
    rm.pos2 <- which(is.na(t.on))
    eff.v.use1 <- eff.v
    t.on.use <- t.on
    n.on.use <- rep(1:N, each = TT)
    eff.equiv.v <- c(eff.equiv)

    if (NA %in% eff.v | NA %in% t.on) {
        eff.v.use1 <- eff.v[-c(rm.pos1, rm.pos2)]
        t.on.use <- t.on[-c(rm.pos1, rm.pos2)]
        n.on.use <- n.on.use[-c(rm.pos1, rm.pos2)]
        eff.equiv.v <- eff.equiv.v[-c(rm.pos1, rm.pos2)]
    }

    pre.pos <- which(t.on.use <= 0)
    eff.pre <- cbind(eff.v.use1[pre.pos], t.on.use[pre.pos], n.on.use[pre.pos])
    colnames(eff.pre) <- c("eff", "period", "unit")

    eff.pre.equiv <- cbind(eff.equiv.v[pre.pos], t.on.use[pre.pos], n.on.use[pre.pos])
    colnames(eff.pre.equiv) <- c("eff.equiv", "period", "unit")

    pre.sd <- tapply(eff.pre.equiv[,1], eff.pre.equiv[,2], sd)
    pre.sd <- cbind(pre.sd, sort(unique(eff.pre.equiv[, 2])), table(eff.pre.equiv[, 2]))
    colnames(pre.sd) <- c("sd", "period", "count")

    time.on <- sort(unique(t.on.use))
    att.on <- as.numeric(tapply(eff.v.use1, t.on.use, mean)) ## NA already removed
    count.on <- as.numeric(table(t.on.use))

    eff.off <- eff.equiv <- off.sd <- NULL

    ## 6. switch-off effects (only if treatment reversal is allowed)
    if (hasRevs == 1) {
        t.off <- c(T.off)
        rm.pos3 <- which(is.na(t.off))
        eff.v.use2 <- eff.v
        t.off.use <- t.off

        if (NA %in% eff.v | NA %in% t.off) {
            eff.v.use2 <- eff.v[-c(rm.pos1, rm.pos3)]
            t.off.use <- t.off[-c(rm.pos1, rm.pos3)]
        }

        off.pos <- which(t.off.use > 0)
        eff.off <- cbind(eff.v.use2[off.pos], t.off.use[off.pos], n.on.use[off.pos])
        colnames(eff.off) <- c("eff", "period", "unit")

        eff.off.equiv <- cbind(eff.equiv.v[off.pos], t.off.use[off.pos], n.on.use[off.pos])
        colnames(eff.off.equiv) <- c("off.equiv", "period", "unit")

        off.sd <- tapply(eff.off.equiv[,1], eff.off.equiv[,2], sd)
        off.sd <- cbind(off.sd, sort(unique(eff.off.equiv[, 2])), table(eff.off.equiv[, 2]))
        colnames(off.sd) <- c("sd", "period", "count")

        time.off <- sort(unique(t.off.use))
        att.off <- as.numeric(tapply(eff.v.use2, t.off.use, mean)) ## NA already removed
        count.off <- as.numeric(table(t.off.use))
    }

    ## 7. cohort effects (average effect by treatment group)
    if (!is.null(group)) {
        cohort <- cbind(c(group), c(D), c(eff.v))
        rm.pos <- unique(c(rm.pos1, which(cohort[, 2] == 0)))
        cohort <- cohort[-rm.pos, ]

        g.level <- sort(unique(cohort[, 1]))
        raw.group.att <- as.numeric(tapply(cohort[, 3], cohort[, 1], mean))

        group.att <- rep(NA, length(group.level))
        group.att[which(group.level %in% g.level)] <- raw.group.att
    }

    ##-------------------------------##
    ##           Storage             ##
    ##-------------------------------##

    out<-list(
        ## main results
        sigma2 = est.best$sigma2,
        sigma2.fect = est.fect$sigma2,
        T.on = T.on,
        Y.ct = est.best$fit,
        eff = eff,
        att.avg = att.avg,
        att.avg.unit = att.avg.unit,
        ## supporting
        force = force,
        T = TT,
        N = N,
        p = p,
        est = est.best,
        method = method,
        mu = est.best$mu,
        beta = beta,
        validX = validX,
        validF = validF,
        niter = est.best$niter,
        time = time.on,
        att = att.on,
        count = count.on,
        eff.pre = eff.pre,
        eff.pre.equiv = eff.pre.equiv,
        pre.sd = pre.sd,
        rmse = rmse,
        res = est.best$res)

    if (hasRevs == 1) {
        out <- c(out, list(time.off = time.off, att.off = att.off, count.off = count.off, eff.off = eff.off, eff.off.equiv = eff.off.equiv, off.sd = off.sd))
    }
    if (force == 1) {
        out<-c(out, list(alpha = est.best$alpha))
    } else if (force == 2) {
        out<-c(out,list(xi = est.best$xi))
    } else if (force == 3) {
        out<-c(out,list(alpha = est.best$alpha, xi = est.best$xi))
    }
    if (method == "ife") {
        out <- c(out, list(r.cv = r.cv, IC = IC, PC = PC))
        if (r.cv > 0) {
            out <- c(out, list(factor = as.matrix(est.best$factor), lambda = as.matrix(est.best$lambda)))
        }
    }
    if (method == "mc") {
        out <- c(out, list(lambda.cv = lambda.cv, lambda.seq = lambda, lambda.norm = lambda.cv / max(eigen.all), eigen.all = eigen.all))
    }
    ## CV results
    if (!is.null(CV.out.ife)) {
        out <- c(out, list(CV.out.ife = CV.out.ife))
    }
    if (!is.null(CV.out.mc)) {
        out <- c(out, list(CV.out.mc = CV.out.mc))
    }
    if (!is.null(group)) {
        out <- c(out, list(group.att = group.att))
    }
    return(out)
} ## cross-validation function ends
/R/cv.R
no_license
ccepeda10/fect
R
false
false
24,295
r
################################################################### ## Cross-validation ################################################################### fect.cv <- function(Y, # Outcome variable, (T*N) matrix X, # Explanatory variables: (T*N*p) array D, # Indicator for treated unit (tr==1) I, II, T.on, T.off = NULL, method = "ife", criterion = "mspe", k = 5, # CV time cv.prop = 0.1, cv.treat = TRUE, cv.nobs = 3, r = 0, # initial number of factors considered if CV==1 r.end, nlambda = 10, lambda = NULL, force, hasRevs = 1, tol, # tolerance level norm.para = NULL, group.level = NULL, group = NULL ) { ##-------------------------------## ## Parsing data ##-------------------------------## placebo.pos <- na.pos <- NULL ## unit id and time TT <- dim(Y)[1] N <- dim(Y)[2] if (is.null(X) == FALSE) { p <- dim(X)[3] } else { p <- 0 X <- array(0, dim = c(1, 1, 0)) } ## replicate data YY <- Y YY[which(II == 0)] <- 0 ## reset to 0 t.on <- c(T.on) T0.min <- min(apply(II, 2, sum)) ## --------- initial fit using fastplm --------- ## data.ini <- matrix(NA, (TT*N), (2 + 1 + p)) data.ini[, 2] <- rep(1:N, each = TT) ## unit fe data.ini[, 3] <- rep(1:TT, N) ## time fe data.ini[, 1] <- c(Y) ## outcome if (p > 0) { ## covar for (i in 1:p) { data.ini[, (3 + i)] <- c(X[, , i]) } } ## observed Y0 indicator: oci <- which(c(II) == 1) initialOut <- initialFit(data = data.ini, force = force, oci = oci) Y0 <- initialOut$Y0 beta0 <- initialOut$beta0 if (p > 0 && sum(is.na(beta0)) > 0) { beta0[which(is.na(beta0))] <- 0 } ## ------------- restrictions on candidate hyper parameters ---------- ## obs.con <- (sum(II) - r.end * (N + TT) + r.end^2 - p) <= 0 if (obs.con) { while((sum(II) - r.end * (N + TT) + r.end^2 - p) <= 0) { r.end <- r.end - 1 } } if (r.end >= T0.min) { if (method %in% c("both", "ife")) { cat("Facotr number should not be greater than ", T0.min-1, "\n", sep = "") } r.end <- T0.min-1 } else { if (obs.con) { if (method %in% c("both", "ife")) { cat("Facotr number should not be greater 
than ", r.end, "\n", sep = "") } } } ##-------------------------------## ## ----------- Main Algorithm ----------- ## ##-------------------------------## validX <- 1 ## no multi-colinearity CV.out.ife <- CV.out.mc <- NULL ##----------------------------------------------------## ## Cross-validation of r and lambda ## ##----------------------------------------------------## r.max <- min(TT, r.end) r.cv <- 0 ## initial value if (method %in% c("ife", "both") && r.max == 0) { r.cv <- 0 cat("Cross validation cannot be performed since available pre-treatment records of treated units are too few. So set r.cv = 0.\n ") est.best <- inter_fe_ub(YY, Y0, X, II, beta0, 0, force = force, tol) } else { r.old <- r ## save the minimal number of factors cat("Cross-validating ...","\n") ## ----- ## ## ------------- initialize ------------ ## ## ----- ## cv.pos <- which(t.on<=0) t.on.cv <- t.on[cv.pos] count.on.cv <- as.numeric(table(t.on.cv)) ## tot.id <- which(c(II)==1) ## observed control data ## cv.count <- ceiling((sum(II)*sum(II))/sum(I)) rm.count <- floor(sum(II)*cv.prop) cv.count <- sum(II) - rm.count ociCV <- matrix(NA, cv.count, k) ## store indicator rmCV <- matrix(NA, rm.count, k) ## removed indicator Y0CV <- array(NA, dim = c(TT, N, k)) ## store initial Y0 if (p > 0) { beta0CV <- array(NA, dim = c(p, 1, k)) } else { beta0CV <- array(0, dim = c(1, 0, k)) ## store initial beta0 } ## cv.id.all <- c() for (i in 1:k) { cv.n <- 0 repeat{ cv.n <- cv.n + 1 ## cv.id <- cv.sample(II, as.integer(sum(II) - cv.count)) cv.id <- cv.sample(II, D, rm.count, cv.nobs, cv.treat) ## cv.id <- sample(oci, as.integer(sum(II) - cv.count), replace = FALSE) II.cv <- II II.cv[cv.id] <- 0 con1 <- sum(apply(II.cv, 1, sum) >= 1) == TT con2 <- sum(apply(II.cv, 2, sum) >= 1) == N if (con1 & con2) { break } if (cv.n > 100) { stop("Some units have too few pre-treatment observations. 
Try to remove them.") } ## cv.id.all <- c(cv.id.all, list(cv.id)) } rmCV[,i] <- cv.id ocicv <- setdiff(oci, cv.id) ociCV[,i] <- ocicv initialOutCv <- initialFit(data = data.ini, force = force, oci = ocicv) Y0CV[,,i] <- initialOutCv$Y0 if (p > 0) { beta0cv <- initialOutCv$beta0 if (sum(is.na(beta0cv)) > 0) { beta0cv[which(is.na(beta0cv))] <- 0 } beta0CV[,,i] <- beta0cv } } ## --------------------------------------------- ## ## ---------------- cross validation for ife model ------------------ ## ## --------------------------------------------- ## if (method %in% c("ife", "both")) { # cat("Interactive fixed effects model...\n") r.pc <- est.pc.best <- MSPE.best <- MSPE.pc.best <- NULL if (criterion == "PC") { CV.out.ife <- matrix(NA, (r.max - r.old + 1), 6) colnames(CV.out.ife) <- c("r", "sigma2", "IC", "PC", "MSPTATT", "MSE") } else { CV.out.ife <- matrix(NA, (r.max - r.old + 1), 7) colnames(CV.out.ife) <- c("r", "sigma2", "IC", "PC", "MSPE", "MSPTATT", "MSE") } CV.out.ife[,"r"] <- c(r.old:r.max) CV.out.ife[,"PC"] <- CV.out.ife[,"MSPE"] <- 1e20 for (i in 1:dim(CV.out.ife)[1]) { ## cross-validation loop starts ## inter FE based on control, before & after r <- CV.out.ife[i, "r"] ## k <- 5 if (criterion %in% c("mspe", "both")) { SSE <- 0 for (ii in 1:k) { II.cv <- II II.cv[rmCV[,ii]] <- 0 YY.cv <- YY YY.cv[rmCV[,ii]] <- 0 est.cv.fit <- inter_fe_ub(YY.cv, as.matrix(Y0CV[,,ii]), X, II.cv, as.matrix(beta0CV[,,ii]), r, force, tol)$fit SSE <- SSE + sum((YY[rmCV[,ii]]-est.cv.fit[rmCV[,ii]])^2) } MSPE <- SSE/(k*(sum(II) - cv.count)) } est.cv <- inter_fe_ub(YY, Y0, X, II, beta0, r, force, tol) ## overall sigma2 <- est.cv$sigma2 IC <- est.cv$IC PC <- est.cv$PC eff.v.cv <- c(Y - est.cv$fit)[cv.pos] meff <- as.numeric(tapply(eff.v.cv, t.on.cv, mean)) MSPTATT <- sum(meff^2*count.on.cv)/sum(count.on.cv) MSE <- sum(eff.v.cv^2)/length(eff.v.cv) if(!is.null(norm.para)) { if (criterion %in% c("mspe", "both")) { MSPE <- MSPE*(norm.para[1]^2) } sigma2 <- sigma2*(norm.para[1]^2) IC <- 
est.cv$IC - log(est.cv$sigma2) + log(sigma2) PC <- PC*(norm.para[1]^2) } if (criterion %in% c("mspe", "both")) { if ((min(CV.out.ife[,"MSPE"]) - MSPE) > 0.05*min(CV.out.ife[,"MSPE"])) { ## at least 10% improvement for MPSE MSPE.best <- MSPE est.best <- est.cv r.cv <- r } else { if (r == r.cv + 1) cat("*") } } if (PC < min(CV.out.ife[,"PC"])) { if (criterion == "both") { MSPE.pc.best <- MSPE } est.pc.best <- est.cv r.pc <- r } if (criterion != "pc") { CV.out.ife[i, 2:7] <- c(sigma2, IC, PC, MSPE, MSPTATT, MSE) } else { CV.out.ife[i, 2:6] <- c(sigma2, IC, PC, MSPTATT, MSE) } if (criterion == "pc") { cat("\n r = ",r, "; sigma2 = ", sprintf("%.5f",sigma2), "; IC = ", sprintf("%.5f",IC), "; PC = ", sprintf("%.5f",PC), "; MSPTATT = ", sprintf("%.5f",MSPTATT), "; MSE = ", sprintf("%.5f",MSE), sep="") } else { cat("\n r = ",r, "; sigma2 = ", sprintf("%.5f",sigma2), "; IC = ", sprintf("%.5f",IC), "; PC = ", sprintf("%.5f",PC), "; MSPE = ", sprintf("%.5f",MSPE), "; MSPTATT = ", sprintf("%.5f",MSPTATT), "; MSE = ", sprintf("%.5f",MSE), sep="") } } ## end of while: search for r_star over #MSPE.best <- min(CV.out[,"MSPE"]) #PC.best <- min(CV.out[,"PC"]) ## compare if (criterion == "both") { if (r.cv > r.pc) { cat("\n\n Factor number selected via cross validation may be larger than the true number. 
Using the PC criterion.\n\n ") r.cv <- r.pc est.best <- est.pc.best MSPE.best <- MSPE.pc.best } est.best.ife <- est.best MSPE.best.ife <- MSPE.best } else if (criterion == "pc") { est.best.ife <- est.pc.best r.cv <- r.pc } else { est.best.ife <- est.best MSPE.best.ife <- MSPE.best } if (r > (TT-1)) {cat(" (r hits maximum)")} cat("\n\n r* = ",r.cv, sep="") cat("\n\n") } ## ------------------------------------- ## ## ---------------- cross validation for mc --------------- ## ## ------------------------------------- ## if (method %in% c("mc", "both")) { cat("Matrix completion method...\n") eigen.all <- NULL if (is.null(lambda) || length(lambda) == 1) { ## create the hyper-parameter sequence ## biggest candidate lambda ## Y.lambda <- YY ## if (p > 0) { ## for (i in 1:p) { ## Y.lambda <- Y.lambda - X[,,i] * beta0[i,1] ## } ## } Y.lambda <- YY - Y0 ## Y.lambda[which(II == 0)] <- Y0[which(II == 0)] Y.lambda[which(II == 0)] <- 0 eigen.all <- svd( Y.lambda / (TT * N) )$d lambda.max <- log10(max(eigen.all)) lambda <- rep(NA, nlambda) lambda.by <- 3/(nlambda - 2) for (i in 1:(nlambda - 1)) { lambda[i] <- 10^(lambda.max - (i - 1) * lambda.by) } lambda[nlambda] <- 0 } else { Y.lambda <- YY - Y0 Y.lambda[which(II == 0)] <- 0 eigen.all <- svd( Y.lambda / (TT * N) )$d } ## store all MSPE MSPE.best <- NULL CV.out.mc <- matrix(NA, length(lambda), 4) colnames(CV.out.mc) <- c("lambda.norm", "MSPE", "MSPTATT", "MSE") CV.out.mc[,"lambda.norm"] <- c(lambda/max(eigen.all)) CV.out.mc[,"MSPE"] <- 1e20 for (i in 1:length(lambda)) { ## k <- 5 SSE <- 0 for (ii in 1:k) { II.cv <- II II.cv[rmCV[,ii]] <- 0 YY.cv <- YY YY.cv[rmCV[,ii]] <- 0 est.cv.fit <- inter_fe_mc(YY.cv, as.matrix(Y0CV[,,ii]), X, II.cv, as.matrix(beta0CV[,,ii]), 1, lambda[i], force, tol)$fit SSE <- SSE + sum((YY[rmCV[,ii]]-est.cv.fit[rmCV[,ii]])^2) } MSPE <- SSE/(k*(sum(II) - cv.count)) est.cv <- inter_fe_mc(YY, Y0, X, II, beta0, 1, lambda[i], force, tol) ## overall ## sigma2 <- est.cv$sigma2 eff.v.cv <- c(Y - 
est.cv$fit)[cv.pos] meff <- as.numeric(tapply(eff.v.cv, t.on.cv, mean)) MSPTATT <- sum(meff^2*count.on.cv)/sum(count.on.cv) MSE <- sum(eff.v.cv^2)/length(eff.v.cv) if(!is.null(norm.para)){ MSPE <- MSPE*(norm.para[1]^2) ## sigma2 <- sigma2*(norm.para[1]^2) } if ((min(CV.out.mc[,"MSPE"]) - MSPE) > 0.05*min(CV.out.mc[,"MSPE"])) { ## at least 10% improvement for MPSE est.best <- est.cv lambda.cv <- lambda[i] MSPE.best <- MSPE } else { if (i > 1) { if (lambda.cv == lambda[i-1]) cat("*") } } ## CV.out[i, "MSPE"] <- MSPE ## CV.out[i, "sigma2"] <- sigma2 CV.out.mc[i, 2:4] <- c(MSPE, MSPTATT, MSE) cat("\n lambda.norm = ", sprintf("%.5f",lambda[i]/max(eigen.all)),"; MSPE = ", sprintf("%.5f",MSPE), "; MSPTATT = ", sprintf("%.5f",MSPTATT), "; MSE = ", sprintf("%.5f",MSE), sep="") } est.best.mc <- est.best MSPE.best.mc <- MSPE.best cat("\n\n lambda.norm* = ",lambda.cv/max(eigen.all), sep="") cat("\n\n") } } ## End of Cross-Validation if (method == "ife") { est.best <- est.best.ife validF <- ifelse(r.cv > 0, 1, 0) } else if (method == "mc") { est.best <- est.best.mc validF <- est.best$validF } else { if (MSPE.best.ife <= MSPE.best.mc) { est.best <- est.best.ife validF <- ifelse(r.cv > 0, 1, 0) method <- "ife" } else { est.best <- est.best.mc validF <- est.best$validF method <- "mc" } cat("\n\n Recommended method through cross-validation: ", method, sep = "") cat("\n\n") } validX <- est.best$validX ##------------------------------## ## ----------- Summarize -------------- ## ##------------------------------## ## 00. 
run a fect to obtain residuals if (method == "ife") { if (r.cv == 0) { est.fect <- est.best } else { est.fect <- inter_fe_ub(YY, Y0, X, II, beta0, 0, force = force, tol) } } else { est.fect <- inter_fe_ub(YY, Y0, X, II, beta0, 0, force = force, tol) } ##-------------------------------## ## ATT and Counterfactuals ## ##-------------------------------## ## we first adjustment for normalization if (!is.null(norm.para)) { Y <- Y * norm.para[1] if (method == "ife") { ## variance of the error term sigma2 <- est.best$sigma2 * (norm.para[1]^2) IC <- est.best$IC - log(est.best$sigma2) + log(sigma2) PC <- est.best$PC * (norm.para[1]^2) est.best$sigma2 <- sigma2 est.best$IC <- IC est.best$PC <- PC } ## output of estimates est.best$mu <- est.best$mu * norm.para[1] if (method == "ife" && r.cv > 0) { est.best$lambda <- est.best$lambda * norm.para[1] est.best$VNT <- est.best$VNT * norm.para[1] } if (force%in%c(1, 3)) { est.best$alpha <- est.best$alpha * norm.para[1] } if (force%in%c(2,3)) { est.best$xi <- est.best$xi * norm.para[1] } #if (p>0) { # est.best$beta <- est.best$beta * norm.para[1] #} est.best$residuals <- est.best$residuals * norm.para[1] est.best$fit <- est.best$fit * norm.para[1] est.fect$fit <- est.fect$fit * norm.para[1] est.fect$sigma2 <- est.fect$sigma2 * norm.para[1] } ## 0. revelant parameters sigma2 <- IC <- PC <- NULL if (method == "ife") { sigma2 <- est.best$sigma2 IC <- est.best$IC PC <- est.best$PC } if (p>0) { na.pos <- is.nan(est.best$beta) beta <- est.best$beta if( sum(na.pos) > 0 ) { beta[na.pos] <- NA } } else { beta <- NA } ## 1. estimated att and counterfactuals eff <- Y - est.best$fit att.avg <- sum(eff * D)/(sum(D)) ## att.avg.unit tr.pos <- which(apply(D, 2, sum) > 0) att.unit <- sapply(1:length(tr.pos), function(vec){return(sum(eff[, tr.pos[vec]] * D[, tr.pos[vec]]) / sum(D[, tr.pos[vec]]))}) att.avg.unit <- mean(att.unit) eff.equiv <- Y - est.fect$fit equiv.att.avg <- sum(eff.equiv * D) / (sum(D)) ## 2. 
rmse for treated units' observations under control tr <- which(apply(D, 2, sum) > 0) tr.co <- which((as.matrix(1 - D[,tr]) * as.matrix(II[,tr])) == 1) eff.tr <- as.matrix(eff[,tr]) v.eff.tr <- eff.tr[tr.co] rmse <- sqrt(mean(v.eff.tr^2)) ## 3. unbalanced output if (0%in%I) { eff[which(I == 0)] <- NA est.best$fit[which(I == 0)] <- NA ## eff.equiv[which(I == 0)] <- NA } est.best$residuals[which(II == 0)] <- NA if (method == "mc") { est.best$sigma2 <- mean(c(est.best$residuals[which(II == 1)])^2) ## mean squared error of residuals } ## 4. dynamic effects t.on <- c(T.on) eff.v <- c(eff) ## a vector rm.pos1 <- which(is.na(eff.v)) rm.pos2 <- which(is.na(t.on)) eff.v.use1 <- eff.v t.on.use <- t.on n.on.use <- rep(1:N, each = TT) eff.equiv.v <- c(eff.equiv) if (NA %in% eff.v | NA %in% t.on) { eff.v.use1 <- eff.v[-c(rm.pos1, rm.pos2)] t.on.use <- t.on[-c(rm.pos1, rm.pos2)] n.on.use <- n.on.use[-c(rm.pos1, rm.pos2)] eff.equiv.v <- eff.equiv.v[-c(rm.pos1, rm.pos2)] } pre.pos <- which(t.on.use <= 0) eff.pre <- cbind(eff.v.use1[pre.pos], t.on.use[pre.pos], n.on.use[pre.pos]) colnames(eff.pre) <- c("eff", "period", "unit") eff.pre.equiv <- cbind(eff.equiv.v[pre.pos], t.on.use[pre.pos], n.on.use[pre.pos]) colnames(eff.pre.equiv) <- c("eff.equiv", "period", "unit") pre.sd <- tapply(eff.pre.equiv[,1], eff.pre.equiv[,2], sd) pre.sd <- cbind(pre.sd, sort(unique(eff.pre.equiv[, 2])), table(eff.pre.equiv[, 2])) colnames(pre.sd) <- c("sd", "period", "count") time.on <- sort(unique(t.on.use)) att.on <- as.numeric(tapply(eff.v.use1, t.on.use, mean)) ## NA already removed count.on <- as.numeric(table(t.on.use)) eff.off <- eff.equiv <- off.sd <- NULL ## 6. 
switch-off effects if (hasRevs == 1) { t.off <- c(T.off) rm.pos3 <- which(is.na(t.off)) eff.v.use2 <- eff.v t.off.use <- t.off if (NA %in% eff.v | NA %in% t.off) { eff.v.use2 <- eff.v[-c(rm.pos1, rm.pos3)] t.off.use <- t.off[-c(rm.pos1, rm.pos3)] } off.pos <- which(t.off.use > 0) eff.off <- cbind(eff.v.use2[off.pos], t.off.use[off.pos], n.on.use[off.pos]) colnames(eff.off) <- c("eff", "period", "unit") eff.off.equiv <- cbind(eff.equiv.v[off.pos], t.off.use[off.pos], n.on.use[off.pos]) colnames(eff.off.equiv) <- c("off.equiv", "period", "unit") off.sd <- tapply(eff.off.equiv[,1], eff.off.equiv[,2], sd) off.sd <- cbind(off.sd, sort(unique(eff.off.equiv[, 2])), table(eff.off.equiv[, 2])) colnames(off.sd) <- c("sd", "period", "count") time.off <- sort(unique(t.off.use)) att.off <- as.numeric(tapply(eff.v.use2, t.off.use, mean)) ## NA already removed count.off <- as.numeric(table(t.off.use)) } ## 7. cohort effects if (!is.null(group)) { cohort <- cbind(c(group), c(D), c(eff.v)) rm.pos <- unique(c(rm.pos1, which(cohort[, 2] == 0))) cohort <- cohort[-rm.pos, ] g.level <- sort(unique(cohort[, 1])) raw.group.att <- as.numeric(tapply(cohort[, 3], cohort[, 1], mean)) group.att <- rep(NA, length(group.level)) group.att[which(group.level %in% g.level)] <- raw.group.att } ##-------------------------------## ## Storage ##-------------------------------## ##control group residuals out<-list( ## main results sigma2 = est.best$sigma2, sigma2.fect = est.fect$sigma2, T.on = T.on, Y.ct = est.best$fit, eff = eff, att.avg = att.avg, att.avg.unit = att.avg.unit, ## supporting force = force, T = TT, N = N, p = p, est = est.best, method = method, mu = est.best$mu, beta = beta, validX = validX, validF = validF, niter = est.best$niter, time = time.on, att = att.on, count = count.on, eff.pre = eff.pre, eff.pre.equiv = eff.pre.equiv, pre.sd = pre.sd, rmse = rmse, res = est.best$res) if (hasRevs == 1) { out <- c(out, list(time.off = time.off, att.off = att.off, count.off = count.off, eff.off = 
eff.off, eff.off.equiv = eff.off.equiv, off.sd = off.sd)) } if (force == 1) { out<-c(out, list(alpha = est.best$alpha)) } else if (force == 2) { out<-c(out,list(xi = est.best$xi)) } else if (force == 3) { out<-c(out,list(alpha = est.best$alpha, xi = est.best$xi)) } if (method == "ife") { out <- c(out, list(r.cv = r.cv, IC = IC, PC = PC)) if (r.cv > 0) { out <- c(out, list(factor = as.matrix(est.best$factor), lambda = as.matrix(est.best$lambda))) } } if (method == "mc") { out <- c(out, list(lambda.cv = lambda.cv, lambda.seq = lambda, lambda.norm = lambda.cv / max(eigen.all), eigen.all = eigen.all)) } ## CV results if (!is.null(CV.out.ife)) { out <- c(out, list(CV.out.ife = CV.out.ife)) } if (!is.null(CV.out.mc)) { out <- c(out, list(CV.out.mc = CV.out.mc)) } if (!is.null(group)) { out <- c(out, list(group.att = group.att)) } return(out) } ## cross-validation function ends
#!/usr/bin/env Rscript autoroxy::rox_on() library(magrittr) repo <- git2r::repository() branch_name <- (git2r::branches(repo) %>% Filter(git2r::is_head, .) %>% extract2(1))@name version <- gsub("^.*/([^/]*)$", "\\1", branch_name) package_version(version) date <- as.character(Sys.Date()) desc <- read.dcf("DESCRIPTION") desc[, "Version"] <- version if ("Date" %in% colnames(desc)) { desc[, "Date"] <- date } write.dcf(desc, "DESCRIPTION") devtools::document() git2r::add(repo, "DESCRIPTION") git2r::commit(repo, paste("bump version to", version)) master <- Reduce(c, git2r::config())$gitflow.branch.master if (is.null(master)) { warning("Config option gitflow.branch.master not set, are you running git flow?") } else { changes_since_master <- system(paste0('git log $(git rev-list --first-parent ', master, '.. | tail -n 1).. --format=format:"%b" | sed "/^$/d"'), intern=TRUE) news <- character() news <- readLines("NEWS.md") news <- c(paste0("Version ", version, " (", date, ")"), "===", "", changes_since_master, "", "", news) writeLines(news, "NEWS.md") }
/makeR/gitflow/post-flow-release-start
no_license
krlmlr/MakefileR
R
false
false
1,081
#!/usr/bin/env Rscript autoroxy::rox_on() library(magrittr) repo <- git2r::repository() branch_name <- (git2r::branches(repo) %>% Filter(git2r::is_head, .) %>% extract2(1))@name version <- gsub("^.*/([^/]*)$", "\\1", branch_name) package_version(version) date <- as.character(Sys.Date()) desc <- read.dcf("DESCRIPTION") desc[, "Version"] <- version if ("Date" %in% colnames(desc)) { desc[, "Date"] <- date } write.dcf(desc, "DESCRIPTION") devtools::document() git2r::add(repo, "DESCRIPTION") git2r::commit(repo, paste("bump version to", version)) master <- Reduce(c, git2r::config())$gitflow.branch.master if (is.null(master)) { warning("Config option gitflow.branch.master not set, are you running git flow?") } else { changes_since_master <- system(paste0('git log $(git rev-list --first-parent ', master, '.. | tail -n 1).. --format=format:"%b" | sed "/^$/d"'), intern=TRUE) news <- character() news <- readLines("NEWS.md") news <- c(paste0("Version ", version, " (", date, ")"), "===", "", changes_since_master, "", "", news) writeLines(news, "NEWS.md") }