content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
# Data-munging practice script.
# Part 1: read the World Series dataset from CMU StatLib and order it by year.
# Part 2: derive a profit-margin column on `companiesData` (defined elsewhere).
# Part 3: summarise arrival delays by destination for `Newyorkflights`.
library(magrittr)
library(tidyverse)

url <- "http://lib.stat.cmu.edu/datasets/wseries"

# The file has a 35-line header; the data proper is 23 lines of
# (year, win/loss pattern) pairs.
series <- scan(
  url,
  skip = 35,
  nlines = 23,
  what = list(year = integer(0), pattern = character(0))
)

# Reorder both parallel vectors chronologically.
year_order <- order(series$year)
series <- list(
  year = series$year[year_order],
  pattern = series$pattern[year_order]
)
series$year
series$pattern

# Turn the fiscal-year column into an ordered factor (as Tableau would).
# NOTE(review): `companiesData` is not created in this script — it is
# assumed to exist in the workspace; confirm before running.
companiesData$fy <- factor(companiesData$fy, ordered = TRUE)

# Profit margin as a percentage, rounded to one decimal place.
companiesData$margin <- (companiesData$profit / companiesData$revenue) * 100
companiesData$margin <- round(companiesData$margin, 1)

# Same margin computation expressed in tidyverse style.
# The result is printed, not assigned — `companiesData` is unchanged.
companiesData %>% mutate(Margin = round(profit / revenue*100, 1))

# Mean distance and mean arrival delay per destination, keeping only
# destinations with more than 20 flights and excluding Honolulu.
# NOTE(review): `Newyorkflights` is assumed to exist in the workspace.
delays <- Newyorkflights %>%
  group_by(dest) %>%
  summarise(
    count = n(),
    dist = mean(distance, na.rm = TRUE),
    delay = mean(arr_delay, na.rm = TRUE)
  ) %>%
  filter(count > 20, dest != "HNL")
head(delays)
/Data Munging Training.R
no_license
elikplim25/R---Codes
R
false
false
1,130
r
# Data-munging practice script.
# Part 1: read the World Series dataset from CMU StatLib and order it by year.
# Part 2: derive a profit-margin column on `companiesData` (defined elsewhere).
# Part 3: summarise arrival delays by destination for `Newyorkflights`.
library(magrittr)
library(tidyverse)

url <- "http://lib.stat.cmu.edu/datasets/wseries"

# The file has a 35-line header; the data proper is 23 lines of
# (year, win/loss pattern) pairs.
series <- scan(
  url,
  skip = 35,
  nlines = 23,
  what = list(year = integer(0), pattern = character(0))
)

# Reorder both parallel vectors chronologically.
year_order <- order(series$year)
series <- list(
  year = series$year[year_order],
  pattern = series$pattern[year_order]
)
series$year
series$pattern

# Turn the fiscal-year column into an ordered factor (as Tableau would).
# NOTE(review): `companiesData` is not created in this script — it is
# assumed to exist in the workspace; confirm before running.
companiesData$fy <- factor(companiesData$fy, ordered = TRUE)

# Profit margin as a percentage, rounded to one decimal place.
companiesData$margin <- (companiesData$profit / companiesData$revenue) * 100
companiesData$margin <- round(companiesData$margin, 1)

# Same margin computation expressed in tidyverse style.
# The result is printed, not assigned — `companiesData` is unchanged.
companiesData %>% mutate(Margin = round(profit / revenue*100, 1))

# Mean distance and mean arrival delay per destination, keeping only
# destinations with more than 20 flights and excluding Honolulu.
# NOTE(review): `Newyorkflights` is assumed to exist in the workspace.
delays <- Newyorkflights %>%
  group_by(dest) %>%
  summarise(
    count = n(),
    dist = mean(distance, na.rm = TRUE),
    delay = mean(arr_delay, na.rm = TRUE)
  ) %>%
  filter(count > 20, dest != "HNL")
head(delays)
\name{NISTcaratSItOkg} \alias{NISTcaratSItOkg} \title{Convert carat, metric to kilogram } \usage{NISTcaratSItOkg(caratSI)} \description{\code{NISTcaratSItOkg} converts from carat, metric to kilogram (kg) } \arguments{ \item{caratSI}{carat, metric } } \value{kilogram (kg) } \source{ National Institute of Standards and Technology (NIST), 2014 NIST Guide to SI Units B.8 Factors for Units Listed Alphabetically \url{http://physics.nist.gov/Pubs/SP811/appenB8.html} } \references{ National Institute of Standards and Technology (NIST), 2014 NIST Guide to SI Units B.8 Factors for Units Listed Alphabetically \url{http://physics.nist.gov/Pubs/SP811/appenB8.html} } \author{Jose Gama} \examples{ NISTcaratSItOkg(10) } \keyword{programming}
/man/NISTcaratSItOkg.Rd
no_license
cran/NISTunits
R
false
false
737
rd
\name{NISTcaratSItOkg} \alias{NISTcaratSItOkg} \title{Convert carat, metric to kilogram } \usage{NISTcaratSItOkg(caratSI)} \description{\code{NISTcaratSItOkg} converts from carat, metric to kilogram (kg) } \arguments{ \item{caratSI}{carat, metric } } \value{kilogram (kg) } \source{ National Institute of Standards and Technology (NIST), 2014 NIST Guide to SI Units B.8 Factors for Units Listed Alphabetically \url{http://physics.nist.gov/Pubs/SP811/appenB8.html} } \references{ National Institute of Standards and Technology (NIST), 2014 NIST Guide to SI Units B.8 Factors for Units Listed Alphabetically \url{http://physics.nist.gov/Pubs/SP811/appenB8.html} } \author{Jose Gama} \examples{ NISTcaratSItOkg(10) } \keyword{programming}
# calls WSM, for testing WSM interactively outside of server # set working directory only if it exists has_wd <- tryCatch({ workdir <- "~/Beatrice2/R_ELF/R_NEST/MCDA_App_Shiny/MCDA_06262019/src/dams_mcda" setwd(workdir) message("set Working Directory: ", workdir) }, error=function(e){ message("Working Directory does NOT exist.") }) source("WSM.R") library(abind) library(data.table) DamsData <- read.csv('DamsData_Workshop.csv') # this is the dataset for the individual dams, where rows = dams and cols = criteria DamsData <- data.frame(DamsData) TestData <- read.csv('EqualPrefs_forLiveSite.csv', row.names = "DAM") RawCriteriaMatrix <- data.frame(TestData)#test preference data for 8 dams, 14 criteria each # criteria input identifiers criteria_inputs <- c( "FishBiomass", "RiverRec", "Reservoir", "ProjectCost", "BreachDamage", "NumProperties", "ElectricityGeneration", "AvoidEmissions", "IndigenousLifeways", "IndustrialHistory", "CommunityIdentity", "Aesthetics", "Health", "Justice" ) #dam display names dam_names <- as.list(c('WestEnfield','Medway','E.Millinocket', 'Dolby','North Twin','Millinocket','Millinocket Lake','Ripogenus')) # alternative display names (for labeling tables and graphs) alternative_names <- as.list(c( "Remove Dam", "Improve Fish Passage", "Improve Hydro", "Hydro And Fish", "Keep and Maintain Dam" )) # list of dams available_dams <- seq(1:8) # list of alternatives available_alternatives <- seq(1:5) # matrix setup matrix_cols <- length(criteria_inputs) # 14 default (output size) matrix_rows <- length(available_dams) # 8 default matrix_levs_ind <- length(available_alternatives)# 5 default message("Decision Criteria", matrix_cols, "Dams", matrix_rows, "Decision Alternatives", matrix_levs_ind) #---------------------------------------- # SINGLE DAM PROCEDURE FOR PREFERENCES # # Build Preference Matrix with blank levels, no normalization # score will be raw values from 0-1 based on user input #---------------------------------------- #Ind_* prefix indicates 
that we are dealing with individual or single dams, where the 5 decision alternatives are taken into account #locally and not as a part of the larger set of 8 dams. Ind_PrefMatrix <- array(data = NA, dim=c(8,14)) #This is a 3-D blank matrix message("Fill User Preference Matrix") # weights in matrix for (n in 1:matrix_rows){ for (k in 1:matrix_cols){ x <- RawCriteriaMatrix[n,k] Ind_PrefMatrix[n,k] <- tryCatch({ #message("A", x, ', ', crit_imp) (x) }, error=function(e){ (NA) }) } #End dams (rows) for loop. } #End criteria (columns) for loop. Ind_PrefMatrix <- array(rep(Ind_PrefMatrix,5), dim=c(dim(Ind_PrefMatrix), 5)) message("fill Ind Pref Matrix") #This subsets by dam (row) and transforms individual dam matrices WestEnf_PrefMatrix <- subset(Ind_PrefMatrix[1,,]) WestEnf_PrefMatrix <- data.frame(t(WestEnf_PrefMatrix)) Med_PrefMatrix <- subset(Ind_PrefMatrix[2,,]) Med_PrefMatrix <- data.frame(t(Med_PrefMatrix)) EastMill_PrefMatrix <- subset(Ind_PrefMatrix[3,,]) EastMill_PrefMatrix <- data.frame(t(EastMill_PrefMatrix)) Dolby_PrefMatrix <- subset(Ind_PrefMatrix[4,,]) Dolby_PrefMatrix <- data.frame(t(Dolby_PrefMatrix)) NorthTw_PrefMatrix <- subset(Ind_PrefMatrix[5,,]) NorthTw_PrefMatrix <- data.frame(t(NorthTw_PrefMatrix)) Mill_PrefMatrix <- subset(Ind_PrefMatrix[6,,]) Mill_PrefMatrix <- data.frame(t(Mill_PrefMatrix)) MillLake_PrefMatrix <- subset(Ind_PrefMatrix[7,,]) MillLake_PrefMatrix <- data.frame(t(MillLake_PrefMatrix)) Rip_PrefMatrix <- subset(Ind_PrefMatrix[8,,]) Rip_PrefMatrix <- data.frame(t(Rip_PrefMatrix)) #---------------------------------------- # SINGLE DAM DATA NORMALIATION PROCEDURE #Data Normalization using Min / Max Vectors # Retrieve criteria values for each dam (referred to as Ind_DamsDataMartrix), for each MCDA scenario (from server?) 
a 3D matrix [dams,criteria,alternatives] # Normalization procedure: # get maximum and minimum criteria value for each criterion, each dam, produces two 2D matrices [dams, max/min criteria] # for positive values: norm = (f - f_min) / (f_max - f_min) # for negative values (like cost): norm = 1 - (f - f_max) / (f_min - f_max) # result is 3D matrix with dam-specific criteria values normalized by min and max criteria sampled over all alternatives #---------------------------------------- #retrieve DamsData to manipulate into DamsDataMatrix Ind_DamsDataMatrix <- array(data=NA, dim = c(8, 14, 5)) #creates empty 3d array in shape we want Remove <- cbind(DamsData$FishBiomass_Remove, DamsData$RiverRec_Rem, DamsData$ResStorage_Rem, DamsData$Cost_Remove, DamsData$Damage_Rem, DamsData$Properties_Rem, DamsData$AvgAnnualGen_Rem, DamsData$EmissionsReduc_Rem, DamsData$Culture_Remove, DamsData$History_Remove, DamsData$Community_Remove, DamsData$Aesthetics_Remove, DamsData$Health_Remove, DamsData$Justice_Remove) Improve_Fish <- cbind(DamsData$FishBiomass_ImproveFish, DamsData$RiverRec, DamsData$ResStorage, DamsData$Cost_ImproveFish, DamsData$Damage, DamsData$Properties,DamsData$AvgAnnualGen, DamsData$EmissionsReduc, DamsData$Culture_ImproveFish, DamsData$History_ImproveFish, DamsData$Community_ImproveFish, DamsData$Aesthetics_ImproveFish, DamsData$Health_ImproveFish, DamsData$Justice_ImproveFish) Improve_Hydro <- cbind(DamsData$FishBiomass_ImproveHydro, DamsData$RiverRec, DamsData$ResStorage, DamsData$Cost_ImproveHydro, DamsData$Damage, DamsData$Properties,DamsData$AvgAnnualGen_Add, DamsData$EmissionsReduc_Add, DamsData$Culture_ImproveHydro, DamsData$History_ImproveHydro, DamsData$Community_ImproveHydro, DamsData$Aesthetics_ImproveHydro, DamsData$Health_ImproveHydro, DamsData$Justice_ImproveHydro) FishANDHydro <- cbind(DamsData$FishBiomass_FishANDHydro, DamsData$RiverRec, DamsData$ResStorage, DamsData$Cost_FishANDHydro, DamsData$Damage, DamsData$Properties, DamsData$AvgAnnualGen_Add, 
DamsData$EmissionsReduc_Add, DamsData$Culture_FishANDHydro, DamsData$History_FishANDHydro, DamsData$Community_FishANDHydro, DamsData$Aesthetics_FishANDHydro, DamsData$Health_FishANDHydro, DamsData$Justice_FishANDHydro) KeepMaintain <- cbind(DamsData$FishBiomass_KeepMaintain, DamsData$RiverRec, DamsData$ResStorage, DamsData$Cost_KeepMaintain, DamsData$Damage, DamsData$Properties, DamsData$AvgAnnualGen, DamsData$EmissionsReduc, DamsData$Culture_KeepMaintain, DamsData$History_KeepMaintain, DamsData$Community_KeepMaintain, DamsData$Aesthetics_KeepMaintain, DamsData$Health_KeepMaintain, DamsData$Justice_KeepMaintain) #This abind creates our 3D matrix Ind_DamsDataMatrix <- abind(Remove, Improve_Fish, Improve_Hydro, FishANDHydro, KeepMaintain, along = 3, force.array=TRUE) #------------------------SUBSET BY DAM (row)-------------------------- WestEnf_DataMatrix <- subset(Ind_DamsDataMatrix[1,,]) WestEnf_DataMatrix <- data.frame(t(WestEnf_DataMatrix)) Med_DataMatrix <- subset(Ind_DamsDataMatrix[2,,]) Med_DataMatrix <- data.frame(t(Med_DataMatrix)) EastMill_DataMatrix <- subset(Ind_DamsDataMatrix[3,,]) EastMill_DataMatrix <- data.frame(t(EastMill_DataMatrix)) Dolby_DataMatrix <- subset(Ind_DamsDataMatrix[4,,]) Dolby_DataMatrix <- data.frame(t(Dolby_DataMatrix)) NorthTw_DataMatrix <- subset(Ind_DamsDataMatrix[5,,]) NorthTw_DataMatrix <- data.frame(t(NorthTw_DataMatrix)) Mill_DataMatrix <- subset(Ind_DamsDataMatrix[6,,]) Mill_DataMatrix <- data.frame(t(Mill_DataMatrix)) MillLake_DataMatrix <- subset(Ind_DamsDataMatrix[7,,]) MillLake_DataMatrix <- data.frame(t(MillLake_DataMatrix)) Rip_DataMatrix <- subset(Ind_DamsDataMatrix[8,,]) Rip_DataMatrix <- data.frame(t(Rip_DataMatrix)) AllDataMatrix <- array(data=NA, dim=c(5,14,8)) AllDataMatrix <- provideDimnames(AllDataMatrix, sep="_", base=list("alternative", "criterion", "dam")) AllDataMatrix[,,1] <- simplify2array(WestEnf_DataMatrix) AllDataMatrix[,,2] <- simplify2array(Med_DataMatrix) AllDataMatrix[,,3] <- 
simplify2array(EastMill_DataMatrix) AllDataMatrix[,,4] <- simplify2array(Dolby_DataMatrix) AllDataMatrix[,,5] <- simplify2array(NorthTw_DataMatrix) AllDataMatrix[,,6] <- simplify2array(Mill_DataMatrix) AllDataMatrix[,,7] <- simplify2array(MillLake_DataMatrix) AllDataMatrix[,,8] <- simplify2array(Rip_DataMatrix) #--------NORMALIZATION FOR INDIVIDUAL DAMS RESULTS------------------- # iterate each dam & criteria for min,max MaxVectors <- array(data=NA, dim=c(matrix_cols, matrix_rows)) MinVectors <- array(data=NA, dim=c(matrix_cols, matrix_rows)) for (p in 1:matrix_rows){ min_vector_list <- list("list", matrix_cols) max_vector_list <- list("list", matrix_cols) for ( k in 1:matrix_cols ){ if (p==1){ #message("dam ", p, " column ", k, " vector ", AllDataMatrix[,k,p]) } min_vector_list[[k]] <- min(AllDataMatrix[,k,p], na.rm=FALSE) max_vector_list[[k]] <- max(AllDataMatrix[,k,p], na.rm=FALSE) } MaxVectors[,p] <- unlist(max_vector_list) MinVectors[,p] <- unlist(min_vector_list) } #message("min vector for dam 1 ", MinVectors[,1]) #message("max vector for dam 1 ", MaxVectors[,1]) #---------------------------------------- # SINGLE DAM WEIGHTING PROCEDURE #Build Weighting Matrix for ind. 
dams # score will be min/max normalized values from 0-1 # array of rows that use minimization (cost, damage-related, properties impacted) min_crit_columns <- c(4, 5, 6) #---------------------------------------- # make normalized values of each value in 3d matrix, [alt, crit, dam] Ind_NormalizedMatrix <- array(data=NA, dim = c(matrix_levs_ind,matrix_cols,matrix_rows)) # array of rows that use minimization (cost or damage-related) min_crit_columns <- c(4, 5, 6) # make normalized values of each value in matrix for (k in 1:matrix_cols){ for (n in 1:matrix_rows){ for (p in 1:matrix_levs_ind){ x <- AllDataMatrix[p,k,n] crit_min_x <- MinVectors[k,n] crit_max_x <- MaxVectors[k,n] # debug Ind_NormalizedMatrix #if (n == 1){ message("NormalMatrx dam ", n, " criteria ", k, " alt ", p, ' min ', crit_min_x, ' max ', crit_max_x) } Ind_NormalizedMatrix[p,k,n] <- tryCatch({ if (k %in% min_crit_columns){ # alternative method # maximize normalization (1-(x-crit_min_x) / (crit_max_x - crit_min_x)) }else{ # default method # minimize normilization ((x - crit_min_x) / (crit_max_x - crit_min_x)) } }, error=function(e){ (NA) }) } } } is.nan.data.frame <- function(a){ do.call(cbind, lapply(a, is.nan)) } Ind_NormalizedMatrix[is.nan.data.frame(Ind_NormalizedMatrix)] <- 0 Ind_NormalizedMatrix[2:5,6,3] <- c(1,1,1,1)#This replaces properties NaN at East Millinocket #Ind_NormalizedMatrix[1,5,3] <- 1 #This replaces damage 0 value for Remove at East Millinocket Ind_NormalizedMatrix[1,1,2] <- 1 #This fish habitat NaN at Medway Ind_NormalizedMatrix[5,3,1:3] <- 1#This replaces the reservoir storage NaN at West Enfield, Medway, East Millinocket Ind_NormalizedMatrix[1,2,7] <- 1 #This replaces the river rec NaN at Millinocket Lake #message('Ind_Normalized column ', Ind_NormalizedMatrix[1,,1]) #---------------------------------------- # SINGLE DAM WEIGHTING PROCEDURE #---------------------------------------- Dam1Results <- (Ind_NormalizedMatrix[,,1]*(WestEnf_PrefMatrix)) Dam2Results <- 
(Ind_NormalizedMatrix[,,2]*(Med_PrefMatrix)) Dam3Results <- (Ind_NormalizedMatrix[,,3]*(EastMill_PrefMatrix)) Dam4Results <- (Ind_NormalizedMatrix[,,4]*(Dolby_PrefMatrix)) Dam5Results <- (Ind_NormalizedMatrix[,,5]*(NorthTw_PrefMatrix)) Dam6Results <- (Ind_NormalizedMatrix[,,6]*(Mill_PrefMatrix)) Dam7Results <- (Ind_NormalizedMatrix[,,7]*(MillLake_PrefMatrix)) Dam8Results <- (Ind_NormalizedMatrix[,,8]*(Rip_PrefMatrix)) # store all results in one data structure WeightedResults <- array( data=NA, dim=c(matrix_levs_ind,matrix_cols,matrix_rows)) WeightedResults[,,1] <- as.matrix(Dam1Results) WeightedResults[,,2] <- as.matrix(Dam2Results) WeightedResults[,,3] <- as.matrix(Dam3Results) WeightedResults[,,4] <- as.matrix(Dam4Results) WeightedResults[,,5] <- as.matrix(Dam5Results) WeightedResults[,,6] <- as.matrix(Dam6Results) WeightedResults[,,6] <- as.matrix(Dam6Results) WeightedResults[,,7] <- as.matrix(Dam7Results) WeightedResults[,,8] <- as.matrix(Dam8Results) WeightedResults <- round(WeightedResults, 0) # sum scores ScoreSums <- array(data=NA, dim=c(matrix_rows, matrix_levs_ind)) for (damid in 1:matrix_rows){ for (j in 1:matrix_levs_ind){ # debug #if (damid==1){ message( "Scoresum dam: ", damid, " j ", j, " to sum ", WeightedResults[j,,damid], " orig_to_sum ", Dam1Results[j, 1:matrix_cols]) } ScoreSums[damid, j] <- sum(as.numeric(WeightedResults[j,,damid])) } } # Ind ScoreSum Ind_scoresum <- round(as.data.frame(ScoreSums, rownames = dam_names),0) message("Ind_scoresum ", Ind_scoresum) rownames(Ind_scoresum) <- dam_names colnames(Ind_scoresum) <- alternative_names # Ind WeightedScoreMatrix Ind_WeightedScoreMatrix <- as.data.frame(rbind(Dam1Results, Dam2Results, Dam3Results, Dam4Results, Dam5Results, Dam6Results, Dam7Results, Dam8Results)) colnames(Ind_WeightedScoreMatrix)<- criteria_inputs # call WSM results <- WSM(RawCriteriaMatrix, DamsData, Ind_WeightedScoreMatrix, Ind_scoresum) # results are list(Ind_WeightedScoreMatrix, Ind_scoresum, scoresum_total, fname) 
# message("WSM Results", results)

# ----------------------------------------
# Final Outputs: TABLES
# ----------------------------------------
# dam display names
dam_names <- as.list(c('WestEnfield', 'Medway', 'E.Millinocket', 'Dolby',
                       'North Twin', 'Millinocket', 'Millinocket Lake', 'Ripogenus'))
# alternative display names (for labeling tables and graphs)
alternative_names <- as.list(c("Remove Dam", "Improve Fish Passage", "Improve Hydro",
                               "Improve Hydro AND Fish", "Keep and Maintain Dam"))

# West Enfield / Dam 1 output table(s)
Dam1RawTable <- setDT(WestEnf_DataMatrix)
row.names(Dam1RawTable) <- alternative_names
colnames(Dam1RawTable) <- criteria_inputs
Dam1NormTable <- setDT(data.frame(round(Ind_NormalizedMatrix[, , 1], 1) * 100))
row.names(Dam1NormTable) <- alternative_names
colnames(Dam1NormTable) <- criteria_inputs
Dam1ScoreTable <- setDT(round(Dam1Results, 1) * 100)
row.names(Dam1ScoreTable) <- alternative_names
colnames(Dam1ScoreTable) <- criteria_inputs
# NOTE(review): only Dam 1 tables are scaled to 0-100; dams 2-8 below stay on the
# raw 0-1 scale — confirm whether this asymmetry is intentional.

# Medway / Dam 2 output table(s)
Dam2RawTable <- setDT(Med_DataMatrix)
rownames(Dam2RawTable) <- alternative_names
colnames(Dam2RawTable) <- criteria_inputs
Dam2NormTable <- setDT(data.frame(Ind_NormalizedMatrix[, , 2]))
row.names(Dam2NormTable) <- alternative_names
colnames(Dam2NormTable) <- criteria_inputs
Dam2ScoreTable <- setDT(Dam2Results)
row.names(Dam2ScoreTable) <- alternative_names
colnames(Dam2ScoreTable) <- criteria_inputs

# East Millinocket / Dam 3 output table(s)  (comment previously mislabeled "Millinocket")
Dam3RawTable <- setDT(EastMill_DataMatrix)
rownames(Dam3RawTable) <- alternative_names
colnames(Dam3RawTable) <- criteria_inputs
Dam3NormTable <- setDT(data.frame(Ind_NormalizedMatrix[, , 3]))
row.names(Dam3NormTable) <- alternative_names
colnames(Dam3NormTable) <- criteria_inputs
Dam3ScoreTable <- setDT(Dam3Results)
row.names(Dam3ScoreTable) <- alternative_names
colnames(Dam3ScoreTable) <- criteria_inputs

# Dolby / Dam 4 output table(s)  (comment previously mislabeled "East Millinocket")
Dam4RawTable <- setDT(Dolby_DataMatrix)
rownames(Dam4RawTable) <- alternative_names
colnames(Dam4RawTable) <- criteria_inputs
Dam4NormTable <- setDT(data.frame(Ind_NormalizedMatrix[, , 4]))
row.names(Dam4NormTable) <- alternative_names
colnames(Dam4NormTable) <- criteria_inputs
Dam4ScoreTable <- setDT(Dam4Results)
row.names(Dam4ScoreTable) <- alternative_names
colnames(Dam4ScoreTable) <- criteria_inputs

# North Twin / Dam 5 output table(s)
Dam5RawTable <- setDT(NorthTw_DataMatrix)
rownames(Dam5RawTable) <- alternative_names
colnames(Dam5RawTable) <- criteria_inputs
Dam5NormTable <- setDT(data.frame(Ind_NormalizedMatrix[, , 5]))
row.names(Dam5NormTable) <- alternative_names
colnames(Dam5NormTable) <- criteria_inputs
Dam5ScoreTable <- setDT(Dam5Results)
row.names(Dam5ScoreTable) <- alternative_names
colnames(Dam5ScoreTable) <- criteria_inputs

# Millinocket / Dam 6 output table(s)  (comment previously mislabeled "Dolby")
Dam6RawTable <- setDT(Mill_DataMatrix)
rownames(Dam6RawTable) <- alternative_names
colnames(Dam6RawTable) <- criteria_inputs
Dam6NormTable <- setDT(data.frame(Ind_NormalizedMatrix[, , 6]))
row.names(Dam6NormTable) <- alternative_names
colnames(Dam6NormTable) <- criteria_inputs
Dam6ScoreTable <- setDT(Dam6Results)
row.names(Dam6ScoreTable) <- alternative_names
colnames(Dam6ScoreTable) <- criteria_inputs

# Millinocket Lake / Dam 7 output table(s)
Dam7RawTable <- setDT(MillLake_DataMatrix)
rownames(Dam7RawTable) <- alternative_names
colnames(Dam7RawTable) <- criteria_inputs
Dam7NormTable <- setDT(data.frame(Ind_NormalizedMatrix[, , 7]))
row.names(Dam7NormTable) <- alternative_names
colnames(Dam7NormTable) <- criteria_inputs
Dam7ScoreTable <- setDT(Dam7Results)
row.names(Dam7ScoreTable) <- alternative_names
colnames(Dam7ScoreTable) <- criteria_inputs  # FIX: was colnames(Dam4ScoreTable)

# Ripogenus / Dam 8 output table(s)
Dam8RawTable <- setDT(Rip_DataMatrix)
rownames(Dam8RawTable) <- alternative_names
colnames(Dam8RawTable) <- criteria_inputs
Dam8NormTable <- setDT(data.frame(Ind_NormalizedMatrix[, , 8]))
row.names(Dam8NormTable) <- alternative_names
colnames(Dam8NormTable) <- criteria_inputs
Dam8ScoreTable <- setDT(Dam8Results)
row.names(Dam8ScoreTable) <-
alternative_names  # completes the `row.names(Dam8ScoreTable) <-` begun on the previous line
colnames(Dam8ScoreTable) <- criteria_inputs

# -------------------------------------------------------
# Bars for ALL dam MCDA score results
# -------------------------------------------------------
Score_compare <- as.matrix(Ind_scoresum)
colnames(Score_compare) <- alternative_names
rownames(Score_compare) <- dam_names

# Graph ALL DAM alternative scores with adjacent bars grouped by dam
WSMPlota <- barplot(t(Score_compare), ylim = c(0, 100),
                    main = "Dam Decision Recommendation Comparison",
                    ylab = "MCDA Score", beside = TRUE, col = rainbow(5),
                    cex.axis = 0.8, names.arg = dam_names, cex = 0.7)
# legend at the top-left corner with no frame, rainbow colors
legend("topleft",
       c("RemoveDam", "ImproveFish", "ImproveHydro", "Improve FishANDHydro", "KeepMaintain"),
       cex = 0.6, bty = "n", fill = rainbow(5))

# stacked bars for ALL dam MCDA scores (broken down by criteria)
CritAlt <- as.matrix(Ind_WeightedScoreMatrix)
colnames(CritAlt) <- criteria_inputs
WSMPlotb <- barplot(t(CritAlt), ylim = c(0, 100),
                    main = "Dam Decision Alternative Comparison",
                    ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                    names.arg = c(alternative_names, alternative_names, alternative_names,
                                  alternative_names, alternative_names, alternative_names,
                                  alternative_names, alternative_names),
                    cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14))

# -------------------------------------------------------
# West Enfield Dam results
# -------------------------------------------------------
Score1 <- as.matrix(Ind_scoresum[1, ])
rownames(Score1) <- alternative_names
WSMPlot1a <- barplot((Score1), ylim = c(0, 100),
                     main = "West Enfield Dam Recommendation",
                     ylab = "Decision Alternative Score",
                     names.arg = alternative_names, beside = TRUE, col = rainbow(5))
legend("topleft",
       c("RemoveDam", "ImproveFish", "ImproveHydro", "Improve FishANDHydro", "KeepMaintain"),
       cex = 0.6, bty = "n", fill = rainbow(5))

# alternatives broken down by criteria, West Enfield
CritAlt1 <- as.matrix(Dam1Results)
colnames(CritAlt1) <- criteria_inputs
rownames(CritAlt1) <- alternative_names
WSMPlot1b <- barplot(t(CritAlt1), ylim = c(0, 100), main = "West Enfield Dam",
                     ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                     names.arg = alternative_names, cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14))

# -------------------------------------------------------
# Medway Dam results
# -------------------------------------------------------
Score2 <- as.matrix(Ind_scoresum[2, ])
rownames(Score2) <- alternative_names
WSMPlot2a <- barplot((Score2), ylim = c(0, 100),
                     main = "Medway Dam Recommendation",
                     ylab = "Decision Alternative Score",
                     names.arg = alternative_names, beside = TRUE, col = rainbow(5))
legend("topleft",
       c("RemoveDam", "ImproveFish", "ImproveHydro", "Improve FishANDHydro", "KeepMaintain"),
       cex = 0.6, bty = "n", fill = rainbow(5))

# alternatives broken down by criteria, Medway
CritAlt2 <- as.matrix(Dam2Results)
colnames(CritAlt2) <- criteria_inputs
rownames(CritAlt2) <- alternative_names
WSMPlot2b <- barplot(t(CritAlt2), ylim = c(0, 100), main = "Medway Dam",
                     ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                     names.arg = alternative_names, cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14))

# -------------------------------------------------------
# East Millinocket Dam results  (comment previously mislabeled "Millinocket/Quakish")
# -------------------------------------------------------
Score3 <- as.matrix(Ind_scoresum[3, ])
rownames(Score3) <- alternative_names
WSMPlot3a <- barplot((Score3), ylim = c(0, 100),
                     main = "East Millinocket Dam Recommendation",
                     ylab = "Decision Alternative Score",
                     names.arg = alternative_names, beside = TRUE, col = rainbow(5))
legend("topleft",
       c("RemoveDam", "ImproveFish", "ImproveHydro", "Improve FishANDHydro", "KeepMaintain"),
       cex = 0.6, bty = "n", fill = rainbow(5))

CritAlt3 <- as.matrix(Dam3Results)
colnames(CritAlt3) <- criteria_inputs
rownames(CritAlt3) <- alternative_names
# FIX: title typo "East Millnocket" corrected
WSMPlot3b <- barplot(t(CritAlt3), ylim = c(0, 100), main = "East Millinocket Dam",
                     ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                     names.arg = alternative_names, cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14))

# -------------------------------------------------------
# Dolby Dam results  (comment previously mislabeled "East Millinocket")
# -------------------------------------------------------
Score4 <- as.matrix(Ind_scoresum[4, ])
rownames(Score4) <- alternative_names
WSMPlot4a <- barplot((Score4), ylim = c(0, 100),
                     main = "Dolby Dam Recommendation",
                     ylab = "Decision Alternative Score",
                     names.arg = alternative_names, beside = TRUE, col = rainbow(5))
legend("topleft",
       c("RemoveDam", "ImproveFish", "ImproveHydro", "Improve FishANDHydro", "KeepMaintain"),
       cex = 0.6, bty = "n", fill = rainbow(5))

CritAlt4 <- as.matrix(Dam4Results)
colnames(CritAlt4) <- criteria_inputs
rownames(CritAlt4) <- alternative_names
WSMPlot4b <- barplot(t(CritAlt4), ylim = c(0, 100), main = "Dolby Dam",
                     ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                     names.arg = alternative_names, cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14))

# -------------------------------------------------------
# North Twin Dam results
# -------------------------------------------------------
Score5 <- as.matrix(Ind_scoresum[5, ])
rownames(Score5) <- alternative_names
WSMPlot5a <- barplot((Score5), ylim = c(0, 100),
                     main = "North Twin Dam Recommendation",
                     ylab = "Decision Alternative Score",
                     names.arg = alternative_names, beside = TRUE, col = rainbow(5))
legend("topleft",
       c("RemoveDam", "ImproveFish", "ImproveHydro", "Improve FishANDHydro", "KeepMaintain"),
       cex = 0.6, bty = "n", fill = rainbow(5))

CritAlt5 <- as.matrix(Dam5Results)
colnames(CritAlt5) <- criteria_inputs
rownames(CritAlt5) <- alternative_names
WSMPlot5b <- barplot(t(CritAlt5), ylim = c(0, 100), main = "North Twin Dam",
                     ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                     names.arg = alternative_names, cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14))

# -------------------------------------------------------
# Millinocket/Quakish Dam results  (comment previously mislabeled "Dolby")
# -------------------------------------------------------
Score6 <- as.matrix(Ind_scoresum[6, ])
rownames(Score6) <- alternative_names
WSMPlot6a <- barplot((Score6), ylim = c(0, 100),
                     main = "Millinocket/Quakish Dam Recommendation",
                     ylab = "Decision Alternative Score",
                     names.arg = alternative_names, beside = TRUE, col = rainbow(5))
legend("topleft",
       c("RemoveDam", "ImproveFish", "ImproveHydro", "Improve FishANDHydro", "KeepMaintain"),
       cex = 0.6, bty = "n", fill = rainbow(5))

CritAlt6 <- as.matrix(Dam6Results)
colnames(CritAlt6) <- criteria_inputs
rownames(CritAlt6) <- alternative_names
WSMPlot6b <- barplot(t(CritAlt6), ylim = c(0, 100), main = "Millinocket/Quakish Dam",
                     ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                     names.arg = alternative_names, cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14))

# -------------------------------------------------------
# Millinocket Lake Dam results
# -------------------------------------------------------
Score7 <- as.matrix(Ind_scoresum[7, ])
rownames(Score7) <- alternative_names
WSMPlot7a <- barplot((Score7), ylim = c(0, 100),
                     main = "Millinocket Lake Dam Recommendation",
                     ylab = "Decision Alternative Score",
                     names.arg = alternative_names, beside = TRUE, col = rainbow(5))
legend("topleft",
       c("RemoveDam", "ImproveFish", "ImproveHydro", "Improve FishANDHydro", "KeepMaintain"),
       cex = 0.6, bty = "n", fill = rainbow(5))

CritAlt7 <- as.matrix(Dam7Results)
colnames(CritAlt7) <- criteria_inputs
rownames(CritAlt7) <- alternative_names
WSMPlot7b <- barplot(t(CritAlt7), ylim = c(0, 100), main = "Millinocket Lake Dam",
                     ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                     names.arg = alternative_names, cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14))

# -------------------------------------------------------
# Ripogenus Dam results
# -------------------------------------------------------
Score8 <- as.matrix(Ind_scoresum[8, ])
rownames(Score8) <- alternative_names
WSMPlot8a <- barplot((Score8), ylim = c(0, 100),
                     main = "Ripogenus Dam Recommendation",
                     ylab = "Decision Alternative Score",
                     names.arg = alternative_names, beside = TRUE, col = rainbow(5))
legend("topleft",
       c("RemoveDam", "ImproveFish", "ImproveHydro", "Improve FishANDHydro", "KeepMaintain"),
       cex = 0.6, bty = "n", fill = rainbow(5))

CritAlt8 <- as.matrix(Dam8Results)
colnames(CritAlt8) <- criteria_inputs
rownames(CritAlt8) <- alternative_names
WSMPlot8b <- barplot(t(CritAlt8), ylim = c(0, 100), main = "Ripogenus Dam",
                     ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                     names.arg = alternative_names, cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14))

# -------------------------------------------------------
# Top scenario for ALL dams across ALL scenarios
# NOTE(review): WeightedScoreMatrix, idxScen and scoresum_total are not defined
# in this script — presumably created by WSM.R / the sourced environment.
# Verify; otherwise this section errors at runtime.
# -------------------------------------------------------
Dam1Scen <- t(WeightedScoreMatrix[1, , ])
Dam2Scen <- t(WeightedScoreMatrix[2, , ])
Dam3Scen <- t(WeightedScoreMatrix[3, , ])
Dam4Scen <- t(WeightedScoreMatrix[4, , ])
Dam5Scen <- t(WeightedScoreMatrix[5, , ])
Dam6Scen <- t(WeightedScoreMatrix[6, , ])
Dam7Scen <- t(WeightedScoreMatrix[7, , ])
Dam8Scen <- t(WeightedScoreMatrix[8, , ])
MCDASum <- data.frame(cbind(Dam1Scen, Dam2Scen, Dam3Scen, Dam4Scen,
                            Dam5Scen, Dam6Scen, Dam7Scen, Dam8Scen,
                            idxScen, scoresum_total))
MCDASum <- data.frame(setorder(MCDASum, -scoresum_total))
MCDASum_forGraph <- t(MCDASum[1, ])
DamsTopScenGraph <- data.frame(cbind(MCDASum_forGraph[1:14], MCDASum_forGraph[15:28],
                                     MCDASum_forGraph[29:42], MCDASum_forGraph[43:56],
                                     MCDASum_forGraph[57:70], MCDASum_forGraph[71:84],
                                     MCDASum_forGraph[85:98], MCDASum_forGraph[99:112]))
DamsTopScenGraph <- as.matrix(DamsTopScenGraph)
colnames(DamsTopScenGraph) <- dam_names
rownames(DamsTopScenGraph) <- criteria_inputs
WSMPlot9 <- barplot((DamsTopScenGraph), ylim = c(0, 100), main = "Top Dam Scenario",
                    ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                    names.arg = dam_names, cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14))
/src/dams_mcda/WSM_graphs_test.R
no_license
dams-mcda/Dams-MCDA
R
false
false
30,324
r
# calls WSM, for testing WSM interactively outside of server

# set working directory only if it exists
# NOTE(review): setwd() to a hard-coded user path is fragile; kept because this
# script is explicitly an interactive test harness.
has_wd <- tryCatch({
  workdir <- "~/Beatrice2/R_ELF/R_NEST/MCDA_App_Shiny/MCDA_06262019/src/dams_mcda"
  setwd(workdir)
  message("set Working Directory: ", workdir)
}, error = function(e) {
  message("Working Directory does NOT exist.")
})

source("WSM.R")
library(abind)
library(data.table)

# dataset for the individual dams: rows = dams, cols = criteria
DamsData <- read.csv('DamsData_Workshop.csv')
DamsData <- data.frame(DamsData)

# test preference data for 8 dams, 14 criteria each
TestData <- read.csv('EqualPrefs_forLiveSite.csv', row.names = "DAM")
RawCriteriaMatrix <- data.frame(TestData)

# criteria input identifiers
criteria_inputs <- c(
  "FishBiomass", "RiverRec", "Reservoir", "ProjectCost", "BreachDamage",
  "NumProperties", "ElectricityGeneration", "AvoidEmissions",
  "IndigenousLifeways", "IndustrialHistory", "CommunityIdentity",
  "Aesthetics", "Health", "Justice"
)

# dam display names
dam_names <- as.list(c('WestEnfield', 'Medway', 'E.Millinocket', 'Dolby',
                       'North Twin', 'Millinocket', 'Millinocket Lake', 'Ripogenus'))

# alternative display names (for labeling tables and graphs)
alternative_names <- as.list(c(
  "Remove Dam", "Improve Fish Passage", "Improve Hydro",
  "Hydro And Fish", "Keep and Maintain Dam"
))

# list of dams / alternatives (seq_len replaces the redundant seq(1:n))
available_dams <- seq_len(8)
available_alternatives <- seq_len(5)

# matrix setup
matrix_cols     <- length(criteria_inputs)         # 14 criteria (output size)
matrix_rows     <- length(available_dams)          # 8 dams
matrix_levs_ind <- length(available_alternatives)  # 5 decision alternatives
message("Decision Criteria", matrix_cols, "Dams", matrix_rows,
        "Decision Alternatives", matrix_levs_ind)

# ----------------------------------------
# SINGLE DAM PROCEDURE FOR PREFERENCES
#
# Build Preference Matrix with blank levels, no normalization;
# score will be raw values from 0-1 based on user input.
# Ind_* prefix: individual/single dams — the 5 decision alternatives are
# treated locally, not as part of the larger set of 8 dams.
# ----------------------------------------
Ind_PrefMatrix <- array(data = NA, dim = c(8, 14))  # blank dams x criteria matrix (2-D here; replicated to 3-D below)
message("Fill User Preference Matrix")

# copy user weights into the matrix
for (n in seq_len(matrix_rows)) {      # dams (rows)
  for (k in seq_len(matrix_cols)) {    # criteria (columns)
    x <- RawCriteriaMatrix[n, k]
    Ind_PrefMatrix[n, k] <- tryCatch({
      (x)
    }, error = function(e) {
      (NA)
    })
  }  # end criteria (columns) loop — FIX: loop-end comments were swapped
}    # end dams (rows) loop

# replicate the 2-D preference matrix across the 5 alternatives -> 3-D
Ind_PrefMatrix <- array(rep(Ind_PrefMatrix, 5), dim = c(dim(Ind_PrefMatrix), 5))
message("fill Ind Pref Matrix")

# subset by dam (row) and transpose to [alternative, criterion]
WestEnf_PrefMatrix  <- data.frame(t(subset(Ind_PrefMatrix[1, , ])))
Med_PrefMatrix      <- data.frame(t(subset(Ind_PrefMatrix[2, , ])))
EastMill_PrefMatrix <- data.frame(t(subset(Ind_PrefMatrix[3, , ])))
Dolby_PrefMatrix    <- data.frame(t(subset(Ind_PrefMatrix[4, , ])))
NorthTw_PrefMatrix  <- data.frame(t(subset(Ind_PrefMatrix[5, , ])))
Mill_PrefMatrix     <- data.frame(t(subset(Ind_PrefMatrix[6, , ])))
MillLake_PrefMatrix <- data.frame(t(subset(Ind_PrefMatrix[7, , ])))
Rip_PrefMatrix      <- data.frame(t(subset(Ind_PrefMatrix[8, , ])))

# ----------------------------------------
# SINGLE DAM DATA NORMALIZATION PROCEDURE
# Data normalization using Min/Max vectors.
# Retrieve criteria values for each dam as a 3D matrix [dams, criteria, alternatives].
# Normalization procedure:
#   - per dam, per criterion, find min and max over alternatives
#   - benefit values: norm = (f - f_min) / (f_max - f_min)
#   - cost values (like cost): norm = 1 - (f - f_min) / (f_max - f_min)
# Result: 3D matrix with dam-specific criteria values normalized by the
# min/max sampled over all alternatives.
# ----------------------------------------
Ind_DamsDataMatrix <- array(data = NA, dim = c(8, 14, 5))  # empty 3-D array in target shape

# per-alternative criteria columns assembled from the raw dataset
Remove <- cbind(DamsData$FishBiomass_Remove, DamsData$RiverRec_Rem, DamsData$ResStorage_Rem,
                DamsData$Cost_Remove, DamsData$Damage_Rem, DamsData$Properties_Rem,
                DamsData$AvgAnnualGen_Rem, DamsData$EmissionsReduc_Rem, DamsData$Culture_Remove,
                DamsData$History_Remove, DamsData$Community_Remove, DamsData$Aesthetics_Remove,
                DamsData$Health_Remove, DamsData$Justice_Remove)
Improve_Fish <- cbind(DamsData$FishBiomass_ImproveFish, DamsData$RiverRec, DamsData$ResStorage,
                      DamsData$Cost_ImproveFish, DamsData$Damage, DamsData$Properties,
                      DamsData$AvgAnnualGen, DamsData$EmissionsReduc, DamsData$Culture_ImproveFish,
                      DamsData$History_ImproveFish, DamsData$Community_ImproveFish,
                      DamsData$Aesthetics_ImproveFish, DamsData$Health_ImproveFish,
                      DamsData$Justice_ImproveFish)
Improve_Hydro <- cbind(DamsData$FishBiomass_ImproveHydro, DamsData$RiverRec, DamsData$ResStorage,
                       DamsData$Cost_ImproveHydro, DamsData$Damage, DamsData$Properties,
                       DamsData$AvgAnnualGen_Add, DamsData$EmissionsReduc_Add,
                       DamsData$Culture_ImproveHydro, DamsData$History_ImproveHydro,
                       DamsData$Community_ImproveHydro, DamsData$Aesthetics_ImproveHydro,
                       DamsData$Health_ImproveHydro, DamsData$Justice_ImproveHydro)
FishANDHydro <- cbind(DamsData$FishBiomass_FishANDHydro, DamsData$RiverRec, DamsData$ResStorage,
                      DamsData$Cost_FishANDHydro, DamsData$Damage, DamsData$Properties,
                      DamsData$AvgAnnualGen_Add, DamsData$EmissionsReduc_Add,
                      DamsData$Culture_FishANDHydro, DamsData$History_FishANDHydro,
                      DamsData$Community_FishANDHydro, DamsData$Aesthetics_FishANDHydro,
                      DamsData$Health_FishANDHydro, DamsData$Justice_FishANDHydro)
KeepMaintain <- cbind(DamsData$FishBiomass_KeepMaintain, DamsData$RiverRec, DamsData$ResStorage,
                      DamsData$Cost_KeepMaintain, DamsData$Damage, DamsData$Properties,
                      DamsData$AvgAnnualGen, DamsData$EmissionsReduc, DamsData$Culture_KeepMaintain,
                      DamsData$History_KeepMaintain, DamsData$Community_KeepMaintain,
                      DamsData$Aesthetics_KeepMaintain, DamsData$Health_KeepMaintain,
                      DamsData$Justice_KeepMaintain)

# this abind creates our 3D matrix [dam, criterion, alternative]
Ind_DamsDataMatrix <- abind(Remove, Improve_Fish, Improve_Hydro, FishANDHydro, KeepMaintain,
                            along = 3, force.array = TRUE)

# ------------------------ SUBSET BY DAM (row) --------------------------
WestEnf_DataMatrix  <- data.frame(t(subset(Ind_DamsDataMatrix[1, , ])))
Med_DataMatrix      <- data.frame(t(subset(Ind_DamsDataMatrix[2, , ])))
EastMill_DataMatrix <- data.frame(t(subset(Ind_DamsDataMatrix[3, , ])))
Dolby_DataMatrix    <- data.frame(t(subset(Ind_DamsDataMatrix[4, , ])))
NorthTw_DataMatrix  <- data.frame(t(subset(Ind_DamsDataMatrix[5, , ])))
Mill_DataMatrix     <- data.frame(t(subset(Ind_DamsDataMatrix[6, , ])))
MillLake_DataMatrix <- data.frame(t(subset(Ind_DamsDataMatrix[7, , ])))
Rip_DataMatrix      <- data.frame(t(subset(Ind_DamsDataMatrix[8, , ])))

# collect per-dam slices into one [alternative, criterion, dam] array
AllDataMatrix <- array(data = NA, dim = c(5, 14, 8))
AllDataMatrix <- provideDimnames(AllDataMatrix, sep = "_",
                                 base = list("alternative", "criterion", "dam"))
AllDataMatrix[, , 1] <- simplify2array(WestEnf_DataMatrix)
AllDataMatrix[, , 2] <- simplify2array(Med_DataMatrix)
AllDataMatrix[, , 3] <- simplify2array(EastMill_DataMatrix)
AllDataMatrix[, , 4] <- simplify2array(Dolby_DataMatrix)
AllDataMatrix[, , 5] <- simplify2array(NorthTw_DataMatrix)
AllDataMatrix[, , 6] <- simplify2array(Mill_DataMatrix)
AllDataMatrix[, , 7] <- simplify2array(MillLake_DataMatrix)
AllDataMatrix[, , 8] <- simplify2array(Rip_DataMatrix)

# -------- NORMALIZATION FOR INDIVIDUAL DAMS RESULTS -------------------
# iterate each dam & criterion for min, max over alternatives
MaxVectors <- array(data = NA, dim = c(matrix_cols, matrix_rows))
MinVectors <- array(data = NA, dim = c(matrix_cols, matrix_rows))
for (p in seq_len(matrix_rows)) {
  # FIX: original used list("list", matrix_cols); vector("list", n) is the
  # correct preallocation (result was identical only by accident of overwrite)
  min_vector_list <- vector("list", matrix_cols)
  max_vector_list <- vector("list", matrix_cols)
  for (k in seq_len(matrix_cols)) {
    # debug: if (p == 1) message("dam ", p, " column ", k, " vector ", AllDataMatrix[, k, p])
    min_vector_list[[k]] <- min(AllDataMatrix[, k, p], na.rm = FALSE)
    max_vector_list[[k]] <- max(AllDataMatrix[, k, p], na.rm = FALSE)
  }
  MaxVectors[, p] <- unlist(max_vector_list)
  MinVectors[, p] <- unlist(min_vector_list)
}
# message("min vector for dam 1 ", MinVectors[, 1])
# message("max vector for dam 1 ", MaxVectors[, 1])

# ----------------------------------------
# SINGLE DAM WEIGHTING PROCEDURE
# Build Weighting Matrix for ind.
# dams — score will be min/max normalized values from 0-1.
# Columns treated as "costs" (lower raw value is better):
# 4 = ProjectCost, 5 = BreachDamage, 6 = NumProperties.
min_crit_columns <- c(4, 5, 6)

# ----------------------------------------
# Normalize every value in the 3-D matrix [alternative, criterion, dam]
# using the per-dam min/max sampled over all alternatives.
# ----------------------------------------
Ind_NormalizedMatrix <- array(data = NA, dim = c(matrix_levs_ind, matrix_cols, matrix_rows))

for (k in seq_len(matrix_cols)) {
  for (n in seq_len(matrix_rows)) {
    for (p in seq_len(matrix_levs_ind)) {
      x          <- AllDataMatrix[p, k, n]
      crit_min_x <- MinVectors[k, n]
      crit_max_x <- MaxVectors[k, n]
      # debug:
      # if (n == 1) message("NormalMatrx dam ", n, " criteria ", k, " alt ", p,
      #                     " min ", crit_min_x, " max ", crit_max_x)
      Ind_NormalizedMatrix[p, k, n] <- tryCatch({
        if (k %in% min_crit_columns) {
          # cost criterion: invert so the LOWEST raw value scores 1
          # (original comments had the maximize/minimize labels swapped)
          (1 - (x - crit_min_x) / (crit_max_x - crit_min_x))
        } else {
          # benefit criterion: standard normalization, highest raw value scores 1
          ((x - crit_min_x) / (crit_max_x - crit_min_x))
        }
      }, error = function(e) {
        (NA)
      })
    }
  }
}

# helper: locate NaN cells (0/0 when a criterion is constant across alternatives)
is.nan.data.frame <- function(a) {
  do.call(cbind, lapply(a, is.nan))
}
Ind_NormalizedMatrix[is.nan.data.frame(Ind_NormalizedMatrix)] <- 0

# Manual overrides for criteria that are constant across alternatives
Ind_NormalizedMatrix[2:5, 6, 3] <- c(1, 1, 1, 1) # properties NaN at East Millinocket
# Ind_NormalizedMatrix[1, 5, 3] <- 1             # damage 0 value for Remove at East Millinocket
Ind_NormalizedMatrix[1, 1, 2]   <- 1             # fish habitat NaN at Medway
Ind_NormalizedMatrix[5, 3, 1:3] <- 1             # reservoir storage NaN at West Enfield, Medway, East Millinocket
Ind_NormalizedMatrix[1, 2, 7]   <- 1             # river rec NaN at Millinocket Lake
# message('Ind_Normalized column ', Ind_NormalizedMatrix[1, , 1])

# ----------------------------------------
# SINGLE DAM WEIGHTING PROCEDURE
# elementwise product of normalized scores and user preference weights
# ----------------------------------------
Dam1Results <- (Ind_NormalizedMatrix[, , 1] * (WestEnf_PrefMatrix))
Dam2Results <- (Ind_NormalizedMatrix[, , 2] * (Med_PrefMatrix))
Dam3Results <- (Ind_NormalizedMatrix[, , 3] * (EastMill_PrefMatrix))
Dam4Results <- (Ind_NormalizedMatrix[, , 4] * (Dolby_PrefMatrix))
Dam5Results <- (Ind_NormalizedMatrix[, , 5] * (NorthTw_PrefMatrix))
Dam6Results <- (Ind_NormalizedMatrix[, , 6] * (Mill_PrefMatrix))
Dam7Results <- (Ind_NormalizedMatrix[, , 7] * (MillLake_PrefMatrix))
Dam8Results <- (Ind_NormalizedMatrix[, , 8] * (Rip_PrefMatrix))

# store all results in one data structure
WeightedResults <- array(data = NA, dim = c(matrix_levs_ind, matrix_cols, matrix_rows))
WeightedResults[, , 1] <- as.matrix(Dam1Results)
WeightedResults[, , 2] <- as.matrix(Dam2Results)
WeightedResults[, , 3] <- as.matrix(Dam3Results)
WeightedResults[, , 4] <- as.matrix(Dam4Results)
WeightedResults[, , 5] <- as.matrix(Dam5Results)
WeightedResults[, , 6] <- as.matrix(Dam6Results)  # FIX: this slice was assigned twice; duplicate removed
WeightedResults[, , 7] <- as.matrix(Dam7Results)
WeightedResults[, , 8] <- as.matrix(Dam8Results)
WeightedResults <- round(WeightedResults, 0)

# sum scores: one total per (dam, alternative)
ScoreSums <- array(data = NA, dim = c(matrix_rows, matrix_levs_ind))
for (damid in seq_len(matrix_rows)) {
  for (j in seq_len(matrix_levs_ind)) {
    # debug:
    # if (damid == 1) message("Scoresum dam: ", damid, " j ", j,
    #                         " to sum ", WeightedResults[j, , damid])
    ScoreSums[damid, j] <- sum(as.numeric(WeightedResults[j, , damid]))
  }
}

# Ind ScoreSum
Ind_scoresum <- round(as.data.frame(ScoreSums, rownames = dam_names), 0)
message("Ind_scoresum ", Ind_scoresum)
rownames(Ind_scoresum) <- dam_names
colnames(Ind_scoresum) <- alternative_names

# Ind WeightedScoreMatrix: all dams stacked, rows = dam x alternative
Ind_WeightedScoreMatrix <- as.data.frame(rbind(Dam1Results, Dam2Results, Dam3Results,
                                               Dam4Results, Dam5Results, Dam6Results,
                                               Dam7Results, Dam8Results))
colnames(Ind_WeightedScoreMatrix) <- criteria_inputs

# call WSM
results <- WSM(RawCriteriaMatrix, DamsData, Ind_WeightedScoreMatrix, Ind_scoresum)
# results are list(Ind_WeightedScoreMatrix, Ind_scoresum, scoresum_total, fname)
#message("WSM Results", results)

#----------------------------------------
# Final Outputs: TABLES
#----------------------------------------

# Dam display names (row labels for tables and graphs).
dam_names <- as.list(c('WestEnfield','Medway','E.Millinocket','Dolby','North Twin','Millinocket','Millinocket Lake','Ripogenus'))

# Alternative display names (for labeling tables and graphs).
alternative_names <- as.list(c("Remove Dam", "Improve Fish Passage", "Improve Hydro", "Improve Hydro AND Fish", "Keep and Maintain Dam"))

# Per-dam raw-data and weighted-result objects, in dam order 1..8.
dam_raw_list <- list(WestEnf_DataMatrix, Med_DataMatrix, EastMill_DataMatrix,
                     Dolby_DataMatrix, NorthTw_DataMatrix, Mill_DataMatrix,
                     MillLake_DataMatrix, Rip_DataMatrix)
dam_results_list <- list(Dam1Results, Dam2Results, Dam3Results, Dam4Results,
                         Dam5Results, Dam6Results, Dam7Results, Dam8Results)

# Build Dam<i>RawTable / Dam<i>NormTable / Dam<i>ScoreTable for each dam i.
# NOTE(review): the eight copy-pasted table sections were collapsed into this
# loop.  Two quirks of the original are handled explicitly:
#   * only dam 1's Norm/Score tables were rounded and scaled by 100 -- that
#     difference is preserved here (presumably a leftover; confirm intent);
#   * the dam 7 section set colnames(Dam4ScoreTable) a second time instead of
#     Dam7ScoreTable -- fixed here so Dam 7's score table gets column names.
for (i in seq_len(matrix_rows)) {
  raw_tab <- setDT(dam_raw_list[[i]])
  rownames(raw_tab) <- alternative_names
  colnames(raw_tab) <- criteria_inputs
  assign(paste0("Dam", i, "RawTable"), raw_tab)

  if (i == 1) {
    norm_tab  <- setDT(data.frame(round(Ind_NormalizedMatrix[, , i], 1) * 100))
    score_tab <- setDT(round(dam_results_list[[i]], 1) * 100)
  } else {
    norm_tab  <- setDT(data.frame(Ind_NormalizedMatrix[, , i]))
    score_tab <- setDT(dam_results_list[[i]])
  }
  rownames(norm_tab) <- alternative_names
  colnames(norm_tab) <- criteria_inputs
  assign(paste0("Dam", i, "NormTable"), norm_tab)

  rownames(score_tab) <- alternative_names
  colnames(score_tab) <- criteria_inputs
  assign(paste0("Dam", i, "ScoreTable"), score_tab)
}

#-------------------------------------------------------
## Bars for ALL dam MCDA score results.
Score_compare <- as.matrix(Ind_scoresum)
colnames(Score_compare) <- alternative_names
rownames(Score_compare) <- dam_names

# Graph ALL-dam alternative scores with adjacent bars grouped by dam.
WSMPlota <- barplot(t(Score_compare), ylim = c(0, 100),
                    main = "Dam Decision Recommendation Comparison",
                    ylab = "MCDA Score", beside = TRUE, col = rainbow(5),
                    cex.axis = 0.8, names.arg = dam_names, cex = 0.7)
# Legend at the top-left corner with no frame, using rainbow colors.
legend("topleft",
       c("RemoveDam", "ImproveFish", "ImproveHydro", "Improve FishANDHydro", "KeepMaintain"),
       cex = 0.6, bty = "n", fill = rainbow(5));

#-----------------------------------------------
# Stacked bars for ALL dam MCDA scores (broken down by criteria).
CritAlt <- as.matrix(Ind_WeightedScoreMatrix)
colnames(CritAlt) <- criteria_inputs

# One group of five alternative bars per dam (matrix_rows dams).
WSMPlotb <- barplot(t(CritAlt), ylim = c(0, 100),
                    main = "Dam Decision Alternative Comparison",
                    ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                    names.arg = rep(alternative_names, times = matrix_rows), cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14));

#--------------------------------------------------------
## West Enfield (dam 1) recommendation bars.
# NOTE(review): Score1 is a 1 x 5 matrix but is assigned 5 row names --
# verify this ran without error in the original environment.
Score1 <- as.matrix(Ind_scoresum[1, ])
rownames(Score1) <- alternative_names

WSMPlot1a <- barplot((Score1), ylim = c(0, 100),
                     main = "West Enfield Dam Recommendation",
                     ylab = "Decision Alternative Score",
                     names.arg = alternative_names, beside = TRUE, col = rainbow(5))
# Place the legend at the top-left corner with no frame
# using rainbow colors (legend for the West Enfield recommendation plot above)
legend("topleft",
       c("RemoveDam", "ImproveFish", "ImproveHydro", "Improve FishANDHydro", "KeepMaintain"),
       cex = 0.6, bty = "n", fill = rainbow(5));

#-------------------------------------------------------
# Per-dam plots.  For each dam:
#   (a) a recommendation bar chart of the five alternative scores, and
#   (b) a stacked bar chart of each alternative's score by criterion.
# NOTE(review): the eight near-identical copy-pasted sections were collapsed
# into this loop.  Plot order and titles reproduce the originals (the
# "East Millnocket" typo in the dam-3 title was corrected); dam 1's "(a)"
# plot was already drawn above, so the loop only adds its "(b)" plot.
plot_titles <- c("West Enfield", "Medway", "East Millinocket", "Dolby",
                 "North Twin", "Millinocket/Quakish", "Millinocket Lake",
                 "Ripogenus")
dam_result_list <- list(Dam1Results, Dam2Results, Dam3Results, Dam4Results,
                        Dam5Results, Dam6Results, Dam7Results, Dam8Results)
alt_legend <- c("RemoveDam", "ImproveFish", "ImproveHydro",
                "Improve FishANDHydro", "KeepMaintain")

for (i in seq_len(matrix_rows)) {
  # (a) overall recommendation scores (dam 1's version was drawn above).
  if (i > 1) {
    score_i <- as.matrix(Ind_scoresum[i, ])
    rownames(score_i) <- alternative_names
    assign(paste0("Score", i), score_i)
    assign(paste0("WSMPlot", i, "a"),
           barplot(score_i, ylim = c(0, 100),
                   main = paste(plot_titles[i], "Dam Recommendation"),
                   ylab = "Decision Alternative Score",
                   names.arg = alternative_names, beside = TRUE, col = rainbow(5)))
    # Legend at the top-left corner with no frame, using rainbow colors.
    legend("topleft", alt_legend, cex = 0.6, bty = "n", fill = rainbow(5));
  }

  # (b) alternative scores broken down by criterion.
  crit_alt_i <- as.matrix(dam_result_list[[i]])
  colnames(crit_alt_i) <- criteria_inputs
  rownames(crit_alt_i) <- alternative_names
  assign(paste0("CritAlt", i), crit_alt_i)
  assign(paste0("WSMPlot", i, "b"),
         barplot(t(crit_alt_i), ylim = c(0, 100),
                 main = paste(plot_titles[i], "Dam"),
                 ylab = "MCDA Score", col = rainbow(14), cex.axis = 0.8, las = 1,
                 names.arg = alternative_names, cex = 0.7))
  legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14));
}

#--------------------------------------------------------
# Graph the top scenario for ALL dams across ALL scenarios.
# NOTE(review): WeightedScoreMatrix, idxScen and scoresum_total are not
# defined in this chunk -- presumably produced by the scenario-level analysis
# (the WSM() call above returns scoresum_total); confirm against the full file.
for (i in seq_len(matrix_rows)) {
  assign(paste0("Dam", i, "Scen"), t(WeightedScoreMatrix[i, , ]))
}
MCDASum <- data.frame(cbind(Dam1Scen, Dam2Scen, Dam3Scen, Dam4Scen,
                            Dam5Scen, Dam6Scen, Dam7Scen, Dam8Scen,
                            idxScen, scoresum_total))
# Sort scenarios by total score, best first.
MCDASum <- data.frame(setorder(MCDASum, -scoresum_total))
# Best scenario as a single column: 14 criteria x 8 dams = 112 values,
# reshaped to a criteria-by-dam matrix.
MCDASum_forGraph <- t(MCDASum[1, ])
DamsTopScenGraph <- data.frame(matrix(MCDASum_forGraph[1:112], nrow = 14))
DamsTopScenGraph <- as.matrix(DamsTopScenGraph)
colnames(DamsTopScenGraph) <- dam_names
rownames(DamsTopScenGraph) <- criteria_inputs

WSMPlot9 <- barplot((DamsTopScenGraph), ylim = c(0, 100),
                    main = "Top Dam Scenario", ylab = "MCDA Score",
                    col = rainbow(14), cex.axis = 0.8, las = 1,
                    names.arg = dam_names, cex = 0.7)
legend("topleft", criteria_inputs, cex = 0.6, bty = "n", fill = rainbow(14))
## 12 Sept 2017 - Cat
# Duration of Vegetativ Risk: Experiment data
# Aim: To see if chlorophyll measures are different with experimental groups

# Clear workspace
rm(list=ls()) # remove everything currently held in the R memory
options(stringsAsFactors=FALSE)
graphics.off()

# Load libraries
library(dplyr)
library(tidyr)
library(ggplot2)
library(lubridate)
library(lme4)
library(arm)

# Set Working Directory
setwd("~/Documents/git/freezingexperiment/analyses")

# Input data: bud traits, bud-burst data, leaf areas and dry weights for SLA.
d <-read.csv("input/buds_traits.csv", header=TRUE)
bb<-read.csv("output/birches_buddata.csv", header=TRUE)
area<-read.csv("input/SLA_buds.csv", header=TRUE)
wt<-read.csv("input/SLA_weight.csv", header=TRUE)

## Start working on columns
# Keep the chlorophyll measurements; number the buds within each individual
# and build an "<individual>_<bud>" ID that matches the bud-burst table.
dx<-dplyr::select(d, NEW, TX, Bud, Chlorophyll)
dx$bud <- ave(dx$Bud, dx$NEW, FUN = seq_along)
dx$species<-substr(dx$NEW, 1, 6) # species code = first six characters of the ID
dx<-filter(dx, species !="SAMRAC")
dx<-dx[!is.na(dx$Chlorophyll),]
dx$ID<-paste(dx$NEW, dx$bud, sep="_")

# Transfer chlorophyll values into the bud-burst table by matching IDs.
# NOTE(review): O(n*m) pairwise scan; if an ID repeats in dx the LAST match
# wins -- confirm IDs are unique.
bb$chlorophyll<-NA
for(i in c(1:nrow(bb))){
  for(j in c(1:nrow(dx)))
    if(bb$ID[i]==dx$ID[j])
      bb$chlorophyll[i]<-dx$Chlorophyll[j]
}

# Linear model: chlorophyll ~ frost treatment + bud number + species,
# plus the frost x bud interaction.
chl.mod<-lm(chlorophyll~frost+bud+species+frost*bud, data=bb)
display(chl.mod)

# Quick visual checks of chlorophyll by species/treatment and by bud number.
qplot(species, chlorophyll, data = bb, geom = "boxplot", color=tx) +
  xlab("Species")+ylab("Chlorophyll")
ggplot(bb, aes(x=bud, y=chlorophyll, color=as.factor(frost))) +
  geom_point() +
  geom_smooth(method="lm")

# Binary frost indicator from the treatment code (TX == "A" is the control).
dx$frz<-ifelse(dx$TX=="A", 0, 1)
dx<-na.omit(dx)
dx$spp<-substr(dx$NEW, 0,6)

# Mean chlorophyll per individual and treatment group.
sp.mean<-aggregate(Chlorophyll~NEW + frz, dx, mean)
sp.mean$species<-substr(sp.mean$NEW, 0, 6)

qplot(spp, Chlorophyll, data = dx, geom = "boxplot", color=TX) +
  xlab("Species")+ylab("Chlorophyll measurement")

# Mixed model: frost effect with a random intercept per species.
mod<-lmer(Chlorophyll~frz + (1|spp), data=dx)
summary(mod)

# Distribution checks by treatment group (per-bud and per-burst tables).
hist(dx$Chlorophyll[dx$frz==0])
hist(dx$Chlorophyll[dx$frz==1])
hist(bb$chlorophyll[bb$frost==0])
hist(bb$chlorophyll[bb$frost==1])

#write.csv(bb, file=("~/Documents/git/freezingexperiment/analyses/output/buds_traits.csv"), row.names=FALSE)

# Sample sizes per individual among buds with chlorophyll data.
bb.chl<-bb[!is.na(bb$chlorophyll),]
table(bb.chl$individ)

## SLA (specific leaf area) section ------------------------------------
# Attach dry weights to leaf areas by ID (ID gains the leaf-number suffix
# so it matches the weight table's IDs).
area$ID<-paste(area$ID, area$number, sep=".")
area$dry<-NA
for(i in c(1:nrow(area))){
  for(j in c(1:nrow(wt)))
    if(area$ID[i]==wt$ID[j])
      area$dry[i]<-wt$dry[j]
}

# SLA = leaf area / dry mass, then averaged per individual
# (ID truncated back to the individual-level identifier).
area$sla<-area$Area/area$dry
area$ID<-substr(area$ID, 1, 10)
area$sla <- ave(area$sla, area$ID)
area<-dplyr::select(area, ID, sla)
area<-area[!duplicated(area),]

# Merge the per-individual SLA into the traits table.
bb<-read.csv("output/buds_traits.csv", header=TRUE)
bb$sla<-NA
for(i in c(1:nrow(bb))){
  for(j in c(1:nrow(area)))
    if(bb$individ[i]==area$ID[j])
      bb$sla[i]<-area$sla[j]
}

# Duration of vegetative risk: NA treated as 0, then averaged per individual.
bb$dvr<-as.numeric(bb$dvr)
bb$dvr<-ifelse(is.na(bb$dvr), 0, bb$dvr)
bb$dvr.avg<-ave(bb$dvr, bb$individ)

# SLA as a function of treatment and average DVR, with and without interaction.
sla.mod<-lm(sla~tx+dvr.avg, data=bb)
display(sla.mod)
sla.mod2<-lm(sla~tx+dvr.avg+tx*dvr.avg, data=bb)
display(sla.mod2)

qplot(species, sla, data = bb, geom = "boxplot", color=tx) +
  xlab("Species")+ylab("SLA")

#write.csv(bb, file=("~/Documents/git/freezingexperiment/analyses/output/buds_traits.csv"), row.names=FALSE)
/analyses/scripts/Chlorophyll.R
no_license
cchambe12/freezingexperiment
R
false
false
2,992
r
## 12 Sept 2017 - Cat # Duration of Vegetativ Risk: Experiment data # Aim: To see if chlorophyll measures are different with experimental groups # Clear workspace rm(list=ls()) # remove everything currently held in the R memory options(stringsAsFactors=FALSE) graphics.off() # Load libraries library(dplyr) library(tidyr) library(ggplot2) library(lubridate) library(lme4) library(arm) # Set Working Directory setwd("~/Documents/git/freezingexperiment/analyses") d <-read.csv("input/buds_traits.csv", header=TRUE) bb<-read.csv("output/birches_buddata.csv", header=TRUE) area<-read.csv("input/SLA_buds.csv", header=TRUE) wt<-read.csv("input/SLA_weight.csv", header=TRUE) ## Start working on columns dx<-dplyr::select(d, NEW, TX, Bud, Chlorophyll) dx$bud <- ave(dx$Bud, dx$NEW, FUN = seq_along) dx$species<-substr(dx$NEW, 1, 6) dx<-filter(dx, species !="SAMRAC") dx<-dx[!is.na(dx$Chlorophyll),] dx$ID<-paste(dx$NEW, dx$bud, sep="_") bb$chlorophyll<-NA for(i in c(1:nrow(bb))){ for(j in c(1:nrow(dx))) if(bb$ID[i]==dx$ID[j]) bb$chlorophyll[i]<-dx$Chlorophyll[j] } chl.mod<-lm(chlorophyll~frost+bud+species+frost*bud, data=bb) display(chl.mod) qplot(species, chlorophyll, data = bb, geom = "boxplot", color=tx) + xlab("Species")+ylab("Chlorophyll") ggplot(bb, aes(x=bud, y=chlorophyll, color=as.factor(frost))) + geom_point() + geom_smooth(method="lm") dx$frz<-ifelse(dx$TX=="A", 0, 1) dx<-na.omit(dx) dx$spp<-substr(dx$NEW, 0,6) sp.mean<-aggregate(Chlorophyll~NEW + frz, dx, mean) sp.mean$species<-substr(sp.mean$NEW, 0, 6) qplot(spp, Chlorophyll, data = dx, geom = "boxplot", color=TX) + xlab("Species")+ylab("Chlorophyll measurement") mod<-lmer(Chlorophyll~frz + (1|spp), data=dx) summary(mod) hist(dx$Chlorophyll[dx$frz==0]) hist(dx$Chlorophyll[dx$frz==1]) hist(bb$chlorophyll[bb$frost==0]) hist(bb$chlorophyll[bb$frost==1]) #write.csv(bb, file=("~/Documents/git/freezingexperiment/analyses/output/buds_traits.csv"), row.names=FALSE) bb.chl<-bb[!is.na(bb$chlorophyll),] table(bb.chl$individ) 
area$ID<-paste(area$ID, area$number, sep=".") area$dry<-NA for(i in c(1:nrow(area))){ for(j in c(1:nrow(wt))) if(area$ID[i]==wt$ID[j]) area$dry[i]<-wt$dry[j] } area$sla<-area$Area/area$dry area$ID<-substr(area$ID, 1, 10) area$sla <- ave(area$sla, area$ID) area<-dplyr::select(area, ID, sla) area<-area[!duplicated(area),] bb<-read.csv("output/buds_traits.csv", header=TRUE) bb$sla<-NA for(i in c(1:nrow(bb))){ for(j in c(1:nrow(area))) if(bb$individ[i]==area$ID[j]) bb$sla[i]<-area$sla[j] } bb$dvr<-as.numeric(bb$dvr) bb$dvr<-ifelse(is.na(bb$dvr), 0, bb$dvr) bb$dvr.avg<-ave(bb$dvr, bb$individ) sla.mod<-lm(sla~tx+dvr.avg, data=bb) display(sla.mod) sla.mod2<-lm(sla~tx+dvr.avg+tx*dvr.avg, data=bb) display(sla.mod2) qplot(species, sla, data = bb, geom = "boxplot", color=tx) + xlab("Species")+ylab("SLA") #write.csv(bb, file=("~/Documents/git/freezingexperiment/analyses/output/buds_traits.csv"), row.names=FALSE)
#rstudioapi::openProject("/home/rstudio/equity_analysis/")

# Deploy a saved backtest: copy its parameters and trial files into the
# working scripts/ and trials/ directories, then re-run the full pipeline.

# Ask which backtest folder (relative to the working directory) to deploy.
folder_number <- readline(prompt=paste("What is the name of this folder?", sep=" "))

# Paths inside the chosen backtest folder.
trials_path <- file.path(folder_number, "trials/")
parameters_path <- file.path(folder_number, "parameters.R")

# Overwrite the active parameters with the backtest's copy.
file.copy(parameters_path, "scripts/", overwrite=T)

# Recreate the trials directory and copy every trial file into it.
dir.create("trials")
fil <- list.files(trials_path, full.names = T)
file.copy(from = fil, to = "trials", overwrite = TRUE, recursive = FALSE, copy.mode = TRUE)

# Re-run the whole analysis with the deployed configuration.
source("scripts/0_all.R")
/backtests/04/deploy.R
no_license
riazarbi/equity_analysis_trials
R
false
false
504
r
#rstudioapi::openProject("/home/rstudio/equity_analysis/") folder_number <- readline(prompt=paste("What is the name of this folder?", sep=" ")) trials_path <- file.path(folder_number, "trials/") parameters_path <- file.path(folder_number, "parameters.R") file.copy(parameters_path, "scripts/", overwrite=T) dir.create("trials") fil <- list.files(trials_path, full.names = T) file.copy(from = fil, to = "trials", overwrite = TRUE, recursive = FALSE, copy.mode = TRUE) source("scripts/0_all.R")
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/pls.R
\name{selectCompNum.pls}
\alias{selectCompNum.pls}
\title{Select optimal number of components for PLS model}
\usage{
\method{selectCompNum}{pls}(model, ncomp = NULL)
}
\arguments{
\item{model}{PLS model (object of class \code{pls})}

\item{ncomp}{number of components to select}
}
\value{
the same model with the selected number of components
}
\description{
Allows the user to select the optimal number of components for a PLS model
}
\details{
If the number of components is not specified, Wold's R criterion is used.
See examples in help for the \code{\link{pls}} function.
}
/man/selectCompNum.pls.Rd
no_license
zeehio/mdatools
R
false
false
654
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/pls.R \name{selectCompNum.pls} \alias{selectCompNum.pls} \title{Select optimal number of components for PLS model} \usage{ \method{selectCompNum}{pls}(model, ncomp = NULL) } \arguments{ \item{model}{PLS model (object of class \code{pls})} \item{ncomp}{number of components to select} } \value{ the same model with selected number of components } \description{ Allows user to select optimal number of components for PLS model } \details{ If number of components is not specified, the Wold's R criterion is used. See examples in help for \code{\link{pls}} function. }
#' @title List Remote Packages
#'
#' @description
#' List the names of non-CRAN (remote) packages recorded in an `renv.lock`
#' file.  A package is considered remote when its lockfile entry either has
#' no `Repository` field or the field is not `"CRAN"`.
#'
#' @export
remote_packages = function(lock_file = "book/renv.lock") {
  # All package records from the lockfile's "Packages" section.
  pkgs <- jsonlite::read_json(lock_file)[["Packages"]]
  # Keep only entries that do not come from CRAN.
  is_remote <- function(pkg) {
    is.null(pkg$Repository) || pkg$Repository != "CRAN"
  }
  names(Filter(is_remote, pkgs))
}
/R/remote_packages.R
permissive
mlr-org/mlr3book
R
false
false
349
r
#' @title List Remote Packages #' #' @description #' List remote packages in `renv.lock` file. #' #' @export remote_packages = function(lock_file = "book/renv.lock") { packages = jsonlite::read_json(lock_file)[["Packages"]] packages = mlr3misc::discard(packages, function(x) !is.null(x$Repository) && x$Repository == "CRAN") names(packages) }
#' @title validMCMC
#' @author Oyvind Bleka <Oyvind.Bleka.at.fhi.no>
#' @description Validates aposteriori samples from MCMC method
#' @details This function takes samples from the MCMC as given in a matrix
#' (one column per model parameter) and shows the aposterior functions: for
#' each parameter the marginal posterior density is plotted against the
#' Gaussian approximation from the MLE fit, with optional trace and
#' autocorrelation panels.
#' @param mcmcfit A object returned by contLikMCMC
#' @param trace Boolean for whether showing trace of samples.
#' @param acf Boolean for whether showing autocorrelation function of samples.
#' @export
validMCMC <- function(mcmcfit,trace=TRUE,acf=TRUE) {
 txt <- colnames(mcmcfit$posttheta) # parameter names
 #Ubound <- mcmcfit$Ubound #upper boundaries of parameters
 # Empirical per-parameter bounds of the samples, used as density support.
 Ubound <- apply(mcmcfit$posttheta,2,max)
 Lbound <- apply(mcmcfit$posttheta,2,min)
 p <- length(txt) # number of parameters
 # Layout: one row per parameter; extra columns for trace/ACF if requested.
 par(mfrow=c(p,1+sum(c(trace,acf)) ),mar = c(1.2,1,1,0.2), mgp = c(0,0.2,0))
 for(i in 1:p) {
  # Kernel density of the posterior samples on the observed range.
  # NOTE(review): n=max(Ubound[i],1024) ties the grid size to the parameter's
  # upper bound -- presumably meant as "at least 1024 grid points"; confirm.
  dens <- density(mcmcfit$posttheta[,i],from=Lbound[i],to=Ubound[i],n=max(Ubound[i],1024))
  xrange <- range(mcmcfit$posttheta[,i])
  # Normal approximation centred at the MLE, as a reference curve.
  mled <-dnorm(dens$x,mcmcfit$MLE[i],sqrt(mcmcfit$Sigma[i,i])) #density of lazy bayes
  # Posterior density (solid) with the normal approximation overlaid (dashed red).
  plot(dens$x,dens$y,ty="l",main=txt[i],xlab="",ylab="",ylim=c(0,max(mled,dens$y)),xlim=xrange )
  lines(dens$x,mled,col=2,lty=2,ylab="",xlab="")
  if(trace) plot(mcmcfit$posttheta[,i],ty="l",ylab="",xlab="")
  if(acf) acf(mcmcfit$posttheta[,i],lag.max=200,ylab="",xlab="")
 }
 # NOTE(review): the block below opens a blank device only to read default
 # par settings, closes it, then applies them to the current device --
 # i.e. it resets the mfrow/mar changes above.  Verify this is intended.
 dev.new()
 op <- par(no.readonly = TRUE)
 dev.off()
 par(op)
}
/euroformix_0.5.0/R/validMCMC.R
no_license
oyvble/euroformixArchive
R
false
false
1,350
r
#' @title validMCMC #' @author Oyvind Bleka <Oyvind.Bleka.at.fhi.no> #' @description Validates aposteriori samples from MCMC method #' @details This function takes samples from the MCMC as given in a matrix and shows the aposterior functions. #' @param mcmcfit A object returned by contLikMCMC #' @param trace Boolean for whether showing trace of samples. #' @param acf Boolean for whether showing autocorrelation function of samples. #' @export validMCMC <- function(mcmcfit,trace=TRUE,acf=TRUE) { txt <- colnames(mcmcfit$posttheta) #Ubound <- mcmcfit$Ubound #upper boundaries of parameters Ubound <- apply(mcmcfit$posttheta,2,max) Lbound <- apply(mcmcfit$posttheta,2,min) p <- length(txt) par(mfrow=c(p,1+sum(c(trace,acf)) ),mar = c(1.2,1,1,0.2), mgp = c(0,0.2,0)) for(i in 1:p) { dens <- density(mcmcfit$posttheta[,i],from=Lbound[i],to=Ubound[i],n=max(Ubound[i],1024)) xrange <- range(mcmcfit$posttheta[,i]) mled <-dnorm(dens$x,mcmcfit$MLE[i],sqrt(mcmcfit$Sigma[i,i])) #density of lazy bayes plot(dens$x,dens$y,ty="l",main=txt[i],xlab="",ylab="",ylim=c(0,max(mled,dens$y)),xlim=xrange ) lines(dens$x,mled,col=2,lty=2,ylab="",xlab="") if(trace) plot(mcmcfit$posttheta[,i],ty="l",ylab="",xlab="") if(acf) acf(mcmcfit$posttheta[,i],lag.max=200,ylab="",xlab="") } dev.new() op <- par(no.readonly = TRUE) dev.off() par(op) }
library(moments)  # NOTE(review): not used by errMeasure1; kept because other
library(Metrics)  # parts of this file may rely on these being attached.

#' Mean reciprocal rank (capped at rank 12) of targets within predictions.
#'
#' Each prediction is a space-separated ranked list of items.  For every row
#' the (whitespace-trimmed) target is looked up in that list; a hit at rank
#' ix (ix <= 12) scores 1/ix, otherwise 0.  Returns the mean score over all
#' rows (i.e. MRR@12).
#'
#' @param vPred character vector; each element a space-separated ranked list.
#' @param vTarget character vector of the same length; expected item per row.
#' @return single numeric: the mean reciprocal rank.
errMeasure1 <- function(vPred, vTarget) {
  # Tokenize each prediction string once.
  token_lists <- strsplit(as.character(vPred), split = " ")
  scores <- mapply(function(tokens, target) {
    # match() returns the FIRST position of the target.  The original used
    # which(), whose multi-element result made `ix <= 12` a vector inside
    # `&&` (an error in R >= 4.3) and `1/(ix)` a vector when a token repeats.
    ix <- match(trimws(target), tokens)
    if (!is.na(ix) && ix <= 12) 1 / ix else 0
  }, token_lists, as.character(vTarget))
  mean(scores)
}
/R/futil.R
no_license
tudor-m/Kaggle-OutbrainClick
R
false
false
335
r
library(moments) library(Metrics) errMeasure1 <- function (vPred, vTarget) { a = cbind(vPred,vTarget); ret = apply(a,MARGIN=1,function(x) {v=unlist(strsplit(unlist(x[1]),split=" ")); ix = which(v==trimws(x[2]));r=0; if(length(ix) != 0 && ix<=12) r=1/(ix); return(r)}); #return(list(mean(ret),ret)) return(mean(ret)); }
\name{beard} \alias{dbeard} \alias{pbeard} \alias{varbeard} \alias{esbeard} \title{Beard distribution} \description{Computes the pdf, cdf, value at risk and expected shortfall for the Beard distribution due to Beard (1959) given by \deqn{\begin{array}{ll} &\displaystyle f(x) = \frac {\displaystyle a \exp (b x) \left[ 1 + a \rho \right]^{\rho^{-1/b}}} {\displaystyle \left[ 1 + a \rho \exp (b x) \right]^{1 + \rho^{-1/b}}}, \\ &\displaystyle F (x) = 1 - \frac {\displaystyle \left[ 1 + a \rho \right]^{\rho^{-1/b}}} {\displaystyle \left[ 1 + a \rho \exp (b x) \right]^{\rho^{-1/b}}}, \\ &\displaystyle {\rm VaR}_p (X) = \frac {1}{b} \log \left[ \frac {1 + a \rho}{a \rho (1 - p)^{\rho^{1 / b}}} - \frac {1}{a \rho} \right], \\ &\displaystyle {\rm ES}_p (X) = \frac {1}{p b} \int_0^p \log \left[ -\frac {1}{a \rho} + \frac {1 + a \rho}{a \rho (1 - v)^{\rho^{1 / b}}} \right] dv \end{array}} for \eqn{x > 0}, \eqn{0 < p < 1}, \eqn{a > 0}, the first scale parameter, \eqn{b > 0}, the second scale parameter, and \eqn{\rho > 0}, the shape parameter.} \usage{ dbeard(x, a=1, b=1, rho=1, log=FALSE) pbeard(x, a=1, b=1, rho=1, log.p=FALSE, lower.tail=TRUE) varbeard(p, a=1, b=1, rho=1, log.p=FALSE, lower.tail=TRUE) esbeard(p, a=1, b=1, rho=1) } \arguments{ \item{x}{scaler or vector of values at which the pdf or cdf needs to be computed} \item{p}{scaler or vector of values at which the value at risk or expected shortfall needs to be computed} \item{a}{the value of the first scale parameter, must be positive, the default is 1} \item{b}{the value of the second scale parameter, must be positive, the default is 1} \item{rho}{the value of the shape parameter, must be positive, the default is 1} \item{log}{if TRUE then log(pdf) are returned} \item{log.p}{if TRUE then log(cdf) are returned and quantiles are computed for exp(p)} \item{lower.tail}{if FALSE then 1-cdf are returned and quantiles are computed for 1-p} } \value{An object of the same length as \code{x}, giving the pdf or cdf values 
computed at \code{x} or an object of the same length as \code{p}, giving the values at risk or expected shortfall computed at \code{p}.} \references{Stephen Chan, Saralees Nadarajah & Emmanuel Afuecheta (2016). An R Package for Value at Risk and Expected Shortfall, Communications in Statistics - Simulation and Computation, 45:9, 3416-3434, \doi{10.1080/03610918.2014.944658}} \author{Saralees Nadarajah} \examples{x=runif(10,min=0,max=1) dbeard(x) pbeard(x) varbeard(x) esbeard(x)}
/man/beard.Rd
no_license
cran/VaRES
R
false
false
2,547
rd
\name{beard} \alias{dbeard} \alias{pbeard} \alias{varbeard} \alias{esbeard} \title{Beard distribution} \description{Computes the pdf, cdf, value at risk and expected shortfall for the Beard distribution due to Beard (1959) given by \deqn{\begin{array}{ll} &\displaystyle f(x) = \frac {\displaystyle a \exp (b x) \left[ 1 + a \rho \right]^{\rho^{-1/b}}} {\displaystyle \left[ 1 + a \rho \exp (b x) \right]^{1 + \rho^{-1/b}}}, \\ &\displaystyle F (x) = 1 - \frac {\displaystyle \left[ 1 + a \rho \right]^{\rho^{-1/b}}} {\displaystyle \left[ 1 + a \rho \exp (b x) \right]^{\rho^{-1/b}}}, \\ &\displaystyle {\rm VaR}_p (X) = \frac {1}{b} \log \left[ \frac {1 + a \rho}{a \rho (1 - p)^{\rho^{1 / b}}} - \frac {1}{a \rho} \right], \\ &\displaystyle {\rm ES}_p (X) = \frac {1}{p b} \int_0^p \log \left[ -\frac {1}{a \rho} + \frac {1 + a \rho}{a \rho (1 - v)^{\rho^{1 / b}}} \right] dv \end{array}} for \eqn{x > 0}, \eqn{0 < p < 1}, \eqn{a > 0}, the first scale parameter, \eqn{b > 0}, the second scale parameter, and \eqn{\rho > 0}, the shape parameter.} \usage{ dbeard(x, a=1, b=1, rho=1, log=FALSE) pbeard(x, a=1, b=1, rho=1, log.p=FALSE, lower.tail=TRUE) varbeard(p, a=1, b=1, rho=1, log.p=FALSE, lower.tail=TRUE) esbeard(p, a=1, b=1, rho=1) } \arguments{ \item{x}{scaler or vector of values at which the pdf or cdf needs to be computed} \item{p}{scaler or vector of values at which the value at risk or expected shortfall needs to be computed} \item{a}{the value of the first scale parameter, must be positive, the default is 1} \item{b}{the value of the second scale parameter, must be positive, the default is 1} \item{rho}{the value of the shape parameter, must be positive, the default is 1} \item{log}{if TRUE then log(pdf) are returned} \item{log.p}{if TRUE then log(cdf) are returned and quantiles are computed for exp(p)} \item{lower.tail}{if FALSE then 1-cdf are returned and quantiles are computed for 1-p} } \value{An object of the same length as \code{x}, giving the pdf or cdf values 
computed at \code{x} or an object of the same length as \code{p}, giving the values at risk or expected shortfall computed at \code{p}.} \references{Stephen Chan, Saralees Nadarajah & Emmanuel Afuecheta (2016). An R Package for Value at Risk and Expected Shortfall, Communications in Statistics - Simulation and Computation, 45:9, 3416-3434, \doi{10.1080/03610918.2014.944658}} \author{Saralees Nadarajah} \examples{x=runif(10,min=0,max=1) dbeard(x) pbeard(x) varbeard(x) esbeard(x)}
#'Add percent in column
#'
#' @description
#' col2percent transforms a range of data-frame columns into percent strings
#' by appending a "%" sign, optionally multiplying by 100 first when the
#' values are decimal fractions.
#'
#' @param x a dataframe
#' @param start number of start column
#' @param end number of last column (default=last)
#' @param mult100 multiply by 100 if the number is a decimal fraction(T or F)(default=F)
#'
#' @return Return a dataframe with transformed columns (the selected columns
#'   become character vectors such as "15%").
#' @export
#'
#' @examples
#' v=data.frame(c(15,5,20,50,10))
#' col2percent(v,start=1)
#'
#' v=data.frame(c(0.15,0.05,0.2,0.5,0.1))
#' col2percent(v,start=1,mult100=TRUE)
#'
col2percent=function(x,start,end=ncol(x),mult100=FALSE){
  a=x
  if(isTRUE(mult100)){
    # Coerce the columns to numeric and scale decimal fractions to percent.
    a=metools::col2num(a,start,end)
    a[,start:end]=a[,start:end]*100
  }
  # Append the percent sign; hoisted out of the if/else so the paste step
  # is written once instead of being duplicated in both branches.
  a[,start:end]=mapply(paste0,a[,start:end],"%")
  a
}
/R/col2percent.R
no_license
jvg0mes/metools
R
false
false
838
r
#'Add percent in column
#'
#' @description
#' col2percent transforms a range of data-frame columns into percent strings
#' by appending a "%" sign, optionally multiplying by 100 first when the
#' values are decimal fractions.
#'
#' @param x a dataframe
#' @param start number of start column
#' @param end number of last column (default=last)
#' @param mult100 multiply by 100 if the number is a decimal fraction(T or F)(default=F)
#'
#' @return Return a dataframe with transformed columns (the selected columns
#'   become character vectors such as "15%").
#' @export
#'
#' @examples
#' v=data.frame(c(15,5,20,50,10))
#' col2percent(v,start=1)
#'
#' v=data.frame(c(0.15,0.05,0.2,0.5,0.1))
#' col2percent(v,start=1,mult100=TRUE)
#'
col2percent=function(x,start,end=ncol(x),mult100=FALSE){
  a=x
  if(isTRUE(mult100)){
    # Coerce the columns to numeric and scale decimal fractions to percent.
    a=metools::col2num(a,start,end)
    a[,start:end]=a[,start:end]*100
  }
  # Append the percent sign; hoisted out of the if/else so the paste step
  # is written once instead of being duplicated in both branches.
  a[,start:end]=mapply(paste0,a[,start:end],"%")
  a
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataset_functions.R \name{makeGRangesBRG} \alias{makeGRangesBRG} \alias{isBRG} \title{Constructing and checking for base-pair resolution GRanges objects} \usage{ makeGRangesBRG(dataset.gr, ncores = getOption("mc.cores", 2L)) isBRG(x) } \arguments{ \item{dataset.gr}{A disjoint GRanges object, or a list of such objects.} \item{ncores}{If \code{dataset.gr} is a list, the number of cores to use for computations.} \item{x}{Object to be tested.} } \value{ \code{makeGRangesBRG} returns a GRanges object for which \code{length(output) == sum(width(dataset.gr))}, and for which \code{all(width(output) == 1)}. \code{isBRG(x)} returns \code{TRUE} if \code{x} is a GRanges object with the above characteristics. } \description{ \code{makeGRangesBRG} splits up all ranges in \code{dataset.gr} to be each 1 basepair wide. For any range that is split up, all metadata information belonging to that range is inherited by its daughter ranges, and therefore the transformation is non-destructive. \code{isBRG} checks whether an object is a basepair resolution GRanges object. } \details{ Note that \code{makeGRangesBRG} doesn't perform any transformation on the metadata in the input. This function assumes that for an input GRanges object, any metadata for each range is equally correct when inherited by each individual base in that range. In other words, the dataset's "signal" (usually readcounts) fundamentally belongs to a single basepair position. } \section{Motivation}{ The motivating case for this function is a bigWig file (e.g. one imported by \code{rtracklayer}), as bigWig files typically use run-length compression on the data signal (the 'score' column), such that adjacent bases sharing the same signal are combined into a single range. 
As basepair-resolution genomic data is typically sparse, this compression has a minimal impact on memory usage, and removing it greatly enhances data handling as each index (each range) of the GRanges object corresponds to a single genomic position. } \section{Generating basepair-resolution GRanges from whole reads}{ If working with a GRanges object containing whole reads, one can obtain base-pair resolution information by using the strand-specific function \code{\link[GenomicRanges:intra-range-methods]{GenomicRanges::resize}} to select a single base from each read: set \code{width = 1} and use the \code{fix} argument to choose the strand-specific 5' or 3' end. Then, strand-specific coverage can be calculated using \code{\link[BRGenomics:getStrandedCoverage]{getStrandedCoverage}}. } \section{On the use of GRanges instead of GPos}{ The \code{\link[GenomicRanges:GPos-class]{GPos}} class is a more suitable container for data of this type, as the GPos class is specific to 1-bp-wide ranges. However, in early testing, we encountered some kind of compatibility limitations with the newer GPos class, and have not re-tested it since. If you have feedback on switching to this class, please contact the author. Users can readily coerce a basepair-resolution GRanges object to a GPos object via \code{gp <- GPos(gr, score = score(gr))}. 
} \examples{ if (.Platform$OS.type == "unix") { #--------------------------------------------------# # Make a bigWig file single width #--------------------------------------------------# # get local address for an included bigWig file bw_file <- system.file("extdata", "PROseq_dm6_chr4_plus.bw", package = "BRGenomics") # BRGenomics::import_bigWig automatically applies makeGRangesBRG; # therefore will import using rtracklayer bw <- rtracklayer::import.bw(bw_file) strand(bw) <- "+" range(width(bw)) length(bw) # make basepair-resolution (single-width) gr <- makeGRangesBRG(bw) isBRG(gr) range(width(gr)) length(gr) length(gr) == sum(width(bw)) sum(score(gr)) == sum(score(bw) * width(bw)) #--------------------------------------------------# # Reverse using getStrandedCoverage #--------------------------------------------------# # -> for more examples, see getStrandedCoverage undo <- getStrandedCoverage(gr, ncores = 1) isBRG(undo) range(width(undo)) length(undo) == length(bw) all(score(undo) == score(bw)) } } \seealso{ \code{\link[BRGenomics:getStrandedCoverage]{getStrandedCoverage}}, \code{\link[GenomicRanges:intra-range-methods]{GenomicRanges::resize()}} } \author{ Mike DeBerardine }
/man/makeGRangesBRG.Rd
no_license
mdeber/BRGenomics
R
false
true
4,578
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataset_functions.R \name{makeGRangesBRG} \alias{makeGRangesBRG} \alias{isBRG} \title{Constructing and checking for base-pair resolution GRanges objects} \usage{ makeGRangesBRG(dataset.gr, ncores = getOption("mc.cores", 2L)) isBRG(x) } \arguments{ \item{dataset.gr}{A disjoint GRanges object, or a list of such objects.} \item{ncores}{If \code{dataset.gr} is a list, the number of cores to use for computations.} \item{x}{Object to be tested.} } \value{ \code{makeGRangesBRG} returns a GRanges object for which \code{length(output) == sum(width(dataset.gr))}, and for which \code{all(width(output) == 1)}. \code{isBRG(x)} returns \code{TRUE} if \code{x} is a GRanges object with the above characteristics. } \description{ \code{makeGRangesBRG} splits up all ranges in \code{dataset.gr} to be each 1 basepair wide. For any range that is split up, all metadata information belonging to that range is inherited by its daughter ranges, and therefore the transformation is non-destructive. \code{isBRG} checks whether an object is a basepair resolution GRanges object. } \details{ Note that \code{makeGRangesBRG} doesn't perform any transformation on the metadata in the input. This function assumes that for an input GRanges object, any metadata for each range is equally correct when inherited by each individual base in that range. In other words, the dataset's "signal" (usually readcounts) fundamentally belongs to a single basepair position. } \section{Motivation}{ The motivating case for this function is a bigWig file (e.g. one imported by \code{rtracklayer}), as bigWig files typically use run-length compression on the data signal (the 'score' column), such that adjacent bases sharing the same signal are combined into a single range. 
As basepair-resolution genomic data is typically sparse, this compression has a minimal impact on memory usage, and removing it greatly enhances data handling as each index (each range) of the GRanges object corresponds to a single genomic position. } \section{Generating basepair-resolution GRanges from whole reads}{ If working with a GRanges object containing whole reads, one can obtain base-pair resolution information by using the strand-specific function \code{\link[GenomicRanges:intra-range-methods]{GenomicRanges::resize}} to select a single base from each read: set \code{width = 1} and use the \code{fix} argument to choose the strand-specific 5' or 3' end. Then, strand-specific coverage can be calculated using \code{\link[BRGenomics:getStrandedCoverage]{getStrandedCoverage}}. } \section{On the use of GRanges instead of GPos}{ The \code{\link[GenomicRanges:GPos-class]{GPos}} class is a more suitable container for data of this type, as the GPos class is specific to 1-bp-wide ranges. However, in early testing, we encountered some kind of compatibility limitations with the newer GPos class, and have not re-tested it since. If you have feedback on switching to this class, please contact the author. Users can readily coerce a basepair-resolution GRanges object to a GPos object via \code{gp <- GPos(gr, score = score(gr))}. 
} \examples{ if (.Platform$OS.type == "unix") { #--------------------------------------------------# # Make a bigWig file single width #--------------------------------------------------# # get local address for an included bigWig file bw_file <- system.file("extdata", "PROseq_dm6_chr4_plus.bw", package = "BRGenomics") # BRGenomics::import_bigWig automatically applies makeGRangesBRG; # therefore will import using rtracklayer bw <- rtracklayer::import.bw(bw_file) strand(bw) <- "+" range(width(bw)) length(bw) # make basepair-resolution (single-width) gr <- makeGRangesBRG(bw) isBRG(gr) range(width(gr)) length(gr) length(gr) == sum(width(bw)) sum(score(gr)) == sum(score(bw) * width(bw)) #--------------------------------------------------# # Reverse using getStrandedCoverage #--------------------------------------------------# # -> for more examples, see getStrandedCoverage undo <- getStrandedCoverage(gr, ncores = 1) isBRG(undo) range(width(undo)) length(undo) == length(bw) all(score(undo) == score(bw)) } } \seealso{ \code{\link[BRGenomics:getStrandedCoverage]{getStrandedCoverage}}, \code{\link[GenomicRanges:intra-range-methods]{GenomicRanges::resize()}} } \author{ Mike DeBerardine }
#!/usr/bin/Rscript

# addNCBIamr2Gff.R
# Merge NCBI AMRFinder annotations into an existing GFF file: features whose
# ID attribute matches an AMRFinder protein identifier get extra NDARO
# key=value pairs appended to column 9, plus an extra source/feature tag.

# Setting Help
'usage: addNCBIamr2Gff.R [--input=<file> --gff=<file> --out=<chr> --database=<chr> --type=<chr>]

options:
-g, --gff=<file> GFF file to add NCBI AMR Annotations into
-i, --input=<file> AMRFinder output
-o, --out=<chr> Output file name [default: out.gff]
-t, --type=<chr> Type of feature. Ex: resistance
-d, --database=<chr> Name of databased which Blast came from' -> doc

# Parse parameters
suppressMessages(library(docopt))
opt <- docopt(doc)

# Both the GFF and the AMRFinder report are mandatory.
if (is.null(opt$gff)){
  stop("At least one argument must be supplied (gff file)\n", call.=FALSE)
}
if (is.null(opt$input)){
  stop("At least one argument must be supplied (AMRFinder output file)\n", call.=FALSE)
}

# Load libraries
suppressMessages(library(ballgown))     # provides gffRead()
suppressMessages(library(DataCombine))
suppressMessages(library(dplyr))
suppressMessages(library(stringr))
suppressMessages(library(tidyr))

# Collapse duplicated comma-separated entries in a string,
# e.g. "CDS,CDS,resistance" -> "CDS,resistance".
reduce_row <- function(i) {
  d <- unlist(strsplit(i, split=","))
  paste(unique(d), collapse = ',')
}

# Extract the value of one key from a GFF attribute string (column 9,
# "key=value" pairs separated by `attrsep`); returns NA when the key
# is absent from a given record.
getAttributeField <- function (x, field, attrsep = ";") {
  s = strsplit(x, split = attrsep, fixed = TRUE)
  sapply(s, function(atts) {
    a = strsplit(atts, split = "=", fixed = TRUE)
    m = match(field, sapply(a, "[", 1))
    if (!is.na(m)) {
      rv = a[[m]][2]
    }
    else {
      rv = as.character(NA)
    }
    return(rv)
  })
}

# Operator to discard patterns found
'%ni%' <- Negate('%in%')

# Load GFF File and index it by the ID attribute.
gff <- gffRead(opt$gff)
gff$ID <- getAttributeField(as.character(gff$attributes), "ID", ";")

# Load NCBI AMRFinder output
NCBIamr <- read.delim(opt$input)

# FIX: the original test was `is.null(NCBIamr) == FALSE & dim(NCBIamr)[1] != 0`;
# a scalar `if` needs the short-circuit `&&`, and `!is.null()` / `nrow()`
# are the idiomatic forms.
if (!is.null(NCBIamr) && nrow(NCBIamr) > 0) {

  # Get its ids
  ids <- NCBIamr$Protein.identifier

  # Split the annotation into features with / without an AMR hit,
  # keeping only the nine canonical GFF columns.
  sub <- gff %>%
    filter(ID %in% ids) %>%
    select(seqname, source, feature, start, end, score, strand, frame, attributes)
  not <- gff %>%
    filter(ID %ni% ids) %>%
    select(seqname, source, feature, start, end, score, strand, frame, attributes)

  # Build the NDARO attribute block that gets appended to column 9.
  NCBIamr$description <- paste("Additional_database=NDARO;NDARO_Gene_Name=", NCBIamr$Gene.symbol, ";",
                               "NDARO_Gene_Product=", NCBIamr$Sequence.name, ";",
                               "NDARO_Resistance_Category=", NCBIamr$Element.type, ";",
                               "NDARO_Resistance_Target=", NCBIamr$Class, ";",
                               "NDARO_Method=", NCBIamr$Method, ";",
                               "NDARO_Closest_Sequence=", NCBIamr$Name.of.closest.sequence, sep = "")
  # GFF attribute values must not contain spaces.
  NCBIamr$description <- gsub(" ", "_", NCBIamr$description)

  ## Add New Source
  s <- sub$source
  sn <- opt$database
  snew <- paste(s, sn, sep = ",")
  sub$source <- snew

  ## Add New Feature
  f <- sub$feature
  fn <- opt$type
  fnew <- paste(f, fn, sep = ",")
  sub$feature <- fnew

  ## attributes: join the NDARO description onto column 9 via the ID.
  sub$ID <- getAttributeField(as.character(sub$attributes), "ID", ";")
  sub <- merge.data.frame(sub, NCBIamr, by.x = "ID", by.y = "Protein.identifier", all = TRUE)
  sub <- unite(sub, "attributes", c("attributes", "description"), sep = ";") %>%
    select(seqname, source, feature, start, end, score, strand, frame, attributes)

  # Merge annotated and untouched features back together, collapse the
  # duplicated source/feature tags, and sort by coordinate.
  merged_df <- merge.data.frame(sub, not, all = TRUE)
  feat <- merged_df$feature
  merged_df$feature <- sapply(feat, reduce_row)
  source <- merged_df$source
  merged_df$source <- sapply(source, reduce_row)
  merged_df <- merged_df[order(merged_df$seqname, merged_df$start),]

  # Write output
  write.table(merged_df, file = opt$out, quote = FALSE, sep = "\t",
              col.names = FALSE, row.names = FALSE)

} else {
  # No AMR hits: write the GFF back untouched. Re-read it so the helper
  # `ID` column added above does not leak into the output file.
  gff <- gffRead(opt$gff)
  write.table(gff, file = opt$out, quote = FALSE, sep = "\t",
              col.names = FALSE, row.names = FALSE)
}
/addNCBIamr2Gff.R
no_license
fmalmeida/rscripts
R
false
false
3,740
r
#!/usr/bin/Rscript

# addNCBIamr2Gff.R
# Merge NCBI AMRFinder annotations into an existing GFF file: features whose
# ID attribute matches an AMRFinder protein identifier get extra NDARO
# key=value pairs appended to column 9, plus an extra source/feature tag.

# Setting Help
'usage: addNCBIamr2Gff.R [--input=<file> --gff=<file> --out=<chr> --database=<chr> --type=<chr>]

options:
-g, --gff=<file> GFF file to add NCBI AMR Annotations into
-i, --input=<file> AMRFinder output
-o, --out=<chr> Output file name [default: out.gff]
-t, --type=<chr> Type of feature. Ex: resistance
-d, --database=<chr> Name of databased which Blast came from' -> doc

# Parse parameters
suppressMessages(library(docopt))
opt <- docopt(doc)

# Both the GFF and the AMRFinder report are mandatory.
if (is.null(opt$gff)){
  stop("At least one argument must be supplied (gff file)\n", call.=FALSE)
}
if (is.null(opt$input)){
  stop("At least one argument must be supplied (AMRFinder output file)\n", call.=FALSE)
}

# Load libraries
suppressMessages(library(ballgown))     # provides gffRead()
suppressMessages(library(DataCombine))
suppressMessages(library(dplyr))
suppressMessages(library(stringr))
suppressMessages(library(tidyr))

# Collapse duplicated comma-separated entries in a string,
# e.g. "CDS,CDS,resistance" -> "CDS,resistance".
reduce_row <- function(i) {
  d <- unlist(strsplit(i, split=","))
  paste(unique(d), collapse = ',')
}

# Extract the value of one key from a GFF attribute string (column 9,
# "key=value" pairs separated by `attrsep`); returns NA when the key
# is absent from a given record.
getAttributeField <- function (x, field, attrsep = ";") {
  s = strsplit(x, split = attrsep, fixed = TRUE)
  sapply(s, function(atts) {
    a = strsplit(atts, split = "=", fixed = TRUE)
    m = match(field, sapply(a, "[", 1))
    if (!is.na(m)) {
      rv = a[[m]][2]
    }
    else {
      rv = as.character(NA)
    }
    return(rv)
  })
}

# Operator to discard patterns found
'%ni%' <- Negate('%in%')

# Load GFF File and index it by the ID attribute.
gff <- gffRead(opt$gff)
gff$ID <- getAttributeField(as.character(gff$attributes), "ID", ";")

# Load NCBI AMRFinder output
NCBIamr <- read.delim(opt$input)

# FIX: the original test was `is.null(NCBIamr) == FALSE & dim(NCBIamr)[1] != 0`;
# a scalar `if` needs the short-circuit `&&`, and `!is.null()` / `nrow()`
# are the idiomatic forms.
if (!is.null(NCBIamr) && nrow(NCBIamr) > 0) {

  # Get its ids
  ids <- NCBIamr$Protein.identifier

  # Split the annotation into features with / without an AMR hit,
  # keeping only the nine canonical GFF columns.
  sub <- gff %>%
    filter(ID %in% ids) %>%
    select(seqname, source, feature, start, end, score, strand, frame, attributes)
  not <- gff %>%
    filter(ID %ni% ids) %>%
    select(seqname, source, feature, start, end, score, strand, frame, attributes)

  # Build the NDARO attribute block that gets appended to column 9.
  NCBIamr$description <- paste("Additional_database=NDARO;NDARO_Gene_Name=", NCBIamr$Gene.symbol, ";",
                               "NDARO_Gene_Product=", NCBIamr$Sequence.name, ";",
                               "NDARO_Resistance_Category=", NCBIamr$Element.type, ";",
                               "NDARO_Resistance_Target=", NCBIamr$Class, ";",
                               "NDARO_Method=", NCBIamr$Method, ";",
                               "NDARO_Closest_Sequence=", NCBIamr$Name.of.closest.sequence, sep = "")
  # GFF attribute values must not contain spaces.
  NCBIamr$description <- gsub(" ", "_", NCBIamr$description)

  ## Add New Source
  s <- sub$source
  sn <- opt$database
  snew <- paste(s, sn, sep = ",")
  sub$source <- snew

  ## Add New Feature
  f <- sub$feature
  fn <- opt$type
  fnew <- paste(f, fn, sep = ",")
  sub$feature <- fnew

  ## attributes: join the NDARO description onto column 9 via the ID.
  sub$ID <- getAttributeField(as.character(sub$attributes), "ID", ";")
  sub <- merge.data.frame(sub, NCBIamr, by.x = "ID", by.y = "Protein.identifier", all = TRUE)
  sub <- unite(sub, "attributes", c("attributes", "description"), sep = ";") %>%
    select(seqname, source, feature, start, end, score, strand, frame, attributes)

  # Merge annotated and untouched features back together, collapse the
  # duplicated source/feature tags, and sort by coordinate.
  merged_df <- merge.data.frame(sub, not, all = TRUE)
  feat <- merged_df$feature
  merged_df$feature <- sapply(feat, reduce_row)
  source <- merged_df$source
  merged_df$source <- sapply(source, reduce_row)
  merged_df <- merged_df[order(merged_df$seqname, merged_df$start),]

  # Write output
  write.table(merged_df, file = opt$out, quote = FALSE, sep = "\t",
              col.names = FALSE, row.names = FALSE)

} else {
  # No AMR hits: write the GFF back untouched. Re-read it so the helper
  # `ID` column added above does not leak into the output file.
  gff <- gffRead(opt$gff)
  write.table(gff, file = opt$out, quote = FALSE, sep = "\t",
              col.names = FALSE, row.names = FALSE)
}
# R_code_snow.r -- multitemporal analysis of Copernicus snow-cover rasters.

setwd("C:/lab/")
install.packages("ncdf4")  # NOTE(review): one-off install; comment out after first run
library(ncdf4)   # needed so raster() can read NetCDF files
library(raster)

# Import the May 2020 Copernicus snow-cover layer (NetCDF).
snowmay <- raster("c_gls_SCE500_202005180000_CEURO_MODIS_V1.0.1.nc")
# the warning message is normal

# Blue colour ramp for snow cover.
cl <- colorRampPalette(c('darkblue','blue','light blue'))(100)
# exercise: plot snow cover with the cl palette
plot(snowmay, col=cl)

### import snow data
setwd("C:/lab/snow")
# List every GeoTIFF in the folder (one per year).
rlist <- list.files(pattern=".tif")
rlist

# save raster into list
# with lapply
list_rast <- lapply(rlist, raster)
# Combine the yearly layers into a single multitemporal stack.
snow.multitemp <- stack(list_rast)
plot(snow.multitemp,col=cl)

# Compare the first (2000) and last (2020) layers side by side.
par(mfrow=c(1,2))
plot(snow.multitemp$snow2000r, col=cl)
plot(snow.multitemp$snow2020r, col=cl)

# Same comparison, but with a common colour scale (0-250) so the
# two panels are directly comparable.
par(mfrow=c(1,2))
plot(snow.multitemp$snow2000r, col=cl, zlim=c(0,250))
plot(snow.multitemp$snow2020r, col=cl, zlim=c(0,250))

# Per-pixel difference between 2020 and 2000, shown with a diverging palette.
difsnow = snow.multitemp$snow2020r - snow.multitemp$snow2000r
cldiff <- colorRampPalette(c('blue','white','red'))(100)
plot(difsnow, col=cldiff)

# prediction
# go to IOL and download prediction.r into the folder snow
source("prediction.r")  # the sourced script defines predicted.snow.2025.norm
plot(predicted.snow.2025.norm, col=cl)
# Reload the prediction from its exported GeoTIFF.
predicted.snow.2025.norm <- raster("predicted.snow.2025.norm.tif")
/R_code_snow.r
no_license
Giorgia-M/ecologia_del_paesaggio
R
false
false
1,126
r
# R_code_snow.r -- multitemporal analysis of Copernicus snow-cover rasters.

setwd("C:/lab/")
install.packages("ncdf4")  # NOTE(review): one-off install; comment out after first run
library(ncdf4)   # needed so raster() can read NetCDF files
library(raster)

# Import the May 2020 Copernicus snow-cover layer (NetCDF).
snowmay <- raster("c_gls_SCE500_202005180000_CEURO_MODIS_V1.0.1.nc")
# the warning message is normal

# Blue colour ramp for snow cover.
cl <- colorRampPalette(c('darkblue','blue','light blue'))(100)
# exercise: plot snow cover with the cl palette
plot(snowmay, col=cl)

### import snow data
setwd("C:/lab/snow")
# List every GeoTIFF in the folder (one per year).
rlist <- list.files(pattern=".tif")
rlist

# save raster into list
# with lapply
list_rast <- lapply(rlist, raster)
# Combine the yearly layers into a single multitemporal stack.
snow.multitemp <- stack(list_rast)
plot(snow.multitemp,col=cl)

# Compare the first (2000) and last (2020) layers side by side.
par(mfrow=c(1,2))
plot(snow.multitemp$snow2000r, col=cl)
plot(snow.multitemp$snow2020r, col=cl)

# Same comparison, but with a common colour scale (0-250) so the
# two panels are directly comparable.
par(mfrow=c(1,2))
plot(snow.multitemp$snow2000r, col=cl, zlim=c(0,250))
plot(snow.multitemp$snow2020r, col=cl, zlim=c(0,250))

# Per-pixel difference between 2020 and 2000, shown with a diverging palette.
difsnow = snow.multitemp$snow2020r - snow.multitemp$snow2000r
cldiff <- colorRampPalette(c('blue','white','red'))(100)
plot(difsnow, col=cldiff)

# prediction
# go to IOL and download prediction.r into the folder snow
source("prediction.r")  # the sourced script defines predicted.snow.2025.norm
plot(predicted.snow.2025.norm, col=cl)
# Reload the prediction from its exported GeoTIFF.
predicted.snow.2025.norm <- raster("predicted.snow.2025.norm.tif")
# pilot.R -- exploratory plots of pilot scoring data.
# NOTE(review): `dat` (and, presumably via it, `Pilot`) must already exist in
# the workspace -- confirm where they are loaded. `attach()` is an
# anti-pattern (pollutes the search path); prefer passing data explicitly.
attach(dat)
library(ggplot2)
library(reshape2)

# Distribution of subject 1's scores.
ggplot(Pilot, aes(x=Subj1Score)) + geom_freqpoly()

# Reshape to long format: one row per (Item, subject-score) pair, so the
# two subjects can be compared on a single plot.
shapeddat <- melt(Pilot,id.vars='Item', measure.vars=c('Subj1Score','Subj2Score'))

# Overlaid score densities for the two subjects (narrow bandwidth: 1/4).
ggplot(shapeddat, aes(x=value, color=variable)) + geom_density(adjust=1/4)

# Empirical CDFs of the two subjects' scores.
ggplot(shapeddat, aes(x=value, color=variable)) + stat_ecdf()
/BehavioralScreening/pilot.R
no_license
TDaltonC/fMRI-Scripts
R
false
false
322
r
# pilot.R -- exploratory plots of pilot scoring data.
# NOTE(review): `dat` (and, presumably via it, `Pilot`) must already exist in
# the workspace -- confirm where they are loaded. `attach()` is an
# anti-pattern (pollutes the search path); prefer passing data explicitly.
attach(dat)
library(ggplot2)
library(reshape2)

# Distribution of subject 1's scores.
ggplot(Pilot, aes(x=Subj1Score)) + geom_freqpoly()

# Reshape to long format: one row per (Item, subject-score) pair, so the
# two subjects can be compared on a single plot.
shapeddat <- melt(Pilot,id.vars='Item', measure.vars=c('Subj1Score','Subj2Score'))

# Overlaid score densities for the two subjects (narrow bandwidth: 1/4).
ggplot(shapeddat, aes(x=value, color=variable)) + geom_density(adjust=1/4)

# Empirical CDFs of the two subjects' scores.
ggplot(shapeddat, aes(x=value, color=variable)) + stat_ecdf()
# plot2.R -- line plot of Global Active Power over a two-day window of the
# household power consumption data set; saved as plot2.png (480x480).

# Read only the target rows; the header row is skipped by `skip`, so column
# names must be supplied afterwards.
final<-read.csv("household_power_consumption.txt", header=TRUE, sep=";",
                stringsAsFactors = FALSE, skip=66636, nrows = 2880)

# NOTE(review): `d1` is assumed to be a previously read copy of the data that
# still carries the original header -- confirm it exists in the session.
colnames(final)<-names(d1)
final$Date<-as.Date(final$Date, "%d/%m/%Y")
final <- final[complete.cases(final),]

# Combine date and time into a single POSIXct timestamp column.
# FIX: removed `dateTime <- setNames(dateTime, "DateTime")` -- it referenced
# the undefined variable `dateTime` (typo for `datetime`), so the script
# stopped with an error there, and its result was never used anyway.
datetime<-paste(final$Date, final$Time)
final<-cbind(datetime, final)
final$datetime<-as.POSIXct(datetime)

# Plot to the screen device, then copy to a PNG file.
plot(final$Global_active_power~final$datetime, type="l",
     ylab="Global Active Power (kilowatts)", xlab="")
dev.copy(png, "plot2.png", width=480, height=480)
dev.off()
/plot2.R
no_license
rkaamya95/ExData_Plotting1
R
false
false
550
r
# plot2.R -- line plot of Global Active Power over a two-day window of the
# household power consumption data set; saved as plot2.png (480x480).

# Read only the target rows; the header row is skipped by `skip`, so column
# names must be supplied afterwards.
final<-read.csv("household_power_consumption.txt", header=TRUE, sep=";",
                stringsAsFactors = FALSE, skip=66636, nrows = 2880)

# NOTE(review): `d1` is assumed to be a previously read copy of the data that
# still carries the original header -- confirm it exists in the session.
colnames(final)<-names(d1)
final$Date<-as.Date(final$Date, "%d/%m/%Y")
final <- final[complete.cases(final),]

# Combine date and time into a single POSIXct timestamp column.
# FIX: removed `dateTime <- setNames(dateTime, "DateTime")` -- it referenced
# the undefined variable `dateTime` (typo for `datetime`), so the script
# stopped with an error there, and its result was never used anyway.
datetime<-paste(final$Date, final$Time)
final<-cbind(datetime, final)
final$datetime<-as.POSIXct(datetime)

# Plot to the screen device, then copy to a PNG file.
plot(final$Global_active_power~final$datetime, type="l",
     ylab="Global Active Power (kilowatts)", xlab="")
dev.copy(png, "plot2.png", width=480, height=480)
dev.off()
# Extracted example for opalr::dsadmin.get_options (runs against the public
# Opal demo server, so it needs network access).
library(opalr)

### Name: dsadmin.get_options
### Title: Get the DataSHIELD options
### Aliases: dsadmin.get_options

### ** Examples

## No test:
# Log in with the demo server's published credentials, list the
# DataSHIELD options, then close the session.
o <- opal.login('administrator','password','https://opal-demo.obiba.org')
dsadmin.get_options(o)
opal.logout(o)
## End(No test)
/data/genthat_extracted_code/opalr/examples/dsadmin.get_options.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
280
r
# Extracted example for opalr::dsadmin.get_options (runs against the public
# Opal demo server, so it needs network access).
library(opalr)

### Name: dsadmin.get_options
### Title: Get the DataSHIELD options
### Aliases: dsadmin.get_options

### ** Examples

## No test:
# Log in with the demo server's published credentials, list the
# DataSHIELD options, then close the session.
o <- opal.login('administrator','password','https://opal-demo.obiba.org')
dsadmin.get_options(o)
opal.logout(o)
## End(No test)
#' @importFrom dplyr bind_rows
#' @importFrom dplyr data_frame
#' @importFrom dplyr select_
#' @importFrom htmltools htmlPreserve
#' @importFrom knitr asis_output
#' @importFrom tidyr spread_
#'
# Render a dust object as an HTML table. Interactively (with asis = TRUE)
# the table is written to a temp file and shown in the RStudio viewer;
# otherwise the HTML string is returned (wrapped for knitr when asis = TRUE).
print_dust_html <- function(x, ..., asis=TRUE,
                            linebreak_at_end = getOption("pixie_html_linebreak", 2),
                            interactive = getOption("pixie_interactive"))
{
  if (is.null(interactive)) interactive <- interactive()

  if (!is.null(x$caption)) increment_pixie_count()

  # Table label: explicit label > current knitr chunk label > running count.
  label <-
    if (is.null(x[["label"]]))
    {
      chunk_label <- knitr::opts_current$get("label")
      if (is.null(chunk_label))
        sprintf("tab:pixie-%s", getOption("pixie_count"))
      else
        sprintf("tab:%s", chunk_label)
    }
  else
  {
    sprintf("tab:%s", x[["label"]])
  }

  # bookdown cross-reference anchor vs. plain "Table N:" prefix.
  label <-
    if (x[["bookdown"]])
    {
      sprintf("(\\#%s)", label)
    }
  else
  {
    sprintf("Table %s: ", get_pixie_count())
  }

  #* Determine the number of divisions
  #* It looks more complicated than it is, but the gist of it is
  #* total number of divisions: ceiling(total_rows / longtable_rows)
  #* The insane looking data frame is just to make a reference of what rows
  #* go in what division.
  # longtable = TRUE defaults to 25 rows per division; FALSE means one
  # division; a number is used as the rows-per-division directly.
  if (!is.numeric(x$longtable) & x$longtable) longtable_rows <- 25L
  else if (!is.numeric(x$longtable) & !x$longtable) longtable_rows <- as.integer(max(x$body$row))
  else longtable_rows <- as.integer(x$longtable)

  Divisions <- data.frame(div_num = rep(1:ceiling(max(x$body$row) / longtable_rows),
                                        each = longtable_rows)[1:max(x$body$row)],
                          row_num = 1:max(x$body$row))
  total_div <- max(Divisions$div_num)

  #* Format the table parts
  head <- part_prep_html(x$head, head = TRUE)
  body <- part_prep_html(x$body)
  foot <- if (!is.null(x$foot)) part_prep_html(x$foot) else NULL
  interfoot <- if (!is.null(x$interfoot)) part_prep_html(x$interfoot) else NULL

  tmpfile <- tempfile(fileext=".html")
  non_interactive <- ""

  #* Run a for loop to build all the table divisions
  for (i in 1:total_div){
    # Each division gets the head, its slice of the body, and either the
    # final foot (last division) or the repeating interfoot.
    tbl <- dplyr::bind_rows(head,
                            body[Divisions$row_num[Divisions$div_num == i], , drop=FALSE],
                            if (i == total_div) foot else interfoot)

    rows <- apply(tbl, 1, paste0, collapse = "\n")
    rows <- sprintf("<tr>\n%s\n</tr>", rows)

    html_code <- sprintf("<table align = '%s' style = 'border-collapse:%s;'>\n%s\n</table>%s",
                         x[["justify"]],
                         x$border_collapse,
                         paste0(rows, collapse = "\n"),
                         paste0(rep("</br>", linebreak_at_end), collapse = ""))

    # Inject the caption right after the opening <table ...> tag.
    if (!is.null(x$caption))
      html_code <- sub(">",
                       sprintf(">\n<caption>%s %s</caption>", label, x$caption),
                       html_code)

    #* When interactive, write to a temporary file so that it
    #* can be displayed in the viewer
    if (interactive & asis){
      write(html_code, tmpfile, append = i > 1)
    }
    else non_interactive <- paste0(non_interactive, html_code)
  }

  # print(html_code)
  if (interactive & asis){
    getOption("viewer")(tmpfile)
  }
  else if (asis) knitr::asis_output(htmltools::htmlPreserve(non_interactive))
  else htmltools::htmlPreserve(non_interactive)
}

#**** Helper functions

# Convert one dust table part (head/body/foot/interfoot, long format: one
# row per cell) into a wide data frame of ready-made <td>/<th> HTML strings,
# applying rounding, replacement, and all per-cell CSS styling.
# NOTE(review): relies on perform_function(), roundSafe() and
# default_halign() defined elsewhere in the package.
part_prep_html <- function(part, head=FALSE)
{
  numeric_classes <- c("double", "numeric")

  dh <- if (head) "th" else "td"

  #* apply a function, if any is indicated
  part <- perform_function(part)

  #* Perform any rounding
  logic <- part$round == "" & part$col_class %in% numeric_classes
  part$round[logic] <- getOption("digits")

  logic <- part$col_class %in% numeric_classes
  if (any(logic))
    part$value[logic] <-
      as.character(roundSafe(part$value[logic], as.numeric(part$round[logic])))

  #* Replacement
  logic <- !is.na(part[["replace"]])
  part[["value"]][logic] <- part[["replace"]][logic]

  #* Bold and italic
  boldify <- part$bold
  part$bold[boldify] <- "font-weight:bold;"
  part$bold[!boldify] <- ""

  italicize <- part$italic
  part$italic[italicize] <- "font-style:italic;"
  part$italic[!italicize] <- ""

  #* Alignments. With horizontal alignment, first we determine
  #* default alignment for any cell without a given designation.
  #* The defaults are right aligned for numeric, left aligned for
  #* all others. The `default_halign` function is defined in
  #* `print_dust_latex.R`
  logic <- part$halign == ""
  part$halign[logic] <-
    vapply(X = part$col_class[logic],
           FUN = default_halign,
           FUN.VALUE = character(1),
           print_method = "html")

  part$halign <- with(part, sprintf("text-align:%s;", halign))

  logic <- part$valign != ""
  part$valign[logic] <- with(part, sprintf("vertical-align:%s;", valign[logic]))

  #** Background
  logic <- part$bg != ""
  part$bg[logic] <- with(part, sprintf("background-color:%s;", bg[logic]))

  #* Font Family
  logic <- part$font_family != ""
  part$font_family[logic] <- with(part, sprintf("font-family:%s;", font_family[logic]))

  #* Font Color
  logic <- part$font_color != ""
  part$font_color[logic] <- with(part, sprintf("color:%s;", font_color[logic]))

  #* Font size
  logic <- part$font_size != ""
  part$font_size[logic] <- with(part, sprintf("font-size:%s%s;",
                                              font_size[logic],
                                              font_size_units[logic]))

  #* cell height and width
  logic <- part$height != ""
  part$height[logic] <- with(part, sprintf("height:%s%s;", height[logic], height_units[logic]))

  logic <- part$width != ""
  part$width[logic] <- with(part, sprintf("width:%s%s;", width[logic], width_units[logic]))

  #* Borders
  logic <- part$top_border != ""
  part$top_border[logic] <- with(part, sprintf("border-top:%s;", top_border[logic]))

  logic <- part$bottom_border != ""
  part$bottom_border[logic] <- with(part, sprintf("border-bottom:%s;", bottom_border[logic]))

  logic <- part$left_border != ""
  part$left_border[logic] <- with(part, sprintf("border-left:%s;", left_border[logic]))

  logic <- part$right_border != ""
  part$right_border[logic] <- with(part, sprintf("border-right:%s;", right_border[logic]))

  #* Set NA (missing) values to na_string
  logic <- is.na(part$value) & !is.na(part$na_string)
  part$value[logic] <- part$na_string[logic]

  #* Padding
  logic <- part$pad != ""
  part$pad[logic] <- with(part, sprintf("padding:%spx;", pad[logic]))

  #* Text Rotation
  logic <- part$rotate_degree != ""
  part$rotate_degree[logic] <- with(part, rotate_tag(rotate_degree[logic]))

  #* Generate css style definitions for each cell.
  part$value <-
    with(part,
         sprintf("<%s colspan = '%s'; rowspan = '%s'; style='%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s'>%s</%s>",
                 dh,
                 colspan,
                 rowspan,
                 bold, italic, halign, valign, bg, font_family,
                 font_color, font_size, height, width,
                 top_border, bottom_border, left_border, right_border,
                 rotate_degree, pad,
                 value,
                 dh))

  ncol <- max(part$col)

  # Cells absorbed by a rowspan/colspan merge are dropped entirely.
  part <- dplyr::filter_(part, "!(rowspan == 0 | colspan == 0)")

  # For the anchor cell of a multi-column span, shift to the precomputed
  # html position (when those columns are present).
  logic <- part[["row"]] == part[["html_row"]] &
    part[["col"]] == part[["html_col"]] &
    part[["colspan"]] > 1

  if ("html_row_pos" %in% names(part))
    part[["html_row"]][logic] <- part[["html_row_pos"]][logic]
  if ("html_col_pos" %in% names(part))
    part[["html_col"]][logic] <- part[["html_col_pos"]][logic]

  #* Spread to wide format for printing
  part <- dplyr::select_(part, "html_row", "html_col", "value") %>%
    tidyr::spread_("html_col", "value", fill = "") %>%
    dplyr::select_("-html_row")

  # Pad with empty columns when spans swallowed trailing columns, so every
  # part has the same width as the original table.
  if (ncol(part) != ncol){
    part <- dplyr::bind_cols(part,
                             do.call("cbind",
                                     lapply(1:(ncol - ncol(part)),
                                            function(i) dplyr::data_frame(value = ""))))
    names(part) <- 1:ncol
  }

  part
}

#***********************************
#* Rotation tags vary by browser.  To make the rotation as robust as
#* possible, specifying a rotation applies tags for webkit (Chrome?),
#* Mozilla, Internet Explorer, Opera, and a generic transformation.
rotate_tag <- function(degree)
{
  sprintf(
    paste0("-webkit-transform:rotate(%sdeg);",
           "-moz-transform:rotate(%sdeg);",
           "-ms-transform:rotate(%sdeg);",
           "-o-transform:rotate(%sdeg);",
           "transform:rotate(%sdeg);"),
    degree, degree, degree, degree, degree)
}
/R/print_dust_html.R
no_license
rpietro/pixiedust
R
false
false
8,898
r
#' @importFrom dplyr bind_rows
#' @importFrom dplyr data_frame
#' @importFrom dplyr select_
#' @importFrom htmltools htmlPreserve
#' @importFrom knitr asis_output
#' @importFrom tidyr spread_
#'

# print_dust_html: render a dust table object `x` as HTML.
#
# Arguments (inferred from use in this file -- confirm against the dust class):
#   x                - a dust object; this function reads x$caption, x$longtable,
#                      x$head, x$body, x$foot, x$interfoot, x[["label"]],
#                      x[["bookdown"]], x[["justify"]] and x$border_collapse.
#   asis             - when TRUE the markup is wrapped via knitr::asis_output()
#                      (or, in an interactive session, shown in the viewer).
#   linebreak_at_end - number of trailing </br> tags appended after each table.
#   interactive      - NULL means "ask base::interactive()".
#
# Returns the HTML markup (possibly knit_asis-wrapped); when interactive & asis,
# the table is instead written to a temp file and opened in the viewer.
# NOTE(review): increment_pixie_count()/get_pixie_count() are defined elsewhere
# in the package -- presumably a global table counter; verify there.
print_dust_html <- function(x, ..., asis=TRUE,
                            linebreak_at_end = getOption("pixie_html_linebreak", 2),
                            interactive = getOption("pixie_interactive"))
{
  if (is.null(interactive)) interactive <- interactive()
  if (!is.null(x$caption)) increment_pixie_count()

  # Caption label precedence: explicit x[["label"]] beats the knitr chunk
  # label, which beats an auto-generated "pixie-<n>" label.
  label <-
    if (is.null(x[["label"]]))
    {
      chunk_label <- knitr::opts_current$get("label")
      if (is.null(chunk_label))
        sprintf("tab:pixie-%s", getOption("pixie_count"))
      else sprintf("tab:%s", chunk_label)
    }
    else
    {
      sprintf("tab:%s", x[["label"]])
    }

  # bookdown gets a cross-reference anchor; otherwise a "Table <n>: " prefix.
  label <-
    if (x[["bookdown"]])
    {
      sprintf("(\\#%s)", label)
    }
    else
    {
      sprintf("Table %s: ", get_pixie_count())
    }

  #* Determine the number of divisions
  #* It looks more complicated than it is, but the gist of it is
  #* total number of divisions: ceiling(total_rows / longtable_rows)
  #* The insane looking data frame is just to make a reference of what rows
  #* go in what division.
  # (longtable TRUE -> 25 rows per division; FALSE -> one division holding
  #  every row; a number -> that many rows per division.)
  if (!is.numeric(x$longtable) & x$longtable) longtable_rows <- 25L
  else if (!is.numeric(x$longtable) & !x$longtable) longtable_rows <- as.integer(max(x$body$row))
  else longtable_rows <- as.integer(x$longtable)

  # Divisions maps each body row number to the division it is printed in.
  Divisions <- data.frame(div_num = rep(1:ceiling(max(x$body$row) / longtable_rows),
                                        each = longtable_rows)[1:max(x$body$row)],
                          row_num = 1:max(x$body$row))

  total_div <- max(Divisions$div_num)

  #* Format the table parts
  head <- part_prep_html(x$head, head = TRUE)
  body <- part_prep_html(x$body)
  foot <- if (!is.null(x$foot)) part_prep_html(x$foot) else NULL
  interfoot <- if (!is.null(x$interfoot)) part_prep_html(x$interfoot) else NULL

  tmpfile <- tempfile(fileext=".html")
  non_interactive <- ""

  #* Run a for loop to build all the table divisions
  for (i in 1:total_div){
    # Every division repeats the header; the last division takes the foot,
    # all earlier divisions take the interfoot.
    tbl <- dplyr::bind_rows(head,
                            body[Divisions$row_num[Divisions$div_num == i], , drop=FALSE],
                            if (i == total_div) foot else interfoot)
    # Cells arrive as fully formed <td>/<th> strings; join them into rows.
    rows <- apply(tbl, 1, paste0, collapse = "\n")
    rows <- sprintf("<tr>\n%s\n</tr>", rows)

    html_code <- sprintf("<table align = '%s' style = 'border-collapse:%s;'>\n%s\n</table>%s",
                         x[["justify"]],
                         x$border_collapse,
                         paste0(rows, collapse = "\n"),
                         paste0(rep("</br>", linebreak_at_end), collapse = ""))

    # Insert the caption right after the opening <table ...> tag -- sub()
    # replaces only the first ">" in the markup.
    if (!is.null(x$caption))
      html_code <- sub(">", sprintf(">\n<caption>%s %s</caption>", label, x$caption), html_code)

    #* When interactive, write to a temporary file so that it
    #* can be displayed in the viewer
    if (interactive & asis){
      write(html_code, tmpfile, append = i > 1)
    }
    else non_interactive <- paste0(non_interactive, html_code)
  }

  # print(html_code)

  if (interactive & asis){
    getOption("viewer")(tmpfile)
  }
  else if (asis) knitr::asis_output(htmltools::htmlPreserve(non_interactive))
  else htmltools::htmlPreserve(non_interactive)
}

#**** Helper functions

# part_prep_html: convert one table part (head/body/foot/interfoot) from the
# long one-row-per-cell format into a wide data frame of finished <td>/<th>
# strings, one column per HTML column.
#
# Arguments:
#   part - long-format data frame of cells carrying the styling columns used
#          below (bold, italic, halign, bg, borders, ...). Assumes they all
#          exist -- confirm against the dust sprinkle definitions.
#   head - when TRUE cells are emitted as <th> rather than <td>.
# NOTE(review): perform_function(), roundSafe() and default_halign() are
# defined elsewhere in the package.
part_prep_html <- function(part, head=FALSE)
{
  numeric_classes <- c("double", "numeric")
  # HTML tag name used for every cell in this part.
  dh <- if (head) "th" else "td"

  #* apply a function, if any is indicated
  part <- perform_function(part)

  #* Perform any rounding
  # Numeric cells with no explicit rounding fall back to getOption("digits").
  logic <- part$round == "" & part$col_class %in% numeric_classes
  part$round[logic] <- getOption("digits")

  logic <- part$col_class %in% numeric_classes
  if (any(logic))
    part$value[logic] <-
      as.character(roundSafe(part$value[logic], as.numeric(part$round[logic])))

  #* Replacement
  logic <- !is.na(part[["replace"]])
  part[["value"]][logic] <- part[["replace"]][logic]

  #* Bold and italic
  # The logical flags are overwritten in place with the CSS fragment they
  # stand for ("" when the flag is off).
  boldify <- part$bold
  part$bold[boldify] <- "font-weight:bold;"
  part$bold[!boldify] <- ""

  italicize <- part$italic
  part$italic[italicize] <- "font-style:italic;"
  part$italic[!italicize] <- ""

  #* Alignments. With horizontal alignment, first we determine
  #* default alignment for any cell without a given designation.
  #* The defaults are right aligned for numeric, left aligned for
  #* all otheres. The `default_halign` function is defined in
  #* `print_dust_latex.R`
  logic <- part$halign == ""
  part$halign[logic] <-
    vapply(X = part$col_class[logic],
           FUN = default_halign,
           FUN.VALUE = character(1),
           print_method = "html")

  part$halign <- with(part, sprintf("text-align:%s;", halign))

  logic <- part$valign != ""
  part$valign[logic] <-
    with(part, sprintf("vertical-align:%s;", valign[logic]))

  #** Background
  logic <- part$bg != ""
  part$bg[logic] <- with(part, sprintf("background-color:%s;", bg[logic]))

  #* Font Family
  logic <- part$font_family != ""
  part$font_family[logic] <- with(part, sprintf("font-family:%s;", font_family[logic]))

  #* Font Color
  logic <- part$font_color != ""
  part$font_color[logic] <- with(part, sprintf("color:%s;", font_color[logic]))

  #* Font size
  logic <- part$font_size != ""
  part$font_size[logic] <- with(part, sprintf("font-size:%s%s;", font_size[logic], font_size_units[logic]))

  #* cell height and width
  logic <- part$height != ""
  part$height[logic] <- with(part, sprintf("height:%s%s;", height[logic], height_units[logic]))

  logic <- part$width != ""
  part$width[logic] <- with(part, sprintf("width:%s%s;", width[logic], width_units[logic]))

  #* Borders
  logic <- part$top_border != ""
  part$top_border[logic] <- with(part, sprintf("border-top:%s;", top_border[logic]))

  logic <- part$bottom_border != ""
  part$bottom_border[logic] <- with(part, sprintf("border-bottom:%s;", bottom_border[logic]))

  logic <- part$left_border != ""
  part$left_border[logic] <- with(part, sprintf("border-left:%s;", left_border[logic]))

  logic <- part$right_border != ""
  part$right_border[logic] <- with(part, sprintf("border-right:%s;", right_border[logic]))

  #* Set NA (missing) values to na_string
  logic <- is.na(part$value) & !is.na(part$na_string)
  part$value[logic] <- part$na_string[logic]

  #* Padding
  logic <- part$pad != ""
  part$pad[logic] <- with(part, sprintf("padding:%spx;", pad[logic]))

  #* Text Rotation
  logic <- part$rotate_degree != ""
  part$rotate_degree[logic] <- with(part, rotate_tag(rotate_degree[logic]))

  #* Generate css style definitions for each cell.
  part$value <-
    with(part,
         sprintf("<%s colspan = '%s'; rowspan = '%s'; style='%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s'>%s</%s>",
                 dh, colspan, rowspan,
                 bold, italic, halign, valign, bg, font_family, #6
                 font_color, font_size, height, width, #4
                 top_border, bottom_border, left_border, right_border, #4
                 rotate_degree, pad, #2
                 value, dh))

  ncol <- max(part$col)

  # Drop cells whose rowspan/colspan is 0 -- presumably cells absorbed by a
  # merge; TODO confirm against the merge sprinkle.
  part <- dplyr::filter_(part, "!(rowspan == 0 | colspan == 0)")

  # Anchor cells of a multi-column span may carry explicit output positions.
  logic <- part[["row"]] == part[["html_row"]] &
    part[["col"]] == part[["html_col"]] & part[["colspan"]] > 1

  if ("html_row_pos" %in% names(part))
    part[["html_row"]][logic] <- part[["html_row_pos"]][logic]
  if ("html_col_pos" %in% names(part))
    part[["html_col"]][logic] <- part[["html_col_pos"]][logic]

  #* Spread to wide format for printing
  part <- dplyr::select_(part, "html_row", "html_col", "value") %>%
    tidyr::spread_("html_col", "value", fill = "") %>%
    dplyr::select_("-html_row")

  # Pad with empty columns when merges removed trailing columns, so every
  # division carries the full column count.
  if (ncol(part) != ncol){
    part <- dplyr::bind_cols(part,
                             do.call("cbind",
                                     lapply(1:(ncol - ncol(part)),
                                            function(i) dplyr::data_frame(value = ""))))
    names(part) <- 1:ncol
  }

  part
}

#***********************************
#* Rotation tags vary by browser. To make the rotation as robust as
#* possible, specifying a rotation applies tags for webkit (Chrome?),
#* Mozilla, Internet Explorer, Opera, and a generic transformation.

# rotate_tag: build the vendor-prefixed CSS transform fragment rotating cell
# text by `degree` degrees. Vectorized over `degree` via sprintf recycling.
rotate_tag <- function(degree)
{
  sprintf(
    paste0("-webkit-transform:rotate(%sdeg);",
           "-moz-transform:rotate(%sdeg);",
           "-ms-transform:rotate(%sdeg);",
           "-o-transform:rotate(%sdeg);",
           "transform:rotate(%sdeg);"),
    degree, degree, degree, degree, degree)
}
#' Runs multiple Fisher scoring steps
#'
#' @param x float-like, matrix-shaped Tensor where each row represents a sample's
#' features.
#' @param ... other arguments passed to specific methods.
#'
#' @seealso [glm_fit.tensorflow.tensor()]
#'
#' @export
glm_fit <- function(x, ...) {
  UseMethod("glm_fit")
}

#' Runs one Fisher scoring step
#'
#' @inheritParams glm_fit
#' @seealso [glm_fit_one_step.tensorflow.tensor()]
#' @export
glm_fit_one_step <- function(x, ...) {
  UseMethod("glm_fit_one_step")
}

#' @inheritParams glm_fit
#' @export
glm_fit.default <- function(x, ...) {
  # Coerce plain R objects (matrices, arrays) to a Tensor and re-dispatch.
  glm_fit(tensorflow::tf$convert_to_tensor(x), ...)
}

#' @inheritParams glm_fit
#' @export
glm_fit_one_step.default <- function(x, ...) {
  # Coerce plain R objects (matrices, arrays) to a Tensor and re-dispatch.
  glm_fit_one_step(tensorflow::tf$convert_to_tensor(x), ...)
}

#' Runs multiple Fisher scoring steps
#'
#' @inheritParams glm_fit
#' @param response vector-shaped Tensor where each element represents a sample's
#' observed response (to the corresponding row of features). Must have same `dtype`
#' as `x`.
#' @param model a string naming the model (see [glm_families]) or a `tfp$glm$ExponentialFamily-like`
#' instance which implicitly characterizes a negative log-likelihood loss by specifying
#' the distribution's mean, gradient_mean, and variance.
#' @param model_coefficients_start Optional (batch of) vector-shaped Tensor representing
#' the initial model coefficients, one for each column in `x`. Must have same `dtype`
#' as model_matrix. Default value: Zeros.
#' @param predicted_linear_response_start Optional Tensor with shape, `dtype` matching
#' `response`; represents offset shifted initial linear predictions based on
#' `model_coefficients_start`. Default value: offset if model_coefficients is `NULL`,
#' and `tf$linalg$matvec(x, model_coefficients_start) + offset` otherwise.
#' @param l2_regularizer Optional scalar Tensor representing L2 regularization penalty.
#' Default: `NULL` ie. no regularization.
#' @param dispersion Optional (batch of) Tensor representing response dispersion.
#' @param offset Optional Tensor representing constant shift applied to `predicted_linear_response`.
#' @param convergence_criteria_fn callable taking: `is_converged_previous`, `iter_`,
#' `model_coefficients_previous`, `predicted_linear_response_previous`, `model_coefficients_next`,
#' `predicted_linear_response_next`, `response`, `model`, `dispersion` and returning
#' a logical Tensor indicating that Fisher scoring has converged.
#' @param learning_rate Optional (batch of) scalar Tensor used to dampen iterative progress.
#' Typically only needed if optimization diverges, should be no larger than 1 and typically
#' very close to 1. Default value: `NULL` (i.e., 1).
#' @param fast_unsafe_numerics Optional Python bool indicating if faster, less numerically
#' accurate methods can be employed for computing the weighted least-squares solution. Default
#' value: TRUE (i.e., "fast but possibly diminished accuracy").
#' @param maximum_iterations Optional maximum number of iterations of Fisher scoring to run;
#' "and-ed" with result of `convergence_criteria_fn`. Default value: `NULL` (i.e., infinity).
#' @param name used as name prefix to ops created by this function. Default value: "fit".
#'
#' @family glm_fit
#'
#' @export
glm_fit.tensorflow.tensor <- function(x,
                                      response,
                                      model,
                                      model_coefficients_start = NULL,
                                      predicted_linear_response_start = NULL,
                                      l2_regularizer = NULL,
                                      dispersion = NULL,
                                      offset = NULL,
                                      convergence_criteria_fn = NULL,
                                      learning_rate = NULL,
                                      fast_unsafe_numerics = TRUE,
                                      maximum_iterations = NULL,
                                      name = NULL,
                                      ...) {
  # Allow the model family to be given as a string (see glm_families).
  if (is.character(model)) model <- family_from_string(model)

  out <- tfp$glm$fit(
    model_matrix = x,
    response = response,
    model = model,
    model_coefficients_start = model_coefficients_start,
    predicted_linear_response_start = predicted_linear_response_start,
    l2_regularizer = l2_regularizer,
    dispersion = dispersion,
    offset = offset,
    convergence_criteria_fn = convergence_criteria_fn,
    learning_rate = learning_rate,
    fast_unsafe_numerics = fast_unsafe_numerics,
    maximum_iterations = maximum_iterations,
    name = name
  )
  # Plain character class, consistent with glm_fit_one_step.tensorflow.tensor.
  class(out) <- "glm_fit"
  out
}

#' Runs one Fisher scoring step
#' @inheritParams glm_fit.tensorflow.tensor
#' @family glm_fit
#' @export
glm_fit_one_step.tensorflow.tensor <- function(x,
                                               response,
                                               model,
                                               model_coefficients_start = NULL,
                                               predicted_linear_response_start = NULL,
                                               l2_regularizer = NULL,
                                               dispersion = NULL,
                                               offset = NULL,
                                               learning_rate = NULL,
                                               fast_unsafe_numerics = TRUE,
                                               name = NULL,
                                               ...) {
  # Allow the model family to be given as a string (see glm_families).
  if (is.character(model)) model <- family_from_string(model)

  out <- tfp$glm$fit_one_step(
    model_matrix = x,
    response = response,
    model = model,
    model_coefficients_start = model_coefficients_start,
    predicted_linear_response_start = predicted_linear_response_start,
    l2_regularizer = l2_regularizer,
    dispersion = dispersion,
    offset = offset,
    learning_rate = learning_rate,
    fast_unsafe_numerics = fast_unsafe_numerics,
    name = name
  )
  class(out) <- "glm_fit"
  out
}

#' GLM families
#'
#' A list of models that can be used as the `model` argument in [glm_fit()]:
#'
#' * `Bernoulli`: `Bernoulli(probs=mean)` where `mean = sigmoid(matmul(X, weights))`
#' * `BernoulliNormalCDF`: `Bernoulli(probs=mean)` where `mean = Normal(0, 1).cdf(matmul(X, weights))`
#' * `GammaExp`: `Gamma(concentration=1, rate=1 / mean)` where `mean = exp(matmul(X, weights))`
#' * `GammaSoftplus`: `Gamma(concentration=1, rate=1 / mean)` where `mean = softplus(matmul(X, weights))`
#' * `LogNormal`: `LogNormal(loc=log(mean) - log(2) / 2, scale=sqrt(log(2)))` where
#' `mean = exp(matmul(X, weights))`.
#' * `LogNormalSoftplus`: `LogNormal(loc=log(mean) - log(2) / 2, scale=sqrt(log(2)))` where
#' `mean = softplus(matmul(X, weights))`
#' * `Normal`: `Normal(loc=mean, scale=1)` where `mean = matmul(X, weights)`.
#' * `NormalReciprocal`: `Normal(loc=mean, scale=1)` where `mean = 1 / matmul(X, weights)`
#' * `Poisson`: `Poisson(rate=mean)` where `mean = exp(matmul(X, weights))`.
#' * `PoissonSoftplus`: `Poisson(rate=mean)` where `mean = softplus(matmul(X, weights))`.
#'
#' @family glm_fit
#' @name glm_families
#' @rdname glm_families
NULL

# Map a family name (see glm_families) to a freshly constructed
# tfp$glm$ExponentialFamily instance; errors on unknown names.
family_from_string <- function(model) {
  switch(model,
    Bernoulli = tfp$glm$Bernoulli(),
    BernoulliNormalCDF = tfp$glm$BernoulliNormalCDF(),
    GammaExp = tfp$glm$GammaExp(),
    GammaSoftplus = tfp$glm$GammaSoftplus(),
    LogNormal = tfp$glm$LogNormal(),
    LogNormalSoftplus = tfp$glm$LogNormalSoftplus(),
    Normal = tfp$glm$Normal(),
    NormalReciprocal = tfp$glm$NormalReciprocal(),
    Poisson = tfp$glm$Poisson(),
    PoissonSoftplus = tfp$glm$PoissonSoftplus(),
    # Fixed: the original message lacked the space before "not implemented".
    stop("Model ", model, " not implemented", call. = FALSE)
  )
}
/R/glm.R
permissive
jeffreypullin/tfprobability
R
false
false
7,783
r
#' Runs multiple Fisher scoring steps
#'
#' @param x float-like, matrix-shaped Tensor where each row represents a sample's
#' features.
#' @param ... other arguments passed to specific methods.
#'
#' @seealso [glm_fit.tensorflow.tensor()]
#'
#' @export
glm_fit <- function(x, ...) {
  UseMethod("glm_fit")
}

#' Runs one Fisher scoring step
#'
#' @inheritParams glm_fit
#' @seealso [glm_fit_one_step.tensorflow.tensor()]
#' @export
glm_fit_one_step <- function(x, ...) {
  UseMethod("glm_fit_one_step")
}

#' @inheritParams glm_fit
#' @export
glm_fit.default <- function(x, ...) {
  # Coerce plain R objects (matrices, arrays) to a Tensor and re-dispatch.
  glm_fit(tensorflow::tf$convert_to_tensor(x), ...)
}

#' @inheritParams glm_fit
#' @export
glm_fit_one_step.default <- function(x, ...) {
  # Coerce plain R objects (matrices, arrays) to a Tensor and re-dispatch.
  glm_fit_one_step(tensorflow::tf$convert_to_tensor(x), ...)
}

#' Runs multiple Fisher scoring steps
#'
#' @inheritParams glm_fit
#' @param response vector-shaped Tensor where each element represents a sample's
#' observed response (to the corresponding row of features). Must have same `dtype`
#' as `x`.
#' @param model a string naming the model (see [glm_families]) or a `tfp$glm$ExponentialFamily-like`
#' instance which implicitly characterizes a negative log-likelihood loss by specifying
#' the distribution's mean, gradient_mean, and variance.
#' @param model_coefficients_start Optional (batch of) vector-shaped Tensor representing
#' the initial model coefficients, one for each column in `x`. Must have same `dtype`
#' as model_matrix. Default value: Zeros.
#' @param predicted_linear_response_start Optional Tensor with shape, `dtype` matching
#' `response`; represents offset shifted initial linear predictions based on
#' `model_coefficients_start`. Default value: offset if model_coefficients is `NULL`,
#' and `tf$linalg$matvec(x, model_coefficients_start) + offset` otherwise.
#' @param l2_regularizer Optional scalar Tensor representing L2 regularization penalty.
#' Default: `NULL` ie. no regularization.
#' @param dispersion Optional (batch of) Tensor representing response dispersion.
#' @param offset Optional Tensor representing constant shift applied to `predicted_linear_response`.
#' @param convergence_criteria_fn callable taking: `is_converged_previous`, `iter_`,
#' `model_coefficients_previous`, `predicted_linear_response_previous`, `model_coefficients_next`,
#' `predicted_linear_response_next`, `response`, `model`, `dispersion` and returning
#' a logical Tensor indicating that Fisher scoring has converged.
#' @param learning_rate Optional (batch of) scalar Tensor used to dampen iterative progress.
#' Typically only needed if optimization diverges, should be no larger than 1 and typically
#' very close to 1. Default value: `NULL` (i.e., 1).
#' @param fast_unsafe_numerics Optional Python bool indicating if faster, less numerically
#' accurate methods can be employed for computing the weighted least-squares solution. Default
#' value: TRUE (i.e., "fast but possibly diminished accuracy").
#' @param maximum_iterations Optional maximum number of iterations of Fisher scoring to run;
#' "and-ed" with result of `convergence_criteria_fn`. Default value: `NULL` (i.e., infinity).
#' @param name used as name prefix to ops created by this function. Default value: "fit".
#'
#' @family glm_fit
#'
#' @export
glm_fit.tensorflow.tensor <- function(x,
                                      response,
                                      model,
                                      model_coefficients_start = NULL,
                                      predicted_linear_response_start = NULL,
                                      l2_regularizer = NULL,
                                      dispersion = NULL,
                                      offset = NULL,
                                      convergence_criteria_fn = NULL,
                                      learning_rate = NULL,
                                      fast_unsafe_numerics = TRUE,
                                      maximum_iterations = NULL,
                                      name = NULL,
                                      ...) {
  # Allow the model family to be given as a string (see glm_families).
  if (is.character(model)) model <- family_from_string(model)

  out <- tfp$glm$fit(
    model_matrix = x,
    response = response,
    model = model,
    model_coefficients_start = model_coefficients_start,
    predicted_linear_response_start = predicted_linear_response_start,
    l2_regularizer = l2_regularizer,
    dispersion = dispersion,
    offset = offset,
    convergence_criteria_fn = convergence_criteria_fn,
    learning_rate = learning_rate,
    fast_unsafe_numerics = fast_unsafe_numerics,
    maximum_iterations = maximum_iterations,
    name = name
  )
  # Plain character class, consistent with glm_fit_one_step.tensorflow.tensor.
  class(out) <- "glm_fit"
  out
}

#' Runs one Fisher scoring step
#' @inheritParams glm_fit.tensorflow.tensor
#' @family glm_fit
#' @export
glm_fit_one_step.tensorflow.tensor <- function(x,
                                               response,
                                               model,
                                               model_coefficients_start = NULL,
                                               predicted_linear_response_start = NULL,
                                               l2_regularizer = NULL,
                                               dispersion = NULL,
                                               offset = NULL,
                                               learning_rate = NULL,
                                               fast_unsafe_numerics = TRUE,
                                               name = NULL,
                                               ...) {
  # Allow the model family to be given as a string (see glm_families).
  if (is.character(model)) model <- family_from_string(model)

  out <- tfp$glm$fit_one_step(
    model_matrix = x,
    response = response,
    model = model,
    model_coefficients_start = model_coefficients_start,
    predicted_linear_response_start = predicted_linear_response_start,
    l2_regularizer = l2_regularizer,
    dispersion = dispersion,
    offset = offset,
    learning_rate = learning_rate,
    fast_unsafe_numerics = fast_unsafe_numerics,
    name = name
  )
  class(out) <- "glm_fit"
  out
}

#' GLM families
#'
#' A list of models that can be used as the `model` argument in [glm_fit()]:
#'
#' * `Bernoulli`: `Bernoulli(probs=mean)` where `mean = sigmoid(matmul(X, weights))`
#' * `BernoulliNormalCDF`: `Bernoulli(probs=mean)` where `mean = Normal(0, 1).cdf(matmul(X, weights))`
#' * `GammaExp`: `Gamma(concentration=1, rate=1 / mean)` where `mean = exp(matmul(X, weights))`
#' * `GammaSoftplus`: `Gamma(concentration=1, rate=1 / mean)` where `mean = softplus(matmul(X, weights))`
#' * `LogNormal`: `LogNormal(loc=log(mean) - log(2) / 2, scale=sqrt(log(2)))` where
#' `mean = exp(matmul(X, weights))`.
#' * `LogNormalSoftplus`: `LogNormal(loc=log(mean) - log(2) / 2, scale=sqrt(log(2)))` where
#' `mean = softplus(matmul(X, weights))`
#' * `Normal`: `Normal(loc=mean, scale=1)` where `mean = matmul(X, weights)`.
#' * `NormalReciprocal`: `Normal(loc=mean, scale=1)` where `mean = 1 / matmul(X, weights)`
#' * `Poisson`: `Poisson(rate=mean)` where `mean = exp(matmul(X, weights))`.
#' * `PoissonSoftplus`: `Poisson(rate=mean)` where `mean = softplus(matmul(X, weights))`.
#'
#' @family glm_fit
#' @name glm_families
#' @rdname glm_families
NULL

# Map a family name (see glm_families) to a freshly constructed
# tfp$glm$ExponentialFamily instance; errors on unknown names.
family_from_string <- function(model) {
  switch(model,
    Bernoulli = tfp$glm$Bernoulli(),
    BernoulliNormalCDF = tfp$glm$BernoulliNormalCDF(),
    GammaExp = tfp$glm$GammaExp(),
    GammaSoftplus = tfp$glm$GammaSoftplus(),
    LogNormal = tfp$glm$LogNormal(),
    LogNormalSoftplus = tfp$glm$LogNormalSoftplus(),
    Normal = tfp$glm$Normal(),
    NormalReciprocal = tfp$glm$NormalReciprocal(),
    Poisson = tfp$glm$Poisson(),
    PoissonSoftplus = tfp$glm$PoissonSoftplus(),
    # Fixed: the original message lacked the space before "not implemented".
    stop("Model ", model, " not implemented", call. = FALSE)
  )
}
# Ready to summarize all our data # PLEASE! Remember to source the data, questionnaire, choices, and weights from preliminary_weighted_analysis.R source("code/data_merge_prep.R") dm_file <- dm_data %>% summarize(age_hoh = weighted_median(age_hoh, .), male_hoh = percent_response(sex_hoh, ., "male"), female_hoh = percent_response(sex_hoh, ., "female"), total_people = weighted_sum(size_hh, .), size_hh = weighted_median(size_hh, .), infants_female = round( 100 * (weighted_sum(nb_infants_female, .) / total_people), 0), children_female = round( 100 * (weighted_sum(nb_children_female, .) / total_people), 0), youth_female = round( 100 * (weighted_sum(nb_youth_female, .) / total_people), 0), adults_female = round( 100 * (weighted_sum(nb_adults_female, .) / total_people), 0), elderly_female = round( 100 * (weighted_sum(nb_elderly_female, .) / total_people), 0), infants_male = round( 100 * (weighted_sum(nb_infants_male, .) / total_people), 0), children_male = round( 100 * (weighted_sum(nb_children_male, .) / total_people), 0), youth_male = round( 100 * (weighted_sum(nb_youth_male, .) / total_people), 0), adults_male = round( 100 * (weighted_sum(nb_adults_male, .) / total_people), 0), elderly_male = round( 100 * (weighted_sum(nb_elderly_male, .) 
/ total_people), 0), displ_2011 = percent_response(initially_displaced_year, ., "displaced_2011"), displ_2012 = percent_response(initially_displaced_year, ., "displaced_2012"), displ_2013 = percent_response(initially_displaced_year, ., "displaced_2013"), displ_2014 = percent_response(initially_displaced_year, ., "displaced_2014"), displ_2015 = percent_response(initially_displaced_year, ., "displaced_2015"), displ_2016 = percent_response(initially_displaced_year, ., "displaced_2016"), displ_2017 = percent_response(initially_displaced_year, ., "displaced_2017"), displ_2018 = percent_response(initially_displaced_year, ., "displaced_2018"), displ_2019 = percent_response(initially_displaced_year, ., "displaced_2019"), return_2011 = percent_response(returned_year, ., "displaced_2011"), return_2012 = percent_response(returned_year, ., "displaced_2012"), return_2013 = percent_response(returned_year, ., "displaced_2013"), return_2014 = percent_response(returned_year, ., "displaced_2014"), return_2015 = percent_response(returned_year, ., "displaced_2015"), return_2016 = percent_response(returned_year, ., "displaced_2016"), return_2017 = percent_response(returned_year, ., "displaced_2017"), return_2018 = percent_response(returned_year, ., "displaced_2018"), return_2019 = percent_response(returned_year, ., "displaced_2019"), displaced_once = num_percent_response(times_displaced_since_2011, ., 1), displaced_twice = num_percent_response(times_displaced_since_2011, ., 2), displaced_thrice = num_percent_response(times_displaced_since_2011, ., 3), displaced_four_plus = num_percent_response(times_displaced_since_2011, ., 4:max(times_displaced_since_2011, na.rm = T)), push_factor_1_name = select_percents(push_factors, 1, ., questions, choices, "label"), push_factor_1_pct = select_percents(push_factors, 1, ., questions, choices, "percent"), push_factor_2_name = select_percents(push_factors, 2, ., questions, choices, "label"), push_factor_2_pct = select_percents(push_factors, 2, ., 
questions, choices, "percent"), push_factor_3_name = select_percents(push_factors, 3, ., questions, choices, "label"), push_factor_3_pct = select_percents(push_factors, 3, ., questions, choices, "percent"), no_return_1_name = select_percents(didnt_return_home_reasons, 1, ., questions, choices, "label"), no_return_1_pct = select_percents(didnt_return_home_reasons, 1, ., questions, choices, "percent"), no_return_2_name = select_percents(didnt_return_home_reasons, 2, ., questions, choices, "label"), no_return_2_pct = select_percents(didnt_return_home_reasons, 2, ., questions, choices, "percent"), no_return_3_name = select_percents(didnt_return_home_reasons, 3, ., questions, choices, "label"), no_return_3_pct = select_percents(didnt_return_home_reasons, 3, ., questions, choices, "percent"), returnee_incident_1_name = select_percents(returnee_issues, 1, ., questions, choices, "label"), returnee_incident_1_pct = select_percents(returnee_issues, 1, ., questions, choices, "percent"), returnee_incident_2_name = select_percents(returnee_issues, 2, ., questions, choices, "label"), returnee_incident_2_pct = select_percents(returnee_issues, 2, ., questions, choices, "percent"), returnee_incident_3_name = select_percents(returnee_issues, 3, ., questions, choices, "label"), returnee_incident_3_pct = select_percents(returnee_issues, 3, ., questions, choices, "percent"), returnee_fcs_poor = percent_response(filter(., displacement_status == "returnee")[["fcs_category"]], filter(., displacement_status == "returnee"), "poor", x_name = "fcs_category"), returnee_fcs_borderline = percent_response(filter(., displacement_status == "returnee")[["fcs_category"]], filter(., displacement_status == "returnee"), "borderline", x_name = "fcs_category"), returnee_fcs_acceptable = percent_response(filter(., displacement_status == "returnee")[["fcs_category"]], filter(., displacement_status == "returnee"), "acceptable", x_name = "fcs_category"), idp_fcs_poor = percent_response(filter(., 
displacement_status == "idp")[["fcs_category"]], filter(., displacement_status == "idp"), "poor", x_name = "fcs_category"), idp_fcs_borderline = percent_response(filter(., displacement_status == "idp")[["fcs_category"]], filter(., displacement_status == "idp"), "borderline", x_name = "fcs_category"), idp_fcs_acceptable = percent_response(filter(., displacement_status == "idp")[["fcs_category"]], filter(., displacement_status == "idp"), "acceptable", x_name = "fcs_category"), non_displaced_fcs_poor = percent_response(filter(., displacement_status == "non_displaced")[["fcs_category"]], filter(., displacement_status == "non_displaced"), "poor", x_name = "fcs_category"), non_displaced_fcs_borderline = percent_response(filter(., displacement_status == "non_displaced")[["fcs_category"]], filter(., displacement_status == "non_displaced"), "borderline", x_name = "fcs_category"), non_displaced_fcs_acceptable = percent_response(filter(., displacement_status == "non_displaced")[["fcs_category"]], filter(., displacement_status == "non_displaced"), "acceptable", x_name = "fcs_category"), returnee_rcsi_low = percent_response(filter(., displacement_status == "returnee")[["rcsi_category"]], filter(., displacement_status == "returnee"), "low", x_name = "rcsi_category"), returnee_rcsi_medium = percent_response(filter(., displacement_status == "returnee")[["rcsi_category"]], filter(., displacement_status == "returnee"), "medium", x_name = "rcsi_category"), returnee_rcsi_high = percent_response(filter(., displacement_status == "returnee")[["rcsi_category"]], filter(., displacement_status == "returnee"), "high", x_name = "rcsi_category"), idp_rcsi_low = percent_response(filter(., displacement_status == "idp")[["rcsi_category"]], filter(., displacement_status == "idp"), "low", x_name = "rcsi_category"), idp_rcsi_medium = percent_response(filter(., displacement_status == "idp")[["rcsi_category"]], filter(., displacement_status == "idp"), "medium", x_name = "rcsi_category"), idp_rcsi_high 
= percent_response(filter(., displacement_status == "idp")[["rcsi_category"]], filter(., displacement_status == "idp"), "high", x_name = "rcsi_category"), non_displaced_rcsi_low = percent_response(filter(., displacement_status == "non_displaced")[["rcsi_category"]], filter(., displacement_status == "non_displaced"), "low", x_name = "rcsi_category"), non_displaced_rcsi_medium = percent_response(filter(., displacement_status == "non_displaced")[["rcsi_category"]], filter(., displacement_status == "non_displaced"), "medium", x_name = "rcsi_category"), non_displaced_rcsi_high = percent_response(filter(., displacement_status == "non_displaced")[["rcsi_category"]], filter(., displacement_status == "non_displaced"), "high", x_name = "rcsi_category"), food_source_1_name = select_percents(food_source, 1, ., questions, choices, "label"), food_source_1_pct = select_percents(food_source, 1, ., questions, choices, "percent"), food_source_2_name = select_percents(food_source, 2, ., questions, choices, "label"), food_source_2_pct = select_percents(food_source, 2, ., questions, choices, "percent"), food_source_3_name = select_percents(food_source, 3, ., questions, choices, "label"), food_source_3_pct = select_percents(food_source, 3, ., questions, choices, "percent"), crop_production = percent_response(agricultural_production, ., "crop_production"), livestock_production = percent_response(agricultural_production, ., "livestock_production"), fishing_production = percent_response(agricultural_production, ., "fishing_production"), previous_activity = percent_response(agricultural_production_prior2011, ., "yes"), crop_previously = percent_response(agricultural_activities_prior2011, ., "crop_production_activity"), livestock_previously = percent_response(agricultural_activities_prior2011, ., "livestock_activity"), fishing_previously = percent_response(agricultural_activities_prior2011, ., "fishing_activity"), other_activity_previously = 
percent_response(agricultural_activities_prior2011, ., "other"), crop_prod_challenges_1_name = select_percents(crop_production_challenges, 1, ., questions, choices, "label"), crop_prod_challenges_1_pct = select_percents(crop_production_challenges, 1, ., questions, choices, "percent"), crop_prod_challenges_2_name = select_percents(crop_production_challenges, 2, ., questions, choices, "label"), crop_prod_challenges_2_pct = select_percents(crop_production_challenges, 2, ., questions, choices, "percent"), crop_prod_challenges_3_name = select_percents(crop_production_challenges, 3, ., questions, choices, "label"), crop_prod_challenges_3_pct = select_percents(crop_production_challenges, 3, ., questions, choices, "percent"), crop_prod_challenges_4_name = select_percents(crop_production_challenges, 4, ., questions, choices, "label"), crop_prod_challenges_4_pct = select_percents(crop_production_challenges, 4, ., questions, choices, "percent"), livestock_prod_challenges_1_name = select_percents(livestock_production_challenges, 1, ., questions, choices, "label"), livestock_prod_challenges_1_pct = select_percents(livestock_production_challenges, 1, ., questions, choices, "percent"), livestock_prod_challenges_2_name = select_percents(livestock_production_challenges, 2, ., questions, choices, "label"), livestock_prod_challenges_2_pct = select_percents(livestock_production_challenges, 2, ., questions, choices, "percent"), livestock_prod_challenges_3_name = select_percents(livestock_production_challenges, 3, ., questions, choices, "label"), livestock_prod_challenges_3_pct = select_percents(livestock_production_challenges, 3, ., questions, choices, "percent"), livestock_prod_challenges_4_name = select_percents(livestock_production_challenges, 4, ., questions, choices, "label"), livestock_prod_challenges_4_pct = select_percents(livestock_production_challenges, 4, ., questions, choices, "percent"), fishing_challenges_1_name = select_percents(fishing_challenges, 1, ., questions, 
choices, "label"), fishing_challenges_1_pct = select_percents(fishing_challenges, 1, ., questions, choices, "percent"), fishing_challenges_2_name = select_percents(fishing_challenges, 2, ., questions, choices, "label"), fishing_challenges_2_pct = select_percents(fishing_challenges, 2, ., questions, choices, "percent"), fishing_challenges_3_name = select_percents(fishing_challenges, 3, ., questions, choices, "label"), fishing_challenges_3_pct = select_percents(fishing_challenges, 3, ., questions, choices, "percent"), fishing_challenges_4_name = select_percents(fishing_challenges, 4, ., questions, choices, "label"), fishing_challenges_4_pct = select_percents(fishing_challenges, 4, ., questions, choices, "percent"), type_crops_1_name = select_percents(types_crops, 1, ., questions, choices, "label"), type_crops_1_pct = select_percents(types_crops, 1, ., questions, choices, "percent"), type_crops_2_name = select_percents(types_crops, 2, ., questions, choices, "label"), type_crops_2_pct = select_percents(types_crops, 2, ., questions, choices, "percent"), type_crops_3_name = select_percents(types_crops, 3, ., questions, choices, "label"), type_crops_3_pct = select_percents(types_crops, 3, ., questions, choices, "percent"), crop_prod_impacted = percent_response(crisis_affected_crop_production, ., "yes"), livestock_prod_impacted = percent_response(crisis_affected_livestock_production, ., "yes"), returnee_adults = sum(filter(., displacement_status == "returnee")[["nb_over18"]] * dm_weights(filter(., displacement_status == "returnee"), "nb_over18")), returnee_adult_perm_job = sum(filter(., displacement_status == "returnee")[["permanent_job_adult"]] * dm_weights(filter(., displacement_status == "returnee"), "permanent_job_adult")), returnee_adult_temp_job = sum(filter(., displacement_status == "returnee")[["temporary_job_adult"]] * dm_weights(filter(., displacement_status == "returnee"), "temporary_job_adult")), returnee_adult_daily_labour = sum(filter(., displacement_status 
== "returnee")[["daily_labour_job_adult"]] * dm_weights(filter(., displacement_status == "returnee"), "daily_labour_job_adult")), returnee_adult_gvt_payroll = sum(filter(., displacement_status == "returnee")[["gvt_payroll_job_adult"]] * dm_weights(filter(., displacement_status == "returnee"), "gvt_payroll_job_adult")), idp_adults = sum(filter(., displacement_status == "idp")[["nb_over18"]] * dm_weights(filter(., displacement_status == "idp"), "nb_over18")), idp_adult_perm_job = sum(filter(., displacement_status == "idp")[["permanent_job_adult"]] * dm_weights(filter(., displacement_status == "idp"), "permanent_job_adult")), idp_adult_temp_job = sum(filter(., displacement_status == "idp")[["temporary_job_adult"]] * dm_weights(filter(., displacement_status == "idp"), "temporary_job_adult")), idp_adult_daily_labour = sum(filter(., displacement_status == "idp")[["daily_labour_job_adult"]] * dm_weights(filter(., displacement_status == "idp"), "daily_labour_job_adult")), idp_adult_gvt_payroll = sum(filter(., displacement_status == "idp")[["gvt_payroll_job_adult"]] * dm_weights(filter(., displacement_status == "idp"), "gvt_payroll_job_adult")), non_displaced_adults = sum(filter(., displacement_status == "non_displaced")[["nb_over18"]] * dm_weights(filter(., displacement_status == "non_displaced"), "nb_over18")), non_displaced_adult_perm_job = sum(filter(., displacement_status == "non_displaced")[["permanent_job_adult"]] * dm_weights(filter(., displacement_status == "non_displaced"), "permanent_job_adult")), non_displaced_adult_temp_job = sum(filter(., displacement_status == "non_displaced")[["temporary_job_adult"]] * dm_weights(filter(., displacement_status == "non_displaced"), "temporary_job_adult")), non_displaced_adult_daily_labour = sum(filter(., displacement_status == "non_displaced")[["daily_labour_job_adult"]] * dm_weights(filter(., displacement_status == "non_displaced"), "daily_labour_job_adult")), non_displaced_adult_gvt_payroll = sum(filter(., 
displacement_status == "non_displaced")[["gvt_payroll_job_adult"]] * dm_weights(filter(., displacement_status == "non_displaced"), "gvt_payroll_job_adult")), returnee_minors = sum(filter(., displacement_status == "returnee")[["nb_under18"]] * dm_weights(filter(., displacement_status == "returnee"), "nb_under18")), returnee_minor_perm_job = sum(filter(., displacement_status == "returnee")[["permanent_job_minor"]] * dm_weights(filter(., displacement_status == "returnee"), "permanent_job_minor")), returnee_minor_temp_job = sum(filter(., displacement_status == "returnee")[["temporary_job_minor"]] * dm_weights(filter(., displacement_status == "returnee"), "temporary_job_minor")), returnee_minor_daily_labour = sum(filter(., displacement_status == "returnee")[["daily_labour_job_minor"]] * dm_weights(filter(., displacement_status == "returnee"), "daily_labour_job_minor")), idp_minors = sum(filter(., displacement_status == "idp")[["nb_under18"]] * dm_weights(filter(., displacement_status == "idp"), "nb_under18")), idp_minor_perm_job = sum(filter(., displacement_status == "idp")[["permanent_job_minor"]] * dm_weights(filter(., displacement_status == "idp"), "permanent_job_minor")), idp_minor_temp_job = sum(filter(., displacement_status == "idp")[["temporary_job_minor"]] * dm_weights(filter(., displacement_status == "idp"), "temporary_job_minor")), idp_minor_daily_labour = sum(filter(., displacement_status == "idp")[["daily_labour_job_minor"]] * dm_weights(filter(., displacement_status == "idp"), "daily_labour_job_minor")), non_displaced_minors = sum(filter(., displacement_status == "non_displaced")[["nb_under18"]] * dm_weights(filter(., displacement_status == "non_displaced"), "nb_under18")), non_displaced_minor_perm_job = sum(filter(., displacement_status == "non_displaced")[["permanent_job_minor"]] * dm_weights(filter(., displacement_status == "non_displaced"), "permanent_job_minor")), non_displaced_minor_temp_job = sum(filter(., displacement_status == 
"non_displaced")[["temporary_job_minor"]] * dm_weights(filter(., displacement_status == "non_displaced"), "temporary_job_minor")), non_displaced_minor_daily_labour = sum(filter(., displacement_status == "non_displaced")[["daily_labour_job_minor"]] * dm_weights(filter(., displacement_status == "non_displaced"), "daily_labour_job_minor")), total_working_male = weighted_sum(calcul_type_institution_male, .), gvt_public_sector_male = round(100 * weighted_sum(gvt_public_sector_male, .) / total_working_male, 0), libyan_owned_business_male = round(100 * weighted_sum(libyan_owned_business_male, .) / total_working_male, 0), foreign_owned_business_male = round(100 * weighted_sum(foreign_owned_business_male, .) / total_working_male, 0), libyan_ngos_csos_male = round(100 * weighted_sum(libyan_ngos_csos_male, .) / total_working_male, 0), international_ngos_male = round(100 * weighted_sum(international_ngos_male, .) / total_working_male, 0), own_family_business_male = round(100 * weighted_sum(own_family_business_male, .) / total_working_male, 0), informal_irregular_labour_male = round(100 * weighted_sum(informal_irregular_labour_male, .) / total_working_male, 0), other_institution_male = round(100 * weighted_sum(other_institution_male, .) / total_working_male, 0), total_working_female = weighted_sum(calcul_type_institution_female, .), gvt_public_sector_female = round(100 * weighted_sum(gvt_public_sector_female, .) / total_working_female, 0), libyan_owned_business_female = round(100 * weighted_sum(libyan_owned_business_female, .) / total_working_female, 0), foreign_owned_business_female = round(100 * weighted_sum(foreign_owned_business_female, .) / total_working_female, 0), libyan_ngos_csos_female = round(100 * weighted_sum(libyan_ngos_csos_female, .) / total_working_female, 0), international_ngos_female = round(100 * weighted_sum(international_ngos_female, .) / total_working_female, 0), own_family_business_female = round(100 * weighted_sum(own_family_business_female, .) 
/ total_working_female, 0), informal_irregular_labour_female = round(100 * weighted_sum(informal_irregular_labour_female, .) / total_working_female, 0), other_institution_female = round(100 * weighted_sum(other_institution_female, .) / total_working_female, 0), returnee_gvt_salary = weighted_median(filter(., displacement_status == "returnee")[["gvt_salary"]], filter(., displacement_status == "returnee"), x_name = "gvt_salary"), returnee_gvt_social_benefits = weighted_median(filter(., displacement_status == "returnee")[["gvt_social_benefits"]], filter(., displacement_status == "returnee"), x_name = "gvt_social_benefits"), returnee_non_gvt_salary = weighted_median(filter(., displacement_status == "returnee")[["non_gvt_salary"]], filter(., displacement_status == "returnee"), x_name = "non_gvt_salary"), returnee_casual_labour = weighted_median(filter(., displacement_status == "returnee")[["casual_labour"]], filter(., displacement_status == "returnee"), x_name = "casual_labour"), returnee_own_business_income = weighted_median(filter(., displacement_status == "returnee")[["own_business_income"]], filter(., displacement_status == "returnee"), x_name = "own_business_income"), returnee_remittances = weighted_median(filter(., displacement_status == "returnee")[["remittances"]], filter(., displacement_status == "returnee"), x_name = "remittances"), returnee_family_support = weighted_median(filter(., displacement_status == "returnee")[["family_support"]], filter(., displacement_status == "returnee"), x_name = "family_support"), returnee_humanitarian_assistance = weighted_median(filter(., displacement_status == "returnee")[["humanitarian_assistance"]], filter(., displacement_status == "returnee"), x_name = "humanitarian_assistance"), returnee_zakat = weighted_median(filter(., displacement_status == "returnee")[["zakat"]], filter(., displacement_status == "returnee"), x_name = "zakat"), returnee_income_other = weighted_median(filter(., displacement_status == 
"returnee")[["income_other"]], filter(., displacement_status == "returnee"), x_name = "income_other"), idp_gvt_salary = weighted_median(filter(., displacement_status == "idp")[["gvt_salary"]], filter(., displacement_status == "idp"), x_name = "gvt_salary"), idp_gvt_social_benefits = weighted_median(filter(., displacement_status == "idp")[["gvt_social_benefits"]], filter(., displacement_status == "idp"), x_name = "gvt_social_benefits"), idp_non_gvt_salary = weighted_median(filter(., displacement_status == "idp")[["non_gvt_salary"]], filter(., displacement_status == "idp"), x_name = "non_gvt_salary"), idp_casual_labour = weighted_median(filter(., displacement_status == "idp")[["casual_labour"]], filter(., displacement_status == "idp"), x_name = "casual_labour"), idp_own_business_income = weighted_median(filter(., displacement_status == "idp")[["own_business_income"]], filter(., displacement_status == "idp"), x_name = "own_business_income"), idp_remittances = weighted_median(filter(., displacement_status == "idp")[["remittances"]], filter(., displacement_status == "idp"), x_name = "remittances"), idp_family_support = weighted_median(filter(., displacement_status == "idp")[["family_support"]], filter(., displacement_status == "idp"), x_name = "family_support"), idp_humanitarian_assistance = weighted_median(filter(., displacement_status == "idp")[["humanitarian_assistance"]], filter(., displacement_status == "idp"), x_name = "humanitarian_assistance"), idp_zakat = weighted_median(filter(., displacement_status == "idp")[["zakat"]], filter(., displacement_status == "idp"), x_name = "zakat"), idp_income_other = weighted_median(filter(., displacement_status == "idp")[["income_other"]], filter(., displacement_status == "idp"), x_name = "income_other"), non_displaced_gvt_salary = weighted_median(filter(., displacement_status == "non_displaced")[["gvt_salary"]], filter(., displacement_status == "non_displaced"), x_name = "gvt_salary"), non_displaced_gvt_social_benefits = 
weighted_median(filter(., displacement_status == "non_displaced")[["gvt_social_benefits"]], filter(., displacement_status == "non_displaced"), x_name = "gvt_social_benefits"), non_displaced_non_gvt_salary = weighted_median(filter(., displacement_status == "non_displaced")[["non_gvt_salary"]], filter(., displacement_status == "non_displaced"), x_name = "non_gvt_salary"), non_displaced_casual_labour = weighted_median(filter(., displacement_status == "non_displaced")[["casual_labour"]], filter(., displacement_status == "non_displaced"), x_name = "casual_labour"), non_displaced_own_business_income = weighted_median(filter(., displacement_status == "non_displaced")[["own_business_income"]], filter(., displacement_status == "non_displaced"), x_name = "own_business_income"), non_displaced_remittances = weighted_median(filter(., displacement_status == "non_displaced")[["remittances"]], filter(., displacement_status == "non_displaced"), x_name = "remittances"), non_displaced_family_support = weighted_median(filter(., displacement_status == "non_displaced")[["family_support"]], filter(., displacement_status == "non_displaced"), x_name = "family_support"), non_displaced_humanitarian_assistance = weighted_median(filter(., displacement_status == "non_displaced")[["humanitarian_assistance"]], filter(., displacement_status == "non_displaced"), x_name = "humanitarian_assistance"), non_displaced_zakat = weighted_median(filter(., displacement_status == "non_displaced")[["zakat"]], filter(., displacement_status == "non_displaced"), x_name = "zakat"), non_displaced_income_other = weighted_median(filter(., displacement_status == "non_displaced")[["income_other"]], filter(., displacement_status == "non_displaced"), x_name = "income_other"), received_cash = round(100 * sum((cash_modality[!is.na(cash_modality)] > 0) * dm_weights(., "cash_modality")) / sum(dm_weights(., "cash_modality")), 0), returnee_cash_challenges = percent_response(filter(., displacement_status == 
"returnee")[["obtaining_cash_challenge"]], filter(., displacement_status == "returnee"), "yes", x_name = "obtaining_cash_challenge"), idp_cash_challenges = percent_response(filter(., displacement_status == "idp")[["obtaining_cash_challenge"]], filter(., displacement_status == "idp"), "yes", x_name = "obtaining_cash_challenge"), non_displaced_cash_challenges = percent_response(filter(., displacement_status == "non_displaced")[["obtaining_cash_challenge"]], filter(., displacement_status == "non_displaced"), "yes", x_name = "obtaining_cash_challenge"), cash_challenge_1_name = select_percents(obtaining_cash_challenge_reasons, 1, ., questions, choices, "label"), cash_challenge_1_pct = select_percents(obtaining_cash_challenge_reasons, 1, ., questions, choices, "percent"), cash_challenge_2_name = select_percents(obtaining_cash_challenge_reasons, 2, ., questions, choices, "label"), cash_challenge_2_pct = select_percents(obtaining_cash_challenge_reasons, 2, ., questions, choices, "percent"), cash_challenge_3_name = select_percents(obtaining_cash_challenge_reasons, 3, ., questions, choices, "label"), cash_challenge_3_pct = select_percents(obtaining_cash_challenge_reasons, 3, ., questions, choices, "percent"), cash_challenge_4_name = select_percents(obtaining_cash_challenge_reasons, 4, ., questions, choices, "label"), cash_challenge_4_pct = select_percents(obtaining_cash_challenge_reasons, 4, ., questions, choices, "percent"), returnee_food_expenditure = weighted_median(filter(., displacement_status == "returnee")[["food_expenditure"]], filter(., displacement_status == "returnee"), x_name = "food_expenditure"), returnee_rent_expenditure = weighted_median(filter(., displacement_status == "returnee")[["rent_expenditure"]], filter(., displacement_status == "returnee"), x_name = "rent_expenditure"), returnee_shelter_maintenance_expenditure = weighted_median(filter(., displacement_status == "returnee")[["shelter_maintenance_expenditure"]], filter(., displacement_status == 
"returnee"), x_name = "shelter_maintenance_expenditure"), returnee_water_expenditure = weighted_median(filter(., displacement_status == "returnee")[["water_expenditure"]], filter(., displacement_status == "returnee"), x_name = "water_expenditure"), returnee_nfi_expenditure = weighted_median(filter(., displacement_status == "returnee")[["nfi_expenditure"]], filter(., displacement_status == "returnee"), x_name = "nfi_expenditure"), returnee_utilities_expenditure = weighted_median(filter(., displacement_status == "returnee")[["utilities_expenditure"]], filter(., displacement_status == "returnee"), x_name = "utilities_expenditure"), returnee_fuel_expenditure = weighted_median(filter(., displacement_status == "returnee")[["fuel_expenditure"]], filter(., displacement_status == "returnee"), x_name = "fuel_expenditure"), returnee_health_related_expenditure = weighted_median(filter(., displacement_status == "returnee")[["health_related_expenditure"]], filter(., displacement_status == "returnee"), x_name = "health_related_expenditure"), returnee_education_related_expenditure = weighted_median(filter(., displacement_status == "returnee")[["education_related_expenditure"]], filter(., displacement_status == "returnee"), x_name = "education_related_expenditure"), returnee_transportation_expenditure = weighted_median(filter(., displacement_status == "returnee")[["transportation_expenditure"]], filter(., displacement_status == "returnee"), x_name = "transportation_expenditure"), returnee_mobile_phone_credit_expenditure = weighted_median(filter(., displacement_status == "returnee")[["mobile_phone_credit_expenditure"]], filter(., displacement_status == "returnee"), x_name = "mobile_phone_credit_expenditure"), returnee_productive_assets_expenditure = weighted_median(filter(., displacement_status == "returnee")[["productive_assets_expenditure"]], filter(., displacement_status == "returnee"), x_name = "productive_assets_expenditure"), returnee_debt_repayment_expenditure = 
weighted_median(filter(., displacement_status == "returnee")[["debt_repayment_expenditure"]], filter(., displacement_status == "returnee"), x_name = "debt_repayment_expenditure"), returnee_other_expenditure = weighted_median(filter(., displacement_status == "returnee")[["other_expenditure"]], filter(., displacement_status == "returnee"), x_name = "other_expenditure"), idp_food_expenditure = weighted_median(filter(., displacement_status == "idp")[["food_expenditure"]], filter(., displacement_status == "idp"), x_name = "food_expenditure"), idp_rent_expenditure = weighted_median(filter(., displacement_status == "idp")[["rent_expenditure"]], filter(., displacement_status == "idp"), x_name = "rent_expenditure"), idp_shelter_maintenance_expenditure = weighted_median(filter(., displacement_status == "idp")[["shelter_maintenance_expenditure"]], filter(., displacement_status == "idp"), x_name = "shelter_maintenance_expenditure"), idp_water_expenditure = weighted_median(filter(., displacement_status == "idp")[["water_expenditure"]], filter(., displacement_status == "idp"), x_name = "water_expenditure"), idp_nfi_expenditure = weighted_median(filter(., displacement_status == "idp")[["nfi_expenditure"]], filter(., displacement_status == "idp"), x_name = "nfi_expenditure"), idp_utilities_expenditure = weighted_median(filter(., displacement_status == "idp")[["utilities_expenditure"]], filter(., displacement_status == "idp"), x_name = "utilities_expenditure"), idp_fuel_expenditure = weighted_median(filter(., displacement_status == "idp")[["fuel_expenditure"]], filter(., displacement_status == "idp"), x_name = "fuel_expenditure"), idp_health_related_expenditure = weighted_median(filter(., displacement_status == "idp")[["health_related_expenditure"]], filter(., displacement_status == "idp"), x_name = "health_related_expenditure"), idp_education_related_expenditure = weighted_median(filter(., displacement_status == "idp")[["education_related_expenditure"]], filter(., 
displacement_status == "idp"), x_name = "education_related_expenditure"), idp_transportation_expenditure = weighted_median(filter(., displacement_status == "idp")[["transportation_expenditure"]], filter(., displacement_status == "idp"), x_name = "transportation_expenditure"), idp_mobile_phone_credit_expenditure = weighted_median(filter(., displacement_status == "idp")[["mobile_phone_credit_expenditure"]], filter(., displacement_status == "idp"), x_name = "mobile_phone_credit_expenditure"), idp_productive_assets_expenditure = weighted_median(filter(., displacement_status == "idp")[["productive_assets_expenditure"]], filter(., displacement_status == "idp"), x_name = "productive_assets_expenditure"), idp_debt_repayment_expenditure = weighted_median(filter(., displacement_status == "idp")[["debt_repayment_expenditure"]], filter(., displacement_status == "idp"), x_name = "debt_repayment_expenditure"), idp_other_expenditure = weighted_median(filter(., displacement_status == "idp")[["other_expenditure"]], filter(., displacement_status == "idp"), x_name = "other_expenditure"), non_displaced_food_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["food_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "food_expenditure"), non_displaced_rent_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["rent_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "rent_expenditure"), non_displaced_shelter_maintenance_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["shelter_maintenance_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "shelter_maintenance_expenditure"), non_displaced_water_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["water_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "water_expenditure"), non_displaced_nfi_expenditure = 
weighted_median(filter(., displacement_status == "non_displaced")[["nfi_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "nfi_expenditure"), non_displaced_utilities_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["utilities_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "utilities_expenditure"), non_displaced_fuel_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["fuel_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "fuel_expenditure"), non_displaced_health_related_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["health_related_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "health_related_expenditure"), non_displaced_education_related_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["education_related_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "education_related_expenditure"), non_displaced_transportation_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["transportation_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "transportation_expenditure"), non_displaced_mobile_phone_credit_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["mobile_phone_credit_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "mobile_phone_credit_expenditure"), non_displaced_productive_assets_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["productive_assets_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "productive_assets_expenditure"), non_displaced_debt_repayment_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["debt_repayment_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = 
"debt_repayment_expenditure"), non_displaced_other_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["other_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "other_expenditure"), payment_modality_1_name = select_percents(hh_payment_modality, 1, ., questions, choices, "label"), payment_modality_1_pct = select_percents(hh_payment_modality, 1, ., questions, choices, "percent"), payment_modality_2_name = select_percents(hh_payment_modality, 2, ., questions, choices, "label"), payment_modality_2_pct = select_percents(hh_payment_modality, 2, ., questions, choices, "percent"), payment_modality_3_name = select_percents(hh_payment_modality, 3, ., questions, choices, "label"), payment_modality_3_pct = select_percents(hh_payment_modality, 3, ., questions, choices, "percent"), payment_modality_4_name = select_percents(hh_payment_modality, 4, ., questions, choices, "label"), payment_modality_4_pct = select_percents(hh_payment_modality, 4, ., questions, choices, "percent"), no_market_access = percent_response(hh_access_marketplace, ., "no"), distance_market_less_15 = percent_response(travel_to_market, ., "less_15min"), distance_market_15_30 = percent_response(travel_to_market, ., "between_15_29min"), distance_market_more_30 = percent_response(travel_to_market, ., "between_30_59min", "between_1_2hours", "more_2hours"), no_barriers_access_market = percent_response(barriers_access_market, ., "no_barriers_access_market"), market_barriers_1_name = select_percents(barriers_access_market, 1, ., questions, choices, "label", exclude = "no_barriers_access_market"), market_barriers_1_pct = select_percents(barriers_access_market, 1, ., questions, choices, "percent", exclude = "no_barriers_access_market"), market_barriers_2_name = select_percents(barriers_access_market, 2, ., questions, choices, "label", exclude = "no_barriers_access_market"), market_barriers_2_pct = select_percents(barriers_access_market, 2, ., questions, choices, 
"percent", exclude = "no_barriers_access_market"), market_barriers_3_name = select_percents(barriers_access_market, 3, ., questions, choices, "label", exclude = "no_barriers_access_market"), market_barriers_3_pct = select_percents(barriers_access_market, 3, ., questions, choices, "percent", exclude = "no_barriers_access_market"), item_barrier_too_expensive = percent_response(barriers_purchasing_items, ., "yes_items_too_expensive"), item_barrier_not_available = percent_response(barriers_purchasing_items, ., "yes_items_not_available"), item_too_expensive_1_name = select_percents(expensive_items_to_afford, 1, ., questions, choices, "label"), item_too_expensive_1_pct = select_percents(expensive_items_to_afford, 1, ., questions, choices, "percent"), item_too_expensive_2_name = select_percents(expensive_items_to_afford, 2, ., questions, choices, "label"), item_too_expensive_2_pct = select_percents(expensive_items_to_afford, 2, ., questions, choices, "percent"), item_too_expensive_3_name = select_percents(expensive_items_to_afford, 3, ., questions, choices, "label"), item_too_expensive_3_pct = select_percents(expensive_items_to_afford, 3, ., questions, choices, "percent"), item_not_available_1_name = select_percents(unabailable_items_marketplace, 1, ., questions, choices, "label"), item_not_available_1_pct = select_percents(unabailable_items_marketplace, 1, ., questions, choices, "percent"), item_not_available_2_name = select_percents(unabailable_items_marketplace, 2, ., questions, choices, "label"), item_not_available_2_pct = select_percents(unabailable_items_marketplace, 2, ., questions, choices, "percent"), item_not_available_3_name = select_percents(unabailable_items_marketplace, 3, ., questions, choices, "label"), item_not_available_3_pct = select_percents(unabailable_items_marketplace, 3, ., questions, choices, "percent"), returnee_cash_coping_stress = percent_response(filter(., displacement_status == "returnee")[["cash_coping"]], filter(., displacement_status == 
"returnee"), "stress", x_name = "cash_coping"), returnee_cash_coping_crisis = percent_response(filter(., displacement_status == "returnee")[["cash_coping"]], filter(., displacement_status == "returnee"), "crisis", x_name = "cash_coping"), returnee_cash_coping_emergency = percent_response(filter(., displacement_status == "returnee")[["cash_coping"]], filter(., displacement_status == "returnee"), "emergency", x_name = "cash_coping"), idp_cash_coping_stress = percent_response(filter(., displacement_status == "idp")[["cash_coping"]], filter(., displacement_status == "idp"), "stress", x_name = "cash_coping"), idp_cash_coping_crisis = percent_response(filter(., displacement_status == "idp")[["cash_coping"]], filter(., displacement_status == "idp"), "crisis", x_name = "cash_coping"), idp_cash_coping_emergency = percent_response(filter(., displacement_status == "idp")[["cash_coping"]], filter(., displacement_status == "idp"), "emergency", x_name = "cash_coping"), non_displaced_cash_coping_stress = percent_response(filter(., displacement_status == "non_displaced")[["cash_coping"]], filter(., displacement_status == "non_displaced"), "stress", x_name = "cash_coping"), non_displaced_cash_coping_crisis = percent_response(filter(., displacement_status == "non_displaced")[["cash_coping"]], filter(., displacement_status == "non_displaced"), "crisis", x_name = "cash_coping"), non_displaced_cash_coping_emergency = percent_response(filter(., displacement_status == "non_displaced")[["cash_coping"]], filter(., displacement_status == "non_displaced"), "emergency", x_name = "cash_coping"), cash_coping_reason_1_name = select_percents(using_cash_coping_mechanism_reasons, 1, ., questions, choices, "label"), cash_coping_reason_1_pct = select_percents(using_cash_coping_mechanism_reasons, 1, ., questions, choices, "percent"), cash_coping_reason_2_name = select_percents(using_cash_coping_mechanism_reasons, 2, ., questions, choices, "label"), cash_coping_reason_2_pct = 
select_percents(using_cash_coping_mechanism_reasons, 2, ., questions, choices, "percent"), cash_coping_reason_3_name = select_percents(using_cash_coping_mechanism_reasons, 3, ., questions, choices, "label"), cash_coping_reason_3_pct = select_percents(using_cash_coping_mechanism_reasons, 3, ., questions, choices, "percent"), cash_coping_reason_4_name = select_percents(using_cash_coping_mechanism_reasons, 4, ., questions, choices, "label"), cash_coping_reason_4_pct = select_percents(using_cash_coping_mechanism_reasons, 4, ., questions, choices, "percent"), cash_coping_reason_5_name = select_percents(using_cash_coping_mechanism_reasons, 5, ., questions, choices, "label"), cash_coping_reason_5_pct = select_percents(using_cash_coping_mechanism_reasons, 5, ., questions, choices, "percent"), cash_coping_reason_6_name = select_percents(using_cash_coping_mechanism_reasons, 6, ., questions, choices, "label"), cash_coping_reason_6_pct = select_percents(using_cash_coping_mechanism_reasons, 6, ., questions, choices, "percent"), apartment_house = percent_response(shelter_type, ., "apartment", "house"), unfinished_room = percent_response(shelter_type, ., "unfinished_rooms"), public_building = percent_response(shelter_type, ., "public_building_not_used_for_shelter"), private_building = percent_response(shelter_type, ., "private_building_not_used_for_shelter"), tent_caravan = percent_response(shelter_type, ., "tent_caravan"), temporary_shelter_ngos = percent_response(shelter_type, ., "temporary_shelter_ngos"), connection_house = percent_response(shelter_type, ., "connection_house"), hotel = percent_response(shelter_type, ., "hotel"), camp_informal_settlement = percent_response(shelter_type, ., "camp_informal_settlement"), other_housing = percent_response(shelter_type, ., "other"), returnee_occupancy_owned = percent_response(filter(., displacement_status == "returnee")[["occupancy_status"]], filter(., displacement_status == "returnee"), "ownership", x_name = "occupancy_status"), 
returnee_occupancy_rented = percent_response(filter(., displacement_status == "returnee")[["occupancy_status"]], filter(., displacement_status == "returnee"), "rental_written_contract", "rental_verbal_agreement", x_name = "occupancy_status"), returnee_occupancy_hosted_free = percent_response(filter(., displacement_status == "returnee")[["occupancy_status"]], filter(., displacement_status == "returnee"), "hosted_free", x_name = "occupancy_status"), returnee_occupancy_other = percent_response(filter(., displacement_status == "returnee")[["occupancy_status"]], filter(., displacement_status == "returnee"), "squatting", "housing_provided_public_authority", "housing_provided_employer", "other", x_name = "occupancy_status"), idp_occupancy_owned = percent_response(filter(., displacement_status == "idp")[["occupancy_status"]], filter(., displacement_status == "idp"), "ownership", x_name = "occupancy_status"), idp_occupancy_rented = percent_response(filter(., displacement_status == "idp")[["occupancy_status"]], filter(., displacement_status == "idp"), "rental_written_contract", "rental_verbal_agreement", x_name = "occupancy_status"), idp_occupancy_hosted_free = percent_response(filter(., displacement_status == "idp")[["occupancy_status"]], filter(., displacement_status == "idp"), "hosted_free", x_name = "occupancy_status"), idp_occupancy_other = percent_response(filter(., displacement_status == "idp")[["occupancy_status"]], filter(., displacement_status == "idp"), "squatting", "housing_provided_public_authority", "housing_provided_employer", "other", x_name = "occupancy_status"), non_displaced_occupancy_owned = percent_response(filter(., displacement_status == "non_displaced")[["occupancy_status"]], filter(., displacement_status == "non_displaced"), "ownership", x_name = "occupancy_status"), non_displaced_occupancy_rented = percent_response(filter(., displacement_status == "non_displaced")[["occupancy_status"]], filter(., displacement_status == "non_displaced"), 
"rental_written_contract", "rental_verbal_agreement", x_name = "occupancy_status"), non_displaced_occupancy_hosted_free = percent_response(filter(., displacement_status == "non_displaced")[["occupancy_status"]], filter(., displacement_status == "non_displaced"), "hosted_free", x_name = "occupancy_status"), non_displaced_occupancy_other = percent_response(filter(., displacement_status == "non_displaced")[["occupancy_status"]], filter(., displacement_status == "non_displaced"), "squatting", "housing_provided_public_authority", "housing_provided_employer", "other", x_name = "occupancy_status"), returnee_rental_cost = weighted_median(filter(., displacement_status == "returnee")[["rental_cost"]], filter(., displacement_status == "returnee"), "rental_cost"), idp_rental_cost = weighted_median(filter(., displacement_status == "idp")[["rental_cost"]], filter(., displacement_status == "idp"), "rental_cost"), non_displaced_rental_cost = weighted_median(filter(., displacement_status == "non_displaced")[["rental_cost"]], filter(., displacement_status == "non_displaced"), "rental_cost"), threatened_eviction = percent_response(eviction_threat, ., "yes_threatened_with_eviction"), recently_evicted = percent_response(eviction_threat, ., "yes_recently_evicted"), eviction_reason_1_name = select_percents(eviction_shelter_reasons, 1, ., questions, choices, "label"), eviction_reason_1_pct = select_percents(eviction_shelter_reasons, 1, ., questions, choices, "percent"), eviction_reason_2_name = select_percents(eviction_shelter_reasons, 2, ., questions, choices, "label"), eviction_reason_2_pct = select_percents(eviction_shelter_reasons, 2, ., questions, choices, "percent"), eviction_reason_3_name = select_percents(eviction_shelter_reasons, 3, ., questions, choices, "label"), eviction_reason_3_pct = select_percents(eviction_shelter_reasons, 3, ., questions, choices, "percent"), nfi_need = percent_response(nfi_need, ., "has_nfi_need"), power_cuts = weighted_median(power_cuts, .), 
phone_coverage_reliable = percent_response(phone_network_coverage, ., "coverage_exists_reliable"), phone_coverage_not_reliable = percent_response(phone_network_coverage, ., "coverage_exists_not_reliable"), phone_coverage_none = percent_response(phone_network_coverage, ., "coverage_doesnt_exist"), water_source_public_network = percent_response(primary_drinkingwater_source, ., "public_network"), water_source_bottles = percent_response(primary_drinkingwater_source, ., "bottled_water"), water_source_trucks = percent_response(primary_drinkingwater_source, ., "water_trucking"), water_source_other = percent_response(primary_drinkingwater_source, ., "public_tap", "protected_well", "unprotected_well", "surface_water", "rainwater", "other"), water_dist_100m = percent_response(distance_drinkingwater, ., "upto_100m"), water_dist_200m = percent_response(distance_drinkingwater, ., "between_101_200m"), water_dist_300m = percent_response(distance_drinkingwater, ., "between_201_300m"), water_dist_400m = percent_response(distance_drinkingwater, ., "between_301_400m"), water_dist_500m = percent_response(distance_drinkingwater, ., "between_401_500m"), water_dist_more_500m = percent_response(distance_drinkingwater, ., "more_500m"), returnee_water_insufficient = percent_response(filter(., displacement_status == "returnee")[["unsufficient_quantity_water"]], filter(., displacement_status == "returnee"), "yes", x_name = "unsufficient_quantity_water"), idp_water_insufficient = percent_response(filter(., displacement_status == "idp")[["unsufficient_quantity_water"]], filter(., displacement_status == "idp"), "yes", x_name = "unsufficient_quantity_water"), non_displaced_water_insufficient = percent_response(filter(., displacement_status == "non_displaced")[["unsufficient_quantity_water"]], filter(., displacement_status == "non_displaced"), "yes", x_name = "unsufficient_quantity_water"), water_access_every_day = percent_response(access_public_water_network, ., "every_day"), 
water_access_most_days = percent_response(access_public_water_network, ., "most_days"), water_access_rarely_none = percent_response(access_public_water_network, ., "rarely", "not_at_all"), designated_waste_disposal = percent_response(trash_disposal, ., "collected_municipality_waste_management_service", "designated_place_waste_disposal"), healthcare_challenge_yes = percent_response(access_healthcare_challenge, ., "yes"), healthcare_challenge_no = percent_response(access_healthcare_challenge, ., "no"), healthcare_challenge_dk_na = percent_response(access_healthcare_challenge, ., "dk", "dwta"), returnee_healthcare_challenge_1_name = select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), returnee_healthcare_challenge_1_pct = select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), returnee_healthcare_challenge_2_name = select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), returnee_healthcare_challenge_2_pct = select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), returnee_healthcare_challenge_3_name = select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), returnee_healthcare_challenge_3_pct = 
select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_1_name = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_1_pct = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_2_name = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_2_pct = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_3_name = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_3_pct = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_1_name = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", 
x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_2_name = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_3_name = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), health_time_more_30_min = percent_response(travel_to_healthcare_provider, ., "between_30_59min", "between_1_2hours", "more_2hours"), bottle_fed_child = percent_response(bottle_fed_child, ., "bottle_fed_child"), unable_vaccinate = percent_response(unable_vaccinate, ., "unable_vaccinate"), vaccinate_facility_time_more_30_min = percent_response(time_travel_vaccinations_facility, ., "between_30_59min", "between_1_2hours", "more_2hours"), returnee_chronic_disease = percent_response(filter(., displacement_status == 
"returnee")[["individual_suffering_chronicdisease"]], filter(., displacement_status == "returnee"), "yes", x_name = "individual_suffering_chronicdisease"), returnee_mental_illness = percent_response(filter(., displacement_status == "returnee")[["individual_suffering_mentaldisorder"]], filter(., displacement_status == "returnee"), "yes", x_name = "individual_suffering_mentaldisorder"), returnee_physical_difficulty = percent_response(filter(., displacement_status == "returnee")[["physical_cognitive_difficulties"]], filter(., displacement_status == "returnee"), "yes_male_infants", "yes_male_children", "yes_male_youths", "yes_male_adults", "yes_male_elderly", "yes_female_infants", "yes_female_children", "yes_female_youths", "yes_female_adults", "yes_female_elderly", x_name = "physical_cognitive_difficulties"), returnee_behavior_change = percent_response(filter(., displacement_status == "returnee")[["changes_behaviors_minors"]], filter(., displacement_status == "returnee"), "yes", x_name = "changes_behaviors_minors"), idp_chronic_disease = percent_response(filter(., displacement_status == "idp")[["individual_suffering_chronicdisease"]], filter(., displacement_status == "idp"), "yes", x_name = "individual_suffering_chronicdisease"), idp_mental_illness = percent_response(filter(., displacement_status == "idp")[["individual_suffering_mentaldisorder"]], filter(., displacement_status == "idp"), "yes", x_name = "individual_suffering_mentaldisorder"), idp_physical_difficulty = percent_response(filter(., displacement_status == "idp")[["physical_cognitive_difficulties"]], filter(., displacement_status == "idp"), "yes_male_infants", "yes_male_children", "yes_male_youths", "yes_male_adults", "yes_male_elderly", "yes_female_infants", "yes_female_children", "yes_female_youths", "yes_female_adults", "yes_female_elderly", x_name = "physical_cognitive_difficulties"), idp_behavior_change = percent_response(filter(., displacement_status == "idp")[["changes_behaviors_minors"]], filter(., 
displacement_status == "idp"), "yes", x_name = "changes_behaviors_minors"), non_displaced_chronic_disease = percent_response(filter(., displacement_status == "non_displaced")[["individual_suffering_chronicdisease"]], filter(., displacement_status == "non_displaced"), "yes", x_name = "individual_suffering_chronicdisease"), non_displaced_mental_illness = percent_response(filter(., displacement_status == "non_displaced")[["individual_suffering_mentaldisorder"]], filter(., displacement_status == "non_displaced"), "yes", x_name = "individual_suffering_mentaldisorder"), non_displaced_physical_difficulty = percent_response(filter(., displacement_status == "non_displaced")[["physical_cognitive_difficulties"]], filter(., displacement_status == "non_displaced"), "yes_male_infants", "yes_male_children", "yes_male_youths", "yes_male_adults", "yes_male_elderly", "yes_female_infants", "yes_female_children", "yes_female_youths", "yes_female_adults", "yes_female_elderly", x_name = "physical_cognitive_difficulties"), non_displaced_behavior_change = percent_response(filter(., displacement_status == "non_displaced")[["changes_behaviors_minors"]], filter(., displacement_status == "non_displaced"), "yes", x_name = "changes_behaviors_minors"), disease_1_name = select_percents(type_chronicdisease, 1, ., questions, choices, "label"), disease_1_pct = select_percents(type_chronicdisease, 1, ., questions, choices, "percent"), disease_2_name = select_percents(type_chronicdisease, 2, ., questions, choices, "label"), disease_2_pct = select_percents(type_chronicdisease, 2, ., questions, choices, "percent"), disease_3_name = select_percents(type_chronicdisease, 3, ., questions, choices, "label"), disease_3_pct = select_percents(type_chronicdisease, 3, ., questions, choices, "percent"), total_assistance = weighted_sum(total_assistance, .), no_daily_difficulty = round(100 * weighted_sum(no_daily_difficulty, .) 
/ total_assistance), minor_daily_difficulty = round(100 * weighted_sum(minor_daily_difficulty, .) / total_assistance), daily_needs_assistance = round(100 * weighted_sum(daily_needs_assistance, .) / total_assistance), neg_behavior_0_12_num_1_name = select_percents(negative_behaviour_0_12, 1, ., questions, choices, "label"), neg_behavior_0_12_num_1_pct = select_percents(negative_behaviour_0_12, 1, ., questions, choices, "percent"), neg_behavior_0_12_num_2_name = select_percents(negative_behaviour_0_12, 2, ., questions, choices, "label"), neg_behavior_0_12_num_2_pct = select_percents(negative_behaviour_0_12, 2, ., questions, choices, "percent"), neg_behavior_0_12_num_3_name = select_percents(negative_behaviour_0_12, 3, ., questions, choices, "label"), neg_behavior_0_12_num_3_pct = select_percents(negative_behaviour_0_12, 3, ., questions, choices, "percent"), neg_behavior_0_12_num_4_name = select_percents(negative_behaviour_0_12, 4, ., questions, choices, "label"), neg_behavior_0_12_num_4_pct = select_percents(negative_behaviour_0_12, 4, ., questions, choices, "percent"), neg_behavior_0_12_num_5_name = select_percents(negative_behaviour_0_12, 5, ., questions, choices, "label"), neg_behavior_0_12_num_5_pct = select_percents(negative_behaviour_0_12, 5, ., questions, choices, "percent"), neg_behavior_13_17_num_1_name = select_percents(negative_behaviour_13_17, 1, ., questions, choices, "label"), neg_behavior_13_17_num_1_pct = select_percents(negative_behaviour_13_17, 1, ., questions, choices, "percent"), neg_behavior_13_17_num_2_name = select_percents(negative_behaviour_13_17, 2, ., questions, choices, "label"), neg_behavior_13_17_num_2_pct = select_percents(negative_behaviour_13_17, 2, ., questions, choices, "percent"), neg_behavior_13_17_num_3_name = select_percents(negative_behaviour_13_17, 3, ., questions, choices, "label"), neg_behavior_13_17_num_3_pct = select_percents(negative_behaviour_13_17, 3, ., questions, choices, "percent"), neg_behavior_13_17_num_4_name = 
select_percents(negative_behaviour_13_17, 4, ., questions, choices, "label"), neg_behavior_13_17_num_4_pct = select_percents(negative_behaviour_13_17, 4, ., questions, choices, "percent"), neg_behavior_13_17_num_5_name = select_percents(negative_behaviour_13_17, 5, ., questions, choices, "label"), neg_behavior_13_17_num_5_pct = select_percents(negative_behaviour_13_17, 5, ., questions, choices, "percent"), returnee_child_male = weighted_sum(filter(., displacement_status == "returnee")[["nb_children_male_edu"]], filter(., displacement_status == "returnee"), x_name = "nb_children_male_edu"), returnee_enrolled_child_male = weighted_sum(filter(., displacement_status == "returnee")[["enrolled_school_male_6_14_edu"]], filter(., displacement_status == "returnee"), x_name = "enrolled_school_male_6_14_edu"), returnee_enrolled_child_male_pct = round(100 * returnee_enrolled_child_male / returnee_child_male, 0), returnee_attending_child_male = weighted_sum(filter(., displacement_status == "returnee")[["attended_school_male_6_14"]], filter(., displacement_status == "returnee"), x_name = "attended_school_male_6_14"), returnee_attending_child_male_pct = round(100 * returnee_attending_child_male / returnee_enrolled_child_male, 0), returnee_youth_male = weighted_sum(filter(., displacement_status == "returnee")[["nb_youth_male_edu"]], filter(., displacement_status == "returnee"), x_name = "nb_youth_male_edu"), returnee_enrolled_youth_male = weighted_sum(filter(., displacement_status == "returnee")[["enrolled_school_male_15_17_edu"]], filter(., displacement_status == "returnee"), x_name = "enrolled_school_male_15_17_edu"), returnee_enrolled_youth_male_pct = round(100 * returnee_enrolled_youth_male / returnee_youth_male, 0), returnee_attending_youth_male = weighted_sum(filter(., displacement_status == "returnee")[["attended_school_male_15_17"]], filter(., displacement_status == "returnee"), x_name = "attended_school_male_15_17"), returnee_attending_youth_male_pct = round(100 * 
returnee_attending_youth_male / returnee_enrolled_youth_male, 0), returnee_child_female = weighted_sum(filter(., displacement_status == "returnee")[["nb_children_female_edu"]], filter(., displacement_status == "returnee"), x_name = "nb_children_female_edu"), returnee_enrolled_child_female = weighted_sum(filter(., displacement_status == "returnee")[["enrolled_school_female_6_14_edu"]], filter(., displacement_status == "returnee"), x_name = "enrolled_school_female_6_14_edu"), returnee_enrolled_child_female_pct = round(100 * returnee_enrolled_child_female / returnee_child_female, 0), returnee_attending_child_female = weighted_sum(filter(., displacement_status == "returnee")[["attended_school_female_6_14"]], filter(., displacement_status == "returnee"), x_name = "attended_school_female_6_14"), returnee_attending_child_female_pct = round(100 * returnee_attending_child_female / returnee_enrolled_child_female, 0), returnee_youth_female = weighted_sum(filter(., displacement_status == "returnee")[["nb_youth_female_edu"]], filter(., displacement_status == "returnee"), x_name = "nb_youth_female_edu"), returnee_enrolled_youth_female = weighted_sum(filter(., displacement_status == "returnee")[["enrolled_school_female_15_17_edu"]], filter(., displacement_status == "returnee"), x_name = "enrolled_school_female_15_17_edu"), returnee_enrolled_youth_female_pct = round(100 * returnee_enrolled_youth_female / returnee_youth_female, 0), returnee_attending_youth_female = weighted_sum(filter(., displacement_status == "returnee")[["attended_school_female_15_17"]], filter(., displacement_status == "returnee"), x_name = "attended_school_female_15_17"), returnee_attending_youth_female_pct = round(100 * returnee_attending_youth_female / returnee_enrolled_youth_female, 0), idp_child_male = weighted_sum(filter(., displacement_status == "idp")[["nb_children_male_edu"]], filter(., displacement_status == "idp"), x_name = "nb_children_male_edu"), idp_enrolled_child_male = weighted_sum(filter(., 
displacement_status == "idp")[["enrolled_school_male_6_14_edu"]], filter(., displacement_status == "idp"), x_name = "enrolled_school_male_6_14_edu"), idp_enrolled_child_male_pct = round(100 * idp_enrolled_child_male / idp_child_male, 0), idp_attending_child_male = weighted_sum(filter(., displacement_status == "idp")[["attended_school_male_6_14"]], filter(., displacement_status == "idp"), x_name = "attended_school_male_6_14"), idp_attending_child_male_pct = round(100 * idp_attending_child_male / idp_enrolled_child_male, 0), idp_youth_male = weighted_sum(filter(., displacement_status == "idp")[["nb_youth_male_edu"]], filter(., displacement_status == "idp"), x_name = "nb_youth_male_edu"), idp_enrolled_youth_male = weighted_sum(filter(., displacement_status == "idp")[["enrolled_school_male_15_17_edu"]], filter(., displacement_status == "idp"), x_name = "enrolled_school_male_15_17_edu"), idp_enrolled_youth_male_pct = round(100 * idp_enrolled_youth_male / idp_youth_male, 0), idp_attending_youth_male = weighted_sum(filter(., displacement_status == "idp")[["attended_school_male_15_17"]], filter(., displacement_status == "idp"), x_name = "attended_school_male_15_17"), idp_attending_youth_male_pct = round(100 * idp_attending_youth_male / idp_enrolled_youth_male, 0), idp_child_female = weighted_sum(filter(., displacement_status == "idp")[["nb_children_female_edu"]], filter(., displacement_status == "idp"), x_name = "nb_children_female_edu"), idp_enrolled_child_female = weighted_sum(filter(., displacement_status == "idp")[["enrolled_school_female_6_14_edu"]], filter(., displacement_status == "idp"), x_name = "enrolled_school_female_6_14_edu"), idp_enrolled_child_female_pct = round(100 * idp_enrolled_child_female / idp_child_female, 0), idp_attending_child_female = weighted_sum(filter(., displacement_status == "idp")[["attended_school_female_6_14"]], filter(., displacement_status == "idp"), x_name = "attended_school_female_6_14"), idp_attending_child_female_pct = round(100 * 
idp_attending_child_female / idp_enrolled_child_female, 0), idp_youth_female = weighted_sum(filter(., displacement_status == "idp")[["nb_youth_female_edu"]], filter(., displacement_status == "idp"), x_name = "nb_youth_female_edu"), idp_enrolled_youth_female = weighted_sum(filter(., displacement_status == "idp")[["enrolled_school_female_15_17_edu"]], filter(., displacement_status == "idp"), x_name = "enrolled_school_female_15_17_edu"), idp_enrolled_youth_female_pct = round(100 * idp_enrolled_youth_female / idp_youth_female, 0), idp_attending_youth_female = weighted_sum(filter(., displacement_status == "idp")[["attended_school_female_15_17"]], filter(., displacement_status == "idp"), x_name = "attended_school_female_15_17"), idp_attending_youth_female_pct = round(100 * idp_attending_youth_female / idp_enrolled_youth_female, 0), non_displaced_child_male = weighted_sum(filter(., displacement_status == "non_displaced")[["nb_children_male_edu"]], filter(., displacement_status == "non_displaced"), x_name = "nb_children_male_edu"), non_displaced_enrolled_child_male = weighted_sum(filter(., displacement_status == "non_displaced")[["enrolled_school_male_6_14_edu"]], filter(., displacement_status == "non_displaced"), x_name = "enrolled_school_male_6_14_edu"), non_displaced_enrolled_child_male_pct = round(100 * non_displaced_enrolled_child_male / non_displaced_child_male, 0), non_displaced_attending_child_male = weighted_sum(filter(., displacement_status == "non_displaced")[["attended_school_male_6_14"]], filter(., displacement_status == "non_displaced"), x_name = "attended_school_male_6_14"), non_displaced_attending_child_male_pct = round(100 * non_displaced_attending_child_male / non_displaced_enrolled_child_male, 0), non_displaced_youth_male = weighted_sum(filter(., displacement_status == "non_displaced")[["nb_youth_male_edu"]], filter(., displacement_status == "non_displaced"), x_name = "nb_youth_male_edu"), non_displaced_enrolled_youth_male = weighted_sum(filter(., 
displacement_status == "non_displaced")[["enrolled_school_male_15_17_edu"]], filter(., displacement_status == "non_displaced"), x_name = "enrolled_school_male_15_17_edu"), non_displaced_enrolled_youth_male_pct = round(100 * non_displaced_enrolled_youth_male / non_displaced_youth_male, 0), non_displaced_attending_youth_male = weighted_sum(filter(., displacement_status == "non_displaced")[["attended_school_male_15_17"]], filter(., displacement_status == "non_displaced"), x_name = "attended_school_male_15_17"), non_displaced_attending_youth_male_pct = round(100 * non_displaced_attending_youth_male / non_displaced_enrolled_youth_male, 0), non_displaced_child_female = weighted_sum(filter(., displacement_status == "non_displaced")[["nb_children_female_edu"]], filter(., displacement_status == "non_displaced"), x_name = "nb_children_female_edu"), non_displaced_enrolled_child_female = weighted_sum(filter(., displacement_status == "non_displaced")[["enrolled_school_female_6_14_edu"]], filter(., displacement_status == "non_displaced"), x_name = "enrolled_school_female_6_14_edu"), non_displaced_enrolled_child_female_pct = round(100 * non_displaced_enrolled_child_female / non_displaced_child_female, 0), non_displaced_attending_child_female = weighted_sum(filter(., displacement_status == "non_displaced")[["attended_school_female_6_14"]], filter(., displacement_status == "non_displaced"), x_name = "attended_school_female_6_14"), non_displaced_attending_child_female_pct = round(100 * non_displaced_attending_child_female / non_displaced_enrolled_child_female, 0), non_displaced_youth_female = weighted_sum(filter(., displacement_status == "non_displaced")[["nb_youth_female_edu"]], filter(., displacement_status == "non_displaced"), x_name = "nb_youth_female_edu"), non_displaced_enrolled_youth_female = weighted_sum(filter(., displacement_status == "non_displaced")[["enrolled_school_female_15_17_edu"]], filter(., displacement_status == "non_displaced"), x_name = 
"enrolled_school_female_15_17_edu"), non_displaced_enrolled_youth_female_pct = round(100 * non_displaced_enrolled_youth_female / non_displaced_youth_female, 0), non_displaced_attending_youth_female = weighted_sum(filter(., displacement_status == "non_displaced")[["attended_school_female_15_17"]], filter(., displacement_status == "non_displaced"), x_name = "attended_school_female_15_17"), non_displaced_attending_youth_female_pct = round(100 * non_displaced_attending_youth_female / non_displaced_enrolled_youth_female, 0), attendance_issue_1_name = select_percents(issues_faced_attending_school, 1, ., questions, choices, "label"), attendance_issue_1_pct = select_percents(issues_faced_attending_school, 1, ., questions, choices, "percent"), attendance_issue_2_name = select_percents(issues_faced_attending_school, 2, ., questions, choices, "label"), attendance_issue_2_pct = select_percents(issues_faced_attending_school, 2, ., questions, choices, "percent"), attendance_issue_3_name = select_percents(issues_faced_attending_school, 3, ., questions, choices, "label"), attendance_issue_3_pct = select_percents(issues_faced_attending_school, 3, ., questions, choices, "percent"), no_attendance_reason_1_name = select_percents(droppedout_school_reasons, 1, ., questions, choices, "label"), no_attendance_reason_1_pct = select_percents(droppedout_school_reasons, 1, ., questions, choices, "percent"), no_attendance_reason_2_name = select_percents(droppedout_school_reasons, 2, ., questions, choices, "label"), no_attendance_reason_2_pct = select_percents(droppedout_school_reasons, 2, ., questions, choices, "percent"), no_attendance_reason_3_name = select_percents(droppedout_school_reasons, 3, ., questions, choices, "label"), no_attendance_reason_3_pct = select_percents(droppedout_school_reasons, 3, ., questions, choices, "percent"), no_attendance_reason_4_name = select_percents(droppedout_school_reasons, 4, ., questions, choices, "label"), no_attendance_reason_4_pct = 
select_percents(droppedout_school_reasons, 4, ., questions, choices, "percent"), no_attendance_reason_5_name = select_percents(droppedout_school_reasons, 5, ., questions, choices, "label"), no_attendance_reason_5_pct = select_percents(droppedout_school_reasons, 5, ., questions, choices, "percent"), returnee_nonformal_school = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "yes_nonformal_classes_byschool", x_name = "nonformal_education"), returnee_nonformal_community = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "yes_nonformal_classes_bycommunity", x_name = "nonformal_education"), returnee_nonformal_priv_bus = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "yes_nonformal_privatebusiness", x_name = "nonformal_education"), returnee_nonformal_self = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "yes_selflearning", x_name = "nonformal_education"), returnee_nonformal_ngo = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "yes_assistance_ngos", x_name = "nonformal_education"), returnee_nonformal_other = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "other", x_name = "nonformal_education"), returnee_nonformal_total = returnee_nonformal_ngo + returnee_nonformal_community + returnee_nonformal_priv_bus + returnee_nonformal_school + returnee_nonformal_self + returnee_nonformal_other, idp_nonformal_school = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "yes_nonformal_classes_byschool", 
x_name = "nonformal_education"), idp_nonformal_community = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "yes_nonformal_classes_bycommunity", x_name = "nonformal_education"), idp_nonformal_priv_bus = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "yes_nonformal_privatebusiness", x_name = "nonformal_education"), idp_nonformal_self = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "yes_selflearning", x_name = "nonformal_education"), idp_nonformal_ngo = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "yes_assistance_ngos", x_name = "nonformal_education"), idp_nonformal_other = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "other", x_name = "nonformal_education"), idp_nonformal_total = idp_nonformal_ngo + idp_nonformal_community + idp_nonformal_priv_bus + idp_nonformal_school + idp_nonformal_self + idp_nonformal_other, non_displaced_nonformal_school = percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "yes_nonformal_classes_byschool", x_name = "nonformal_education"), non_displaced_nonformal_community = percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "yes_nonformal_classes_bycommunity", x_name = "nonformal_education"), non_displaced_nonformal_priv_bus = percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "yes_nonformal_privatebusiness", x_name = "nonformal_education"), non_displaced_nonformal_self = 
percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "yes_selflearning", x_name = "nonformal_education"), non_displaced_nonformal_ngo = percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "yes_assistance_ngos", x_name = "nonformal_education"), non_displaced_nonformal_other = percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "other", x_name = "nonformal_education"), non_displaced_nonformal_total = non_displaced_nonformal_ngo + non_displaced_nonformal_community + non_displaced_nonformal_priv_bus + non_displaced_nonformal_school + non_displaced_nonformal_self + non_displaced_nonformal_other, returnee_document_need_1_name = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "document_needs"), returnee_document_need_1_pct = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 1, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "document_needs"), returnee_document_need_2_name = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 2, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "document_needs"), returnee_document_need_2_pct = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "document_needs"), returnee_document_need_3_name = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 3, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "document_needs"), 
returnee_document_need_3_pct = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "document_needs"), idp_document_need_1_name = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "document_needs"), idp_document_need_1_pct = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 1, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "document_needs"), idp_document_need_2_name = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 2, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "document_needs"), idp_document_need_2_pct = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "document_needs"), idp_document_need_3_name = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "document_needs"), idp_document_need_3_pct = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "document_needs"), non_displaced_document_need_1_name = select_percents(filter(., displacement_status == "non_displaced")[["document_needs"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "document_needs"), non_displaced_document_need_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["document_needs"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "document_needs"), non_displaced_document_need_2_name = select_percents(filter(., 
displacement_status == "non_displaced")[["document_needs"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "document_needs"), non_displaced_document_need_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["document_needs"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "document_needs"), non_displaced_document_need_3_name = select_percents(filter(., displacement_status == "non_displaced")[["document_needs"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "document_needs"), non_displaced_document_need_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["document_needs"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "document_needs"), returnee_doc_loss_impact_1_name = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "losing_legal_docs_impact"), returnee_doc_loss_impact_1_pct = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), returnee_doc_loss_impact_2_name = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "losing_legal_docs_impact"), returnee_doc_loss_impact_2_pct = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), returnee_doc_loss_impact_3_name = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 3, filter(., 
displacement_status == "returnee"), questions, choices, "label", x_name = "losing_legal_docs_impact"), returnee_doc_loss_impact_3_pct = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_1_name = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_1_pct = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_2_name = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_2_pct = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_3_name = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_3_pct = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), non_displaced_doc_loss_impact_1_name = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "losing_legal_docs_impact"), 
non_displaced_doc_loss_impact_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), non_displaced_doc_loss_impact_2_name = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "losing_legal_docs_impact"), non_displaced_doc_loss_impact_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), non_displaced_doc_loss_impact_3_name = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "losing_legal_docs_impact"), non_displaced_doc_loss_impact_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), aware_explosives = percent_response(presence_explosive_hazards, ., "yes"), explosive_action_1_name = select_percents(presence_explosive_hazards_actions, 1, ., questions, choices, "label"), explosive_action_1_pct = select_percents(presence_explosive_hazards_actions, 1, ., questions, choices, "percent"), explosive_action_2_name = select_percents(presence_explosive_hazards_actions, 2, ., questions, choices, "label"), explosive_action_2_pct = select_percents(presence_explosive_hazards_actions, 2, ., questions, choices, "percent"), explosive_action_3_name = select_percents(presence_explosive_hazards_actions, 3, ., questions, choices, "label"), explosive_action_3_pct = 
select_percents(presence_explosive_hazards_actions, 3, ., questions, choices, "percent"), explosive_action_3_other_pct = 100 - explosive_action_1_pct - explosive_action_2_pct - explosive_action_3_pct, no_explosive_awareness = percent_response(explosive_hazards_risk_awareness, ., "no"), member_killed_injured = percent_response(explosive_hazards_injured, ., "yes_adult_injured", "yes_adult_killes", "yes_children_injured", "yes_children_killed"), missing_members = percent_response(missing_family_members, ., "yes"), movement_restrictions = percent_response(movement_restrictions, ., "yes"), returnee_priority_need_1_name = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "priority_needs"), returnee_priority_need_1_pct = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "priority_needs"), returnee_priority_need_2_name = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "priority_needs"), returnee_priority_need_2_pct = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "priority_needs"), returnee_priority_need_3_name = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 3, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "priority_needs"), returnee_priority_need_3_pct = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "priority_needs"), idp_priority_need_1_name = select_percents(filter(., displacement_status == 
"idp")[["priority_needs"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "priority_needs"), idp_priority_need_1_pct = select_percents(filter(., displacement_status == "idp")[["priority_needs"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "priority_needs"), idp_priority_need_2_name = select_percents(filter(., displacement_status == "idp")[["priority_needs"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "priority_needs"), idp_priority_need_2_pct = select_percents(filter(., displacement_status == "idp")[["priority_needs"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "priority_needs"), idp_priority_need_3_name = select_percents(filter(., displacement_status == "idp")[["priority_needs"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "priority_needs"), idp_priority_need_3_pct = select_percents(filter(., displacement_status == "idp")[["priority_needs"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "priority_needs"), non_displaced_priority_need_1_name = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "priority_needs"), non_displaced_priority_need_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "priority_needs"), non_displaced_priority_need_2_name = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "priority_needs"), non_displaced_priority_need_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 2, filter(., 
displacement_status == "non_displaced"), questions, choices, "percent", x_name = "priority_needs"), non_displaced_priority_need_3_name = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "priority_needs"), non_displaced_priority_need_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "priority_needs"), received_assistance = percent_response(received_humanitarian_assistance, ., "yes"), rec_ass_type_1_name = select_percents(modality_assistance_received, 1, ., questions, choices, "label"), rec_ass_type_1_pct = select_percents(modality_assistance_received, 1, ., questions, choices, "percent"), rec_ass_type_2_name = select_percents(modality_assistance_received, 2, ., questions, choices, "label"), rec_ass_type_2_pct = select_percents(modality_assistance_received, 2, ., questions, choices, "percent"), rec_ass_type_3_name = select_percents(modality_assistance_received, 3, ., questions, choices, "label"), rec_ass_type_3_pct = select_percents(modality_assistance_received, 3, ., questions, choices, "percent"), rec_ass_type_4_name = select_percents(modality_assistance_received, 4, ., questions, choices, "label"), rec_ass_type_4_pct = select_percents(modality_assistance_received, 4, ., questions, choices, "percent"), rec_ass_type_5_name = select_percents(modality_assistance_received, 5, ., questions, choices, "label"), rec_ass_type_5_pct = select_percents(modality_assistance_received, 5, ., questions, choices, "percent"), assistance_satisfied = percent_response(humanitarian_assistance_satisfaction, ., "yes"), returnee_ass_barrier_1_name = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", 
x_name = "humanitarian_assistance_barriers"), returnee_ass_barrier_1_pct = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), returnee_ass_barrier_2_name = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), returnee_ass_barrier_2_pct = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), returnee_ass_barrier_3_name = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 3, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), returnee_ass_barrier_3_pct = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), idp_ass_barrier_1_name = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), idp_ass_barrier_1_pct = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), idp_ass_barrier_2_name = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "idp"), questions, choices, "label", x_name = 
"humanitarian_assistance_barriers"), idp_ass_barrier_2_pct = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), idp_ass_barrier_3_name = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), idp_ass_barrier_3_pct = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_1_name = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_2_name = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_3_name = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 3, filter(., 
displacement_status == "non_displaced"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), returnee_prefer_ass_1_name = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "preferred_assistance_modality"), returnee_prefer_ass_1_pct = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 1, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "preferred_assistance_modality"), returnee_prefer_ass_2_name = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "preferred_assistance_modality"), returnee_prefer_ass_2_pct = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "preferred_assistance_modality"), returnee_prefer_ass_3_name = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "preferred_assistance_modality"), returnee_prefer_ass_3_pct = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "preferred_assistance_modality"), idp_prefer_ass_1_name = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 1, filter(., 
displacement_status == "idp"), questions, choices, "label", x_name = "preferred_assistance_modality"), idp_prefer_ass_1_pct = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 1, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "preferred_assistance_modality"), idp_prefer_ass_2_name = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "preferred_assistance_modality"), idp_prefer_ass_2_pct = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "preferred_assistance_modality"), idp_prefer_ass_3_name = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "preferred_assistance_modality"), idp_prefer_ass_3_pct = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_1_name = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_2_name = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "non_displaced"), questions, 
choices, "label", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_3_name = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "preferred_assistance_modality")) %>% mutate(returnee_adult_perm_job = round(100 * returnee_adult_perm_job / returnee_adults, 1), returnee_adult_temp_job = round(100 * returnee_adult_temp_job / returnee_adults, 1), returnee_adult_daily_labour = round(100 * returnee_adult_daily_labour / returnee_adults, 1), returnee_adult_gvt_payroll = round(100 * returnee_adult_gvt_payroll / returnee_adults, 1), non_displaced_adult_perm_job = round(100 * non_displaced_adult_perm_job / non_displaced_adults, 1), non_displaced_adult_temp_job = round(100 * non_displaced_adult_temp_job / non_displaced_adults, 1), non_displaced_adult_daily_labour = round(100 * non_displaced_adult_daily_labour / non_displaced_adults, 1), non_displaced_adult_gvt_payroll = round(100 * non_displaced_adult_gvt_payroll / non_displaced_adults, 1), idp_adult_perm_job = round(100 * idp_adult_perm_job / idp_adults, 1), idp_adult_temp_job = round(100 * idp_adult_temp_job / idp_adults, 1), idp_adult_daily_labour = round(100 * idp_adult_daily_labour / idp_adults, 1), idp_adult_gvt_payroll = round(100 * idp_adult_gvt_payroll / idp_adults, 1), returnee_minor_perm_job = round(100 * returnee_minor_perm_job / 
returnee_minors, 1), returnee_minor_temp_job = round(100 * returnee_minor_temp_job / returnee_minors, 1), returnee_minor_daily_labour = round(100 * returnee_minor_daily_labour / returnee_minors, 1), idp_minor_perm_job = round(100 * idp_minor_perm_job / idp_minors, 1), idp_minor_temp_job = round(100 * idp_minor_temp_job / idp_minors, 1), idp_minor_daily_labour = round(100 * idp_minor_daily_labour / idp_minors, 1), non_displaced_minor_perm_job = round(100 * non_displaced_minor_perm_job / non_displaced_minors, 1), non_displaced_minor_temp_job = round(100 * non_displaced_minor_temp_job / non_displaced_minors, 1), non_displaced_minor_daily_labour = round(100 * non_displaced_minor_daily_labour / non_displaced_minors, 1)) %>% select(-returnee_adults, -idp_adults, -non_displaced_adults, -returnee_minors, -idp_minors, -non_displaced_minors, -total_assistance, -returnee_child_male, -returnee_enrolled_child_male, -returnee_attending_child_male, -returnee_youth_male, -returnee_enrolled_youth_male, -returnee_attending_youth_male, -returnee_child_female, -returnee_enrolled_child_female, -returnee_attending_child_female, -returnee_youth_female, -returnee_enrolled_youth_female, -returnee_attending_youth_female, -idp_child_male, -idp_enrolled_child_male, -idp_attending_child_male, -idp_youth_male, -idp_enrolled_youth_male, -idp_attending_youth_male, -idp_child_female, -idp_enrolled_child_female, -idp_attending_child_female, -idp_youth_female, -idp_enrolled_youth_female, -idp_attending_youth_female, -non_displaced_child_male, -non_displaced_enrolled_child_male, -non_displaced_attending_child_male, -non_displaced_youth_male, -non_displaced_enrolled_youth_male, -non_displaced_attending_youth_male, -non_displaced_child_female, -non_displaced_enrolled_child_female, -non_displaced_attending_child_female, -non_displaced_youth_female, -non_displaced_enrolled_youth_female, -non_displaced_attending_youth_female, -total_people, -total_working_male, -total_working_female) # we will analyze 
# mantika data for the main FS separately, then merge them.
# The helpers percent_response(), weighted_median(), big_spread() and
# get_group() are presumably defined in code/data_merge_prep.R (sourced at the
# top of this script) — TODO confirm their exact contracts there.

# Per-mantika shelter damage breakdown: one wide row with one column per
# mantika x damage-severity combination (percent of households in each class).
mantika_shelter_damage <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(
    no_shelter_damage = percent_response(shelter_condition, ., "no_negligible_damage", group = !!get_group(.)),
    light_shelter_damage = percent_response(shelter_condition, ., "light_damage", group = !!get_group(.)),
    medium_shelter_damage = percent_response(shelter_condition, ., "medium_damage", group = !!get_group(.)),
    heavy_shelter_damage = percent_response(shelter_condition, ., "heavy_damage", group = !!get_group(.)),
    shelter_destroyed = percent_response(shelter_condition, ., "shelter_destroyed", group = !!get_group(.))
  ) %>%
  big_spread(mantika_label, c(no_shelter_damage, light_shelter_damage,
                              medium_shelter_damage, heavy_shelter_damage,
                              shelter_destroyed)) %>%
  rename_all(tolower) %>%
  rename_all(str_replace_all, " ", "_")

# Weighted median of reported power cuts per mantika, spread to one column per
# mantika (each suffixed "_power_cuts").
mantika_public_network <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(power_cuts = weighted_median(power_cuts, ., group = !!get_group(.))) %>%
  spread(mantika_label, power_cuts) %>%
  rename_all(tolower) %>%
  rename_all(str_replace_all, " ", "_") %>%
  rename_all(paste0, "_power_cuts")

# Distance-to-drinking-water distribution per mantika, wide: one column per
# mantika x distance bracket.
mantika_water_distance <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(
    water_dist_100m = percent_response(distance_drinkingwater, ., "upto_100m", group = !!get_group(.)),
    water_dist_200m = percent_response(distance_drinkingwater, ., "between_101_200m", group = !!get_group(.)),
    water_dist_300m = percent_response(distance_drinkingwater, ., "between_201_300m", group = !!get_group(.)),
    water_dist_400m = percent_response(distance_drinkingwater, ., "between_301_400m", group = !!get_group(.)),
    water_dist_500m = percent_response(distance_drinkingwater, ., "between_401_500m", group = !!get_group(.)),
    water_dist_more_500m = percent_response(distance_drinkingwater, ., "more_500m", group = !!get_group(.))
  ) %>%
  big_spread(mantika_label, c(water_dist_100m, water_dist_200m, water_dist_300m,
                              water_dist_400m, water_dist_500m, water_dist_more_500m)) %>%
  rename_all(tolower) %>%
  rename_all(str_replace_all, " ", "_")

# Percent of households reporting presence of explosive hazards, per mantika
# (one column per mantika, suffixed "_aware_explosives").
mantika_explosives_awareness <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(aware_explosives = percent_response(presence_explosive_hazards, ., "yes", group = !!get_group(.))) %>%
  spread(mantika_label, aware_explosives) %>%
  rename_all(tolower) %>%
  rename_all(str_replace_all, " ", "_") %>%
  rename_all(paste0, "_aware_explosives")

# Top 5 mantikas by share of households reporting INSUFFICIENT drinking water
# ("yes" to unsufficient_quantity_water). The local was previously mis-named
# "sufficient_water", contradicting both the question and the downstream
# insufficient_water_* column names; renamed for clarity (purely internal —
# the values and the positional indexing below are unchanged).
# t() yields a character matrix: row 1 = mantika names, row 2 = percentages.
mantika_water_insufficient <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(insufficient_water = percent_response(unsufficient_quantity_water, ., "yes", group = !!get_group(.))) %>%
  arrange(desc(insufficient_water)) %>%
  slice(1:5) %>%
  t

# Flatten the top-5 matrix into a single-row tibble of name/pct pairs.
mantika_water_insufficient_row <- tibble(
  insufficient_water_mantika_1_name = mantika_water_insufficient[1, 1],
  insufficient_water_mantika_1_pct = mantika_water_insufficient[2, 1],
  insufficient_water_mantika_2_name = mantika_water_insufficient[1, 2],
  insufficient_water_mantika_2_pct = mantika_water_insufficient[2, 2],
  insufficient_water_mantika_3_name = mantika_water_insufficient[1, 3],
  insufficient_water_mantika_3_pct = mantika_water_insufficient[2, 3],
  insufficient_water_mantika_4_name = mantika_water_insufficient[1, 4],
  insufficient_water_mantika_4_pct = mantika_water_insufficient[2, 4],
  insufficient_water_mantika_5_name = mantika_water_insufficient[1, 5],
  insufficient_water_mantika_5_pct = mantika_water_insufficient[2, 5]
)

# Top 3 mantikas by share of households reporting movement restrictions,
# transposed the same way (row 1 = names, row 2 = percentages).
mantika_movement_restrictions <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(movement_restrictions = percent_response(movement_restrictions, ., "yes", group = !!get_group(.))) %>%
  arrange(desc(movement_restrictions)) %>%
  slice(1:3) %>%
  t

# Flatten the top-3 matrix into a single-row tibble of name/pct pairs.
mantika_movement_restrictions_row <- tibble(
  movement_restriction_mantika_1_name = mantika_movement_restrictions[1, 1],
  movement_restriction_mantika_1_pct = mantika_movement_restrictions[2, 1],
  movement_restriction_mantika_2_name = mantika_movement_restrictions[1, 2],
  movement_restriction_mantika_2_pct = mantika_movement_restrictions[2, 2],
  movement_restriction_mantika_3_name = mantika_movement_restrictions[1, 3],
  movement_restriction_mantika_3_pct = mantika_movement_restrictions[2, 3]
)

## now need to merge this data with the other mantika data
# All blocks above are single-row, so bind_cols() appends them to the
# single-row dm_file summary before writing the final merge file.
dm_file <- bind_cols(dm_file, mantika_shelter_damage, mantika_public_network,
                     mantika_water_distance, mantika_explosives_awareness,
                     mantika_water_insufficient_row, mantika_movement_restrictions_row)
write_csv(dm_file, "output/sector_data_merge.csv")
/code/backup/data_merge.R
no_license
hedibmustapha/lby_msna2019
R
false
false
183,267
r
# Ready to summarize all our data # PLEASE! Remember to source the data, questionnaire, choices, and weights from preliminary_weighted_analysis.R source("code/data_merge_prep.R") dm_file <- dm_data %>% summarize(age_hoh = weighted_median(age_hoh, .), male_hoh = percent_response(sex_hoh, ., "male"), female_hoh = percent_response(sex_hoh, ., "female"), total_people = weighted_sum(size_hh, .), size_hh = weighted_median(size_hh, .), infants_female = round( 100 * (weighted_sum(nb_infants_female, .) / total_people), 0), children_female = round( 100 * (weighted_sum(nb_children_female, .) / total_people), 0), youth_female = round( 100 * (weighted_sum(nb_youth_female, .) / total_people), 0), adults_female = round( 100 * (weighted_sum(nb_adults_female, .) / total_people), 0), elderly_female = round( 100 * (weighted_sum(nb_elderly_female, .) / total_people), 0), infants_male = round( 100 * (weighted_sum(nb_infants_male, .) / total_people), 0), children_male = round( 100 * (weighted_sum(nb_children_male, .) / total_people), 0), youth_male = round( 100 * (weighted_sum(nb_youth_male, .) / total_people), 0), adults_male = round( 100 * (weighted_sum(nb_adults_male, .) / total_people), 0), elderly_male = round( 100 * (weighted_sum(nb_elderly_male, .) 
/ total_people), 0), displ_2011 = percent_response(initially_displaced_year, ., "displaced_2011"), displ_2012 = percent_response(initially_displaced_year, ., "displaced_2012"), displ_2013 = percent_response(initially_displaced_year, ., "displaced_2013"), displ_2014 = percent_response(initially_displaced_year, ., "displaced_2014"), displ_2015 = percent_response(initially_displaced_year, ., "displaced_2015"), displ_2016 = percent_response(initially_displaced_year, ., "displaced_2016"), displ_2017 = percent_response(initially_displaced_year, ., "displaced_2017"), displ_2018 = percent_response(initially_displaced_year, ., "displaced_2018"), displ_2019 = percent_response(initially_displaced_year, ., "displaced_2019"), return_2011 = percent_response(returned_year, ., "displaced_2011"), return_2012 = percent_response(returned_year, ., "displaced_2012"), return_2013 = percent_response(returned_year, ., "displaced_2013"), return_2014 = percent_response(returned_year, ., "displaced_2014"), return_2015 = percent_response(returned_year, ., "displaced_2015"), return_2016 = percent_response(returned_year, ., "displaced_2016"), return_2017 = percent_response(returned_year, ., "displaced_2017"), return_2018 = percent_response(returned_year, ., "displaced_2018"), return_2019 = percent_response(returned_year, ., "displaced_2019"), displaced_once = num_percent_response(times_displaced_since_2011, ., 1), displaced_twice = num_percent_response(times_displaced_since_2011, ., 2), displaced_thrice = num_percent_response(times_displaced_since_2011, ., 3), displaced_four_plus = num_percent_response(times_displaced_since_2011, ., 4:max(times_displaced_since_2011, na.rm = T)), push_factor_1_name = select_percents(push_factors, 1, ., questions, choices, "label"), push_factor_1_pct = select_percents(push_factors, 1, ., questions, choices, "percent"), push_factor_2_name = select_percents(push_factors, 2, ., questions, choices, "label"), push_factor_2_pct = select_percents(push_factors, 2, ., 
questions, choices, "percent"), push_factor_3_name = select_percents(push_factors, 3, ., questions, choices, "label"), push_factor_3_pct = select_percents(push_factors, 3, ., questions, choices, "percent"), no_return_1_name = select_percents(didnt_return_home_reasons, 1, ., questions, choices, "label"), no_return_1_pct = select_percents(didnt_return_home_reasons, 1, ., questions, choices, "percent"), no_return_2_name = select_percents(didnt_return_home_reasons, 2, ., questions, choices, "label"), no_return_2_pct = select_percents(didnt_return_home_reasons, 2, ., questions, choices, "percent"), no_return_3_name = select_percents(didnt_return_home_reasons, 3, ., questions, choices, "label"), no_return_3_pct = select_percents(didnt_return_home_reasons, 3, ., questions, choices, "percent"), returnee_incident_1_name = select_percents(returnee_issues, 1, ., questions, choices, "label"), returnee_incident_1_pct = select_percents(returnee_issues, 1, ., questions, choices, "percent"), returnee_incident_2_name = select_percents(returnee_issues, 2, ., questions, choices, "label"), returnee_incident_2_pct = select_percents(returnee_issues, 2, ., questions, choices, "percent"), returnee_incident_3_name = select_percents(returnee_issues, 3, ., questions, choices, "label"), returnee_incident_3_pct = select_percents(returnee_issues, 3, ., questions, choices, "percent"), returnee_fcs_poor = percent_response(filter(., displacement_status == "returnee")[["fcs_category"]], filter(., displacement_status == "returnee"), "poor", x_name = "fcs_category"), returnee_fcs_borderline = percent_response(filter(., displacement_status == "returnee")[["fcs_category"]], filter(., displacement_status == "returnee"), "borderline", x_name = "fcs_category"), returnee_fcs_acceptable = percent_response(filter(., displacement_status == "returnee")[["fcs_category"]], filter(., displacement_status == "returnee"), "acceptable", x_name = "fcs_category"), idp_fcs_poor = percent_response(filter(., 
displacement_status == "idp")[["fcs_category"]], filter(., displacement_status == "idp"), "poor", x_name = "fcs_category"), idp_fcs_borderline = percent_response(filter(., displacement_status == "idp")[["fcs_category"]], filter(., displacement_status == "idp"), "borderline", x_name = "fcs_category"), idp_fcs_acceptable = percent_response(filter(., displacement_status == "idp")[["fcs_category"]], filter(., displacement_status == "idp"), "acceptable", x_name = "fcs_category"), non_displaced_fcs_poor = percent_response(filter(., displacement_status == "non_displaced")[["fcs_category"]], filter(., displacement_status == "non_displaced"), "poor", x_name = "fcs_category"), non_displaced_fcs_borderline = percent_response(filter(., displacement_status == "non_displaced")[["fcs_category"]], filter(., displacement_status == "non_displaced"), "borderline", x_name = "fcs_category"), non_displaced_fcs_acceptable = percent_response(filter(., displacement_status == "non_displaced")[["fcs_category"]], filter(., displacement_status == "non_displaced"), "acceptable", x_name = "fcs_category"), returnee_rcsi_low = percent_response(filter(., displacement_status == "returnee")[["rcsi_category"]], filter(., displacement_status == "returnee"), "low", x_name = "rcsi_category"), returnee_rcsi_medium = percent_response(filter(., displacement_status == "returnee")[["rcsi_category"]], filter(., displacement_status == "returnee"), "medium", x_name = "rcsi_category"), returnee_rcsi_high = percent_response(filter(., displacement_status == "returnee")[["rcsi_category"]], filter(., displacement_status == "returnee"), "high", x_name = "rcsi_category"), idp_rcsi_low = percent_response(filter(., displacement_status == "idp")[["rcsi_category"]], filter(., displacement_status == "idp"), "low", x_name = "rcsi_category"), idp_rcsi_medium = percent_response(filter(., displacement_status == "idp")[["rcsi_category"]], filter(., displacement_status == "idp"), "medium", x_name = "rcsi_category"), idp_rcsi_high 
= percent_response(filter(., displacement_status == "idp")[["rcsi_category"]], filter(., displacement_status == "idp"), "high", x_name = "rcsi_category"), non_displaced_rcsi_low = percent_response(filter(., displacement_status == "non_displaced")[["rcsi_category"]], filter(., displacement_status == "non_displaced"), "low", x_name = "rcsi_category"), non_displaced_rcsi_medium = percent_response(filter(., displacement_status == "non_displaced")[["rcsi_category"]], filter(., displacement_status == "non_displaced"), "medium", x_name = "rcsi_category"), non_displaced_rcsi_high = percent_response(filter(., displacement_status == "non_displaced")[["rcsi_category"]], filter(., displacement_status == "non_displaced"), "high", x_name = "rcsi_category"), food_source_1_name = select_percents(food_source, 1, ., questions, choices, "label"), food_source_1_pct = select_percents(food_source, 1, ., questions, choices, "percent"), food_source_2_name = select_percents(food_source, 2, ., questions, choices, "label"), food_source_2_pct = select_percents(food_source, 2, ., questions, choices, "percent"), food_source_3_name = select_percents(food_source, 3, ., questions, choices, "label"), food_source_3_pct = select_percents(food_source, 3, ., questions, choices, "percent"), crop_production = percent_response(agricultural_production, ., "crop_production"), livestock_production = percent_response(agricultural_production, ., "livestock_production"), fishing_production = percent_response(agricultural_production, ., "fishing_production"), previous_activity = percent_response(agricultural_production_prior2011, ., "yes"), crop_previously = percent_response(agricultural_activities_prior2011, ., "crop_production_activity"), livestock_previously = percent_response(agricultural_activities_prior2011, ., "livestock_activity"), fishing_previously = percent_response(agricultural_activities_prior2011, ., "fishing_activity"), other_activity_previously = 
percent_response(agricultural_activities_prior2011, ., "other"), crop_prod_challenges_1_name = select_percents(crop_production_challenges, 1, ., questions, choices, "label"), crop_prod_challenges_1_pct = select_percents(crop_production_challenges, 1, ., questions, choices, "percent"), crop_prod_challenges_2_name = select_percents(crop_production_challenges, 2, ., questions, choices, "label"), crop_prod_challenges_2_pct = select_percents(crop_production_challenges, 2, ., questions, choices, "percent"), crop_prod_challenges_3_name = select_percents(crop_production_challenges, 3, ., questions, choices, "label"), crop_prod_challenges_3_pct = select_percents(crop_production_challenges, 3, ., questions, choices, "percent"), crop_prod_challenges_4_name = select_percents(crop_production_challenges, 4, ., questions, choices, "label"), crop_prod_challenges_4_pct = select_percents(crop_production_challenges, 4, ., questions, choices, "percent"), livestock_prod_challenges_1_name = select_percents(livestock_production_challenges, 1, ., questions, choices, "label"), livestock_prod_challenges_1_pct = select_percents(livestock_production_challenges, 1, ., questions, choices, "percent"), livestock_prod_challenges_2_name = select_percents(livestock_production_challenges, 2, ., questions, choices, "label"), livestock_prod_challenges_2_pct = select_percents(livestock_production_challenges, 2, ., questions, choices, "percent"), livestock_prod_challenges_3_name = select_percents(livestock_production_challenges, 3, ., questions, choices, "label"), livestock_prod_challenges_3_pct = select_percents(livestock_production_challenges, 3, ., questions, choices, "percent"), livestock_prod_challenges_4_name = select_percents(livestock_production_challenges, 4, ., questions, choices, "label"), livestock_prod_challenges_4_pct = select_percents(livestock_production_challenges, 4, ., questions, choices, "percent"), fishing_challenges_1_name = select_percents(fishing_challenges, 1, ., questions, 
choices, "label"), fishing_challenges_1_pct = select_percents(fishing_challenges, 1, ., questions, choices, "percent"), fishing_challenges_2_name = select_percents(fishing_challenges, 2, ., questions, choices, "label"), fishing_challenges_2_pct = select_percents(fishing_challenges, 2, ., questions, choices, "percent"), fishing_challenges_3_name = select_percents(fishing_challenges, 3, ., questions, choices, "label"), fishing_challenges_3_pct = select_percents(fishing_challenges, 3, ., questions, choices, "percent"), fishing_challenges_4_name = select_percents(fishing_challenges, 4, ., questions, choices, "label"), fishing_challenges_4_pct = select_percents(fishing_challenges, 4, ., questions, choices, "percent"), type_crops_1_name = select_percents(types_crops, 1, ., questions, choices, "label"), type_crops_1_pct = select_percents(types_crops, 1, ., questions, choices, "percent"), type_crops_2_name = select_percents(types_crops, 2, ., questions, choices, "label"), type_crops_2_pct = select_percents(types_crops, 2, ., questions, choices, "percent"), type_crops_3_name = select_percents(types_crops, 3, ., questions, choices, "label"), type_crops_3_pct = select_percents(types_crops, 3, ., questions, choices, "percent"), crop_prod_impacted = percent_response(crisis_affected_crop_production, ., "yes"), livestock_prod_impacted = percent_response(crisis_affected_livestock_production, ., "yes"), returnee_adults = sum(filter(., displacement_status == "returnee")[["nb_over18"]] * dm_weights(filter(., displacement_status == "returnee"), "nb_over18")), returnee_adult_perm_job = sum(filter(., displacement_status == "returnee")[["permanent_job_adult"]] * dm_weights(filter(., displacement_status == "returnee"), "permanent_job_adult")), returnee_adult_temp_job = sum(filter(., displacement_status == "returnee")[["temporary_job_adult"]] * dm_weights(filter(., displacement_status == "returnee"), "temporary_job_adult")), returnee_adult_daily_labour = sum(filter(., displacement_status 
== "returnee")[["daily_labour_job_adult"]] * dm_weights(filter(., displacement_status == "returnee"), "daily_labour_job_adult")), returnee_adult_gvt_payroll = sum(filter(., displacement_status == "returnee")[["gvt_payroll_job_adult"]] * dm_weights(filter(., displacement_status == "returnee"), "gvt_payroll_job_adult")), idp_adults = sum(filter(., displacement_status == "idp")[["nb_over18"]] * dm_weights(filter(., displacement_status == "idp"), "nb_over18")), idp_adult_perm_job = sum(filter(., displacement_status == "idp")[["permanent_job_adult"]] * dm_weights(filter(., displacement_status == "idp"), "permanent_job_adult")), idp_adult_temp_job = sum(filter(., displacement_status == "idp")[["temporary_job_adult"]] * dm_weights(filter(., displacement_status == "idp"), "temporary_job_adult")), idp_adult_daily_labour = sum(filter(., displacement_status == "idp")[["daily_labour_job_adult"]] * dm_weights(filter(., displacement_status == "idp"), "daily_labour_job_adult")), idp_adult_gvt_payroll = sum(filter(., displacement_status == "idp")[["gvt_payroll_job_adult"]] * dm_weights(filter(., displacement_status == "idp"), "gvt_payroll_job_adult")), non_displaced_adults = sum(filter(., displacement_status == "non_displaced")[["nb_over18"]] * dm_weights(filter(., displacement_status == "non_displaced"), "nb_over18")), non_displaced_adult_perm_job = sum(filter(., displacement_status == "non_displaced")[["permanent_job_adult"]] * dm_weights(filter(., displacement_status == "non_displaced"), "permanent_job_adult")), non_displaced_adult_temp_job = sum(filter(., displacement_status == "non_displaced")[["temporary_job_adult"]] * dm_weights(filter(., displacement_status == "non_displaced"), "temporary_job_adult")), non_displaced_adult_daily_labour = sum(filter(., displacement_status == "non_displaced")[["daily_labour_job_adult"]] * dm_weights(filter(., displacement_status == "non_displaced"), "daily_labour_job_adult")), non_displaced_adult_gvt_payroll = sum(filter(., 
displacement_status == "non_displaced")[["gvt_payroll_job_adult"]] * dm_weights(filter(., displacement_status == "non_displaced"), "gvt_payroll_job_adult")), returnee_minors = sum(filter(., displacement_status == "returnee")[["nb_under18"]] * dm_weights(filter(., displacement_status == "returnee"), "nb_under18")), returnee_minor_perm_job = sum(filter(., displacement_status == "returnee")[["permanent_job_minor"]] * dm_weights(filter(., displacement_status == "returnee"), "permanent_job_minor")), returnee_minor_temp_job = sum(filter(., displacement_status == "returnee")[["temporary_job_minor"]] * dm_weights(filter(., displacement_status == "returnee"), "temporary_job_minor")), returnee_minor_daily_labour = sum(filter(., displacement_status == "returnee")[["daily_labour_job_minor"]] * dm_weights(filter(., displacement_status == "returnee"), "daily_labour_job_minor")), idp_minors = sum(filter(., displacement_status == "idp")[["nb_under18"]] * dm_weights(filter(., displacement_status == "idp"), "nb_under18")), idp_minor_perm_job = sum(filter(., displacement_status == "idp")[["permanent_job_minor"]] * dm_weights(filter(., displacement_status == "idp"), "permanent_job_minor")), idp_minor_temp_job = sum(filter(., displacement_status == "idp")[["temporary_job_minor"]] * dm_weights(filter(., displacement_status == "idp"), "temporary_job_minor")), idp_minor_daily_labour = sum(filter(., displacement_status == "idp")[["daily_labour_job_minor"]] * dm_weights(filter(., displacement_status == "idp"), "daily_labour_job_minor")), non_displaced_minors = sum(filter(., displacement_status == "non_displaced")[["nb_under18"]] * dm_weights(filter(., displacement_status == "non_displaced"), "nb_under18")), non_displaced_minor_perm_job = sum(filter(., displacement_status == "non_displaced")[["permanent_job_minor"]] * dm_weights(filter(., displacement_status == "non_displaced"), "permanent_job_minor")), non_displaced_minor_temp_job = sum(filter(., displacement_status == 
"non_displaced")[["temporary_job_minor"]] * dm_weights(filter(., displacement_status == "non_displaced"), "temporary_job_minor")), non_displaced_minor_daily_labour = sum(filter(., displacement_status == "non_displaced")[["daily_labour_job_minor"]] * dm_weights(filter(., displacement_status == "non_displaced"), "daily_labour_job_minor")), total_working_male = weighted_sum(calcul_type_institution_male, .), gvt_public_sector_male = round(100 * weighted_sum(gvt_public_sector_male, .) / total_working_male, 0), libyan_owned_business_male = round(100 * weighted_sum(libyan_owned_business_male, .) / total_working_male, 0), foreign_owned_business_male = round(100 * weighted_sum(foreign_owned_business_male, .) / total_working_male, 0), libyan_ngos_csos_male = round(100 * weighted_sum(libyan_ngos_csos_male, .) / total_working_male, 0), international_ngos_male = round(100 * weighted_sum(international_ngos_male, .) / total_working_male, 0), own_family_business_male = round(100 * weighted_sum(own_family_business_male, .) / total_working_male, 0), informal_irregular_labour_male = round(100 * weighted_sum(informal_irregular_labour_male, .) / total_working_male, 0), other_institution_male = round(100 * weighted_sum(other_institution_male, .) / total_working_male, 0), total_working_female = weighted_sum(calcul_type_institution_female, .), gvt_public_sector_female = round(100 * weighted_sum(gvt_public_sector_female, .) / total_working_female, 0), libyan_owned_business_female = round(100 * weighted_sum(libyan_owned_business_female, .) / total_working_female, 0), foreign_owned_business_female = round(100 * weighted_sum(foreign_owned_business_female, .) / total_working_female, 0), libyan_ngos_csos_female = round(100 * weighted_sum(libyan_ngos_csos_female, .) / total_working_female, 0), international_ngos_female = round(100 * weighted_sum(international_ngos_female, .) / total_working_female, 0), own_family_business_female = round(100 * weighted_sum(own_family_business_female, .) 
/ total_working_female, 0), informal_irregular_labour_female = round(100 * weighted_sum(informal_irregular_labour_female, .) / total_working_female, 0), other_institution_female = round(100 * weighted_sum(other_institution_female, .) / total_working_female, 0), returnee_gvt_salary = weighted_median(filter(., displacement_status == "returnee")[["gvt_salary"]], filter(., displacement_status == "returnee"), x_name = "gvt_salary"), returnee_gvt_social_benefits = weighted_median(filter(., displacement_status == "returnee")[["gvt_social_benefits"]], filter(., displacement_status == "returnee"), x_name = "gvt_social_benefits"), returnee_non_gvt_salary = weighted_median(filter(., displacement_status == "returnee")[["non_gvt_salary"]], filter(., displacement_status == "returnee"), x_name = "non_gvt_salary"), returnee_casual_labour = weighted_median(filter(., displacement_status == "returnee")[["casual_labour"]], filter(., displacement_status == "returnee"), x_name = "casual_labour"), returnee_own_business_income = weighted_median(filter(., displacement_status == "returnee")[["own_business_income"]], filter(., displacement_status == "returnee"), x_name = "own_business_income"), returnee_remittances = weighted_median(filter(., displacement_status == "returnee")[["remittances"]], filter(., displacement_status == "returnee"), x_name = "remittances"), returnee_family_support = weighted_median(filter(., displacement_status == "returnee")[["family_support"]], filter(., displacement_status == "returnee"), x_name = "family_support"), returnee_humanitarian_assistance = weighted_median(filter(., displacement_status == "returnee")[["humanitarian_assistance"]], filter(., displacement_status == "returnee"), x_name = "humanitarian_assistance"), returnee_zakat = weighted_median(filter(., displacement_status == "returnee")[["zakat"]], filter(., displacement_status == "returnee"), x_name = "zakat"), returnee_income_other = weighted_median(filter(., displacement_status == 
"returnee")[["income_other"]], filter(., displacement_status == "returnee"), x_name = "income_other"), idp_gvt_salary = weighted_median(filter(., displacement_status == "idp")[["gvt_salary"]], filter(., displacement_status == "idp"), x_name = "gvt_salary"), idp_gvt_social_benefits = weighted_median(filter(., displacement_status == "idp")[["gvt_social_benefits"]], filter(., displacement_status == "idp"), x_name = "gvt_social_benefits"), idp_non_gvt_salary = weighted_median(filter(., displacement_status == "idp")[["non_gvt_salary"]], filter(., displacement_status == "idp"), x_name = "non_gvt_salary"), idp_casual_labour = weighted_median(filter(., displacement_status == "idp")[["casual_labour"]], filter(., displacement_status == "idp"), x_name = "casual_labour"), idp_own_business_income = weighted_median(filter(., displacement_status == "idp")[["own_business_income"]], filter(., displacement_status == "idp"), x_name = "own_business_income"), idp_remittances = weighted_median(filter(., displacement_status == "idp")[["remittances"]], filter(., displacement_status == "idp"), x_name = "remittances"), idp_family_support = weighted_median(filter(., displacement_status == "idp")[["family_support"]], filter(., displacement_status == "idp"), x_name = "family_support"), idp_humanitarian_assistance = weighted_median(filter(., displacement_status == "idp")[["humanitarian_assistance"]], filter(., displacement_status == "idp"), x_name = "humanitarian_assistance"), idp_zakat = weighted_median(filter(., displacement_status == "idp")[["zakat"]], filter(., displacement_status == "idp"), x_name = "zakat"), idp_income_other = weighted_median(filter(., displacement_status == "idp")[["income_other"]], filter(., displacement_status == "idp"), x_name = "income_other"), non_displaced_gvt_salary = weighted_median(filter(., displacement_status == "non_displaced")[["gvt_salary"]], filter(., displacement_status == "non_displaced"), x_name = "gvt_salary"), non_displaced_gvt_social_benefits = 
weighted_median(filter(., displacement_status == "non_displaced")[["gvt_social_benefits"]], filter(., displacement_status == "non_displaced"), x_name = "gvt_social_benefits"), non_displaced_non_gvt_salary = weighted_median(filter(., displacement_status == "non_displaced")[["non_gvt_salary"]], filter(., displacement_status == "non_displaced"), x_name = "non_gvt_salary"), non_displaced_casual_labour = weighted_median(filter(., displacement_status == "non_displaced")[["casual_labour"]], filter(., displacement_status == "non_displaced"), x_name = "casual_labour"), non_displaced_own_business_income = weighted_median(filter(., displacement_status == "non_displaced")[["own_business_income"]], filter(., displacement_status == "non_displaced"), x_name = "own_business_income"), non_displaced_remittances = weighted_median(filter(., displacement_status == "non_displaced")[["remittances"]], filter(., displacement_status == "non_displaced"), x_name = "remittances"), non_displaced_family_support = weighted_median(filter(., displacement_status == "non_displaced")[["family_support"]], filter(., displacement_status == "non_displaced"), x_name = "family_support"), non_displaced_humanitarian_assistance = weighted_median(filter(., displacement_status == "non_displaced")[["humanitarian_assistance"]], filter(., displacement_status == "non_displaced"), x_name = "humanitarian_assistance"), non_displaced_zakat = weighted_median(filter(., displacement_status == "non_displaced")[["zakat"]], filter(., displacement_status == "non_displaced"), x_name = "zakat"), non_displaced_income_other = weighted_median(filter(., displacement_status == "non_displaced")[["income_other"]], filter(., displacement_status == "non_displaced"), x_name = "income_other"), received_cash = round(100 * sum((cash_modality[!is.na(cash_modality)] > 0) * dm_weights(., "cash_modality")) / sum(dm_weights(., "cash_modality")), 0), returnee_cash_challenges = percent_response(filter(., displacement_status == 
"returnee")[["obtaining_cash_challenge"]], filter(., displacement_status == "returnee"), "yes", x_name = "obtaining_cash_challenge"), idp_cash_challenges = percent_response(filter(., displacement_status == "idp")[["obtaining_cash_challenge"]], filter(., displacement_status == "idp"), "yes", x_name = "obtaining_cash_challenge"), non_displaced_cash_challenges = percent_response(filter(., displacement_status == "non_displaced")[["obtaining_cash_challenge"]], filter(., displacement_status == "non_displaced"), "yes", x_name = "obtaining_cash_challenge"), cash_challenge_1_name = select_percents(obtaining_cash_challenge_reasons, 1, ., questions, choices, "label"), cash_challenge_1_pct = select_percents(obtaining_cash_challenge_reasons, 1, ., questions, choices, "percent"), cash_challenge_2_name = select_percents(obtaining_cash_challenge_reasons, 2, ., questions, choices, "label"), cash_challenge_2_pct = select_percents(obtaining_cash_challenge_reasons, 2, ., questions, choices, "percent"), cash_challenge_3_name = select_percents(obtaining_cash_challenge_reasons, 3, ., questions, choices, "label"), cash_challenge_3_pct = select_percents(obtaining_cash_challenge_reasons, 3, ., questions, choices, "percent"), cash_challenge_4_name = select_percents(obtaining_cash_challenge_reasons, 4, ., questions, choices, "label"), cash_challenge_4_pct = select_percents(obtaining_cash_challenge_reasons, 4, ., questions, choices, "percent"), returnee_food_expenditure = weighted_median(filter(., displacement_status == "returnee")[["food_expenditure"]], filter(., displacement_status == "returnee"), x_name = "food_expenditure"), returnee_rent_expenditure = weighted_median(filter(., displacement_status == "returnee")[["rent_expenditure"]], filter(., displacement_status == "returnee"), x_name = "rent_expenditure"), returnee_shelter_maintenance_expenditure = weighted_median(filter(., displacement_status == "returnee")[["shelter_maintenance_expenditure"]], filter(., displacement_status == 
"returnee"), x_name = "shelter_maintenance_expenditure"), returnee_water_expenditure = weighted_median(filter(., displacement_status == "returnee")[["water_expenditure"]], filter(., displacement_status == "returnee"), x_name = "water_expenditure"), returnee_nfi_expenditure = weighted_median(filter(., displacement_status == "returnee")[["nfi_expenditure"]], filter(., displacement_status == "returnee"), x_name = "nfi_expenditure"), returnee_utilities_expenditure = weighted_median(filter(., displacement_status == "returnee")[["utilities_expenditure"]], filter(., displacement_status == "returnee"), x_name = "utilities_expenditure"), returnee_fuel_expenditure = weighted_median(filter(., displacement_status == "returnee")[["fuel_expenditure"]], filter(., displacement_status == "returnee"), x_name = "fuel_expenditure"), returnee_health_related_expenditure = weighted_median(filter(., displacement_status == "returnee")[["health_related_expenditure"]], filter(., displacement_status == "returnee"), x_name = "health_related_expenditure"), returnee_education_related_expenditure = weighted_median(filter(., displacement_status == "returnee")[["education_related_expenditure"]], filter(., displacement_status == "returnee"), x_name = "education_related_expenditure"), returnee_transportation_expenditure = weighted_median(filter(., displacement_status == "returnee")[["transportation_expenditure"]], filter(., displacement_status == "returnee"), x_name = "transportation_expenditure"), returnee_mobile_phone_credit_expenditure = weighted_median(filter(., displacement_status == "returnee")[["mobile_phone_credit_expenditure"]], filter(., displacement_status == "returnee"), x_name = "mobile_phone_credit_expenditure"), returnee_productive_assets_expenditure = weighted_median(filter(., displacement_status == "returnee")[["productive_assets_expenditure"]], filter(., displacement_status == "returnee"), x_name = "productive_assets_expenditure"), returnee_debt_repayment_expenditure = 
weighted_median(filter(., displacement_status == "returnee")[["debt_repayment_expenditure"]], filter(., displacement_status == "returnee"), x_name = "debt_repayment_expenditure"), returnee_other_expenditure = weighted_median(filter(., displacement_status == "returnee")[["other_expenditure"]], filter(., displacement_status == "returnee"), x_name = "other_expenditure"), idp_food_expenditure = weighted_median(filter(., displacement_status == "idp")[["food_expenditure"]], filter(., displacement_status == "idp"), x_name = "food_expenditure"), idp_rent_expenditure = weighted_median(filter(., displacement_status == "idp")[["rent_expenditure"]], filter(., displacement_status == "idp"), x_name = "rent_expenditure"), idp_shelter_maintenance_expenditure = weighted_median(filter(., displacement_status == "idp")[["shelter_maintenance_expenditure"]], filter(., displacement_status == "idp"), x_name = "shelter_maintenance_expenditure"), idp_water_expenditure = weighted_median(filter(., displacement_status == "idp")[["water_expenditure"]], filter(., displacement_status == "idp"), x_name = "water_expenditure"), idp_nfi_expenditure = weighted_median(filter(., displacement_status == "idp")[["nfi_expenditure"]], filter(., displacement_status == "idp"), x_name = "nfi_expenditure"), idp_utilities_expenditure = weighted_median(filter(., displacement_status == "idp")[["utilities_expenditure"]], filter(., displacement_status == "idp"), x_name = "utilities_expenditure"), idp_fuel_expenditure = weighted_median(filter(., displacement_status == "idp")[["fuel_expenditure"]], filter(., displacement_status == "idp"), x_name = "fuel_expenditure"), idp_health_related_expenditure = weighted_median(filter(., displacement_status == "idp")[["health_related_expenditure"]], filter(., displacement_status == "idp"), x_name = "health_related_expenditure"), idp_education_related_expenditure = weighted_median(filter(., displacement_status == "idp")[["education_related_expenditure"]], filter(., 
displacement_status == "idp"), x_name = "education_related_expenditure"), idp_transportation_expenditure = weighted_median(filter(., displacement_status == "idp")[["transportation_expenditure"]], filter(., displacement_status == "idp"), x_name = "transportation_expenditure"), idp_mobile_phone_credit_expenditure = weighted_median(filter(., displacement_status == "idp")[["mobile_phone_credit_expenditure"]], filter(., displacement_status == "idp"), x_name = "mobile_phone_credit_expenditure"), idp_productive_assets_expenditure = weighted_median(filter(., displacement_status == "idp")[["productive_assets_expenditure"]], filter(., displacement_status == "idp"), x_name = "productive_assets_expenditure"), idp_debt_repayment_expenditure = weighted_median(filter(., displacement_status == "idp")[["debt_repayment_expenditure"]], filter(., displacement_status == "idp"), x_name = "debt_repayment_expenditure"), idp_other_expenditure = weighted_median(filter(., displacement_status == "idp")[["other_expenditure"]], filter(., displacement_status == "idp"), x_name = "other_expenditure"), non_displaced_food_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["food_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "food_expenditure"), non_displaced_rent_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["rent_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "rent_expenditure"), non_displaced_shelter_maintenance_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["shelter_maintenance_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "shelter_maintenance_expenditure"), non_displaced_water_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["water_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "water_expenditure"), non_displaced_nfi_expenditure = 
weighted_median(filter(., displacement_status == "non_displaced")[["nfi_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "nfi_expenditure"), non_displaced_utilities_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["utilities_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "utilities_expenditure"), non_displaced_fuel_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["fuel_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "fuel_expenditure"), non_displaced_health_related_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["health_related_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "health_related_expenditure"), non_displaced_education_related_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["education_related_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "education_related_expenditure"), non_displaced_transportation_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["transportation_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "transportation_expenditure"), non_displaced_mobile_phone_credit_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["mobile_phone_credit_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "mobile_phone_credit_expenditure"), non_displaced_productive_assets_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["productive_assets_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "productive_assets_expenditure"), non_displaced_debt_repayment_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["debt_repayment_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = 
"debt_repayment_expenditure"), non_displaced_other_expenditure = weighted_median(filter(., displacement_status == "non_displaced")[["other_expenditure"]], filter(., displacement_status == "non_displaced"), x_name = "other_expenditure"), payment_modality_1_name = select_percents(hh_payment_modality, 1, ., questions, choices, "label"), payment_modality_1_pct = select_percents(hh_payment_modality, 1, ., questions, choices, "percent"), payment_modality_2_name = select_percents(hh_payment_modality, 2, ., questions, choices, "label"), payment_modality_2_pct = select_percents(hh_payment_modality, 2, ., questions, choices, "percent"), payment_modality_3_name = select_percents(hh_payment_modality, 3, ., questions, choices, "label"), payment_modality_3_pct = select_percents(hh_payment_modality, 3, ., questions, choices, "percent"), payment_modality_4_name = select_percents(hh_payment_modality, 4, ., questions, choices, "label"), payment_modality_4_pct = select_percents(hh_payment_modality, 4, ., questions, choices, "percent"), no_market_access = percent_response(hh_access_marketplace, ., "no"), distance_market_less_15 = percent_response(travel_to_market, ., "less_15min"), distance_market_15_30 = percent_response(travel_to_market, ., "between_15_29min"), distance_market_more_30 = percent_response(travel_to_market, ., "between_30_59min", "between_1_2hours", "more_2hours"), no_barriers_access_market = percent_response(barriers_access_market, ., "no_barriers_access_market"), market_barriers_1_name = select_percents(barriers_access_market, 1, ., questions, choices, "label", exclude = "no_barriers_access_market"), market_barriers_1_pct = select_percents(barriers_access_market, 1, ., questions, choices, "percent", exclude = "no_barriers_access_market"), market_barriers_2_name = select_percents(barriers_access_market, 2, ., questions, choices, "label", exclude = "no_barriers_access_market"), market_barriers_2_pct = select_percents(barriers_access_market, 2, ., questions, choices, 
"percent", exclude = "no_barriers_access_market"), market_barriers_3_name = select_percents(barriers_access_market, 3, ., questions, choices, "label", exclude = "no_barriers_access_market"), market_barriers_3_pct = select_percents(barriers_access_market, 3, ., questions, choices, "percent", exclude = "no_barriers_access_market"), item_barrier_too_expensive = percent_response(barriers_purchasing_items, ., "yes_items_too_expensive"), item_barrier_not_available = percent_response(barriers_purchasing_items, ., "yes_items_not_available"), item_too_expensive_1_name = select_percents(expensive_items_to_afford, 1, ., questions, choices, "label"), item_too_expensive_1_pct = select_percents(expensive_items_to_afford, 1, ., questions, choices, "percent"), item_too_expensive_2_name = select_percents(expensive_items_to_afford, 2, ., questions, choices, "label"), item_too_expensive_2_pct = select_percents(expensive_items_to_afford, 2, ., questions, choices, "percent"), item_too_expensive_3_name = select_percents(expensive_items_to_afford, 3, ., questions, choices, "label"), item_too_expensive_3_pct = select_percents(expensive_items_to_afford, 3, ., questions, choices, "percent"), item_not_available_1_name = select_percents(unabailable_items_marketplace, 1, ., questions, choices, "label"), item_not_available_1_pct = select_percents(unabailable_items_marketplace, 1, ., questions, choices, "percent"), item_not_available_2_name = select_percents(unabailable_items_marketplace, 2, ., questions, choices, "label"), item_not_available_2_pct = select_percents(unabailable_items_marketplace, 2, ., questions, choices, "percent"), item_not_available_3_name = select_percents(unabailable_items_marketplace, 3, ., questions, choices, "label"), item_not_available_3_pct = select_percents(unabailable_items_marketplace, 3, ., questions, choices, "percent"), returnee_cash_coping_stress = percent_response(filter(., displacement_status == "returnee")[["cash_coping"]], filter(., displacement_status == 
"returnee"), "stress", x_name = "cash_coping"), returnee_cash_coping_crisis = percent_response(filter(., displacement_status == "returnee")[["cash_coping"]], filter(., displacement_status == "returnee"), "crisis", x_name = "cash_coping"), returnee_cash_coping_emergency = percent_response(filter(., displacement_status == "returnee")[["cash_coping"]], filter(., displacement_status == "returnee"), "emergency", x_name = "cash_coping"), idp_cash_coping_stress = percent_response(filter(., displacement_status == "idp")[["cash_coping"]], filter(., displacement_status == "idp"), "stress", x_name = "cash_coping"), idp_cash_coping_crisis = percent_response(filter(., displacement_status == "idp")[["cash_coping"]], filter(., displacement_status == "idp"), "crisis", x_name = "cash_coping"), idp_cash_coping_emergency = percent_response(filter(., displacement_status == "idp")[["cash_coping"]], filter(., displacement_status == "idp"), "emergency", x_name = "cash_coping"), non_displaced_cash_coping_stress = percent_response(filter(., displacement_status == "non_displaced")[["cash_coping"]], filter(., displacement_status == "non_displaced"), "stress", x_name = "cash_coping"), non_displaced_cash_coping_crisis = percent_response(filter(., displacement_status == "non_displaced")[["cash_coping"]], filter(., displacement_status == "non_displaced"), "crisis", x_name = "cash_coping"), non_displaced_cash_coping_emergency = percent_response(filter(., displacement_status == "non_displaced")[["cash_coping"]], filter(., displacement_status == "non_displaced"), "emergency", x_name = "cash_coping"), cash_coping_reason_1_name = select_percents(using_cash_coping_mechanism_reasons, 1, ., questions, choices, "label"), cash_coping_reason_1_pct = select_percents(using_cash_coping_mechanism_reasons, 1, ., questions, choices, "percent"), cash_coping_reason_2_name = select_percents(using_cash_coping_mechanism_reasons, 2, ., questions, choices, "label"), cash_coping_reason_2_pct = 
select_percents(using_cash_coping_mechanism_reasons, 2, ., questions, choices, "percent"), cash_coping_reason_3_name = select_percents(using_cash_coping_mechanism_reasons, 3, ., questions, choices, "label"), cash_coping_reason_3_pct = select_percents(using_cash_coping_mechanism_reasons, 3, ., questions, choices, "percent"), cash_coping_reason_4_name = select_percents(using_cash_coping_mechanism_reasons, 4, ., questions, choices, "label"), cash_coping_reason_4_pct = select_percents(using_cash_coping_mechanism_reasons, 4, ., questions, choices, "percent"), cash_coping_reason_5_name = select_percents(using_cash_coping_mechanism_reasons, 5, ., questions, choices, "label"), cash_coping_reason_5_pct = select_percents(using_cash_coping_mechanism_reasons, 5, ., questions, choices, "percent"), cash_coping_reason_6_name = select_percents(using_cash_coping_mechanism_reasons, 6, ., questions, choices, "label"), cash_coping_reason_6_pct = select_percents(using_cash_coping_mechanism_reasons, 6, ., questions, choices, "percent"), apartment_house = percent_response(shelter_type, ., "apartment", "house"), unfinished_room = percent_response(shelter_type, ., "unfinished_rooms"), public_building = percent_response(shelter_type, ., "public_building_not_used_for_shelter"), private_building = percent_response(shelter_type, ., "private_building_not_used_for_shelter"), tent_caravan = percent_response(shelter_type, ., "tent_caravan"), temporary_shelter_ngos = percent_response(shelter_type, ., "temporary_shelter_ngos"), connection_house = percent_response(shelter_type, ., "connection_house"), hotel = percent_response(shelter_type, ., "hotel"), camp_informal_settlement = percent_response(shelter_type, ., "camp_informal_settlement"), other_housing = percent_response(shelter_type, ., "other"), returnee_occupancy_owned = percent_response(filter(., displacement_status == "returnee")[["occupancy_status"]], filter(., displacement_status == "returnee"), "ownership", x_name = "occupancy_status"), 
returnee_occupancy_rented = percent_response(filter(., displacement_status == "returnee")[["occupancy_status"]], filter(., displacement_status == "returnee"), "rental_written_contract", "rental_verbal_agreement", x_name = "occupancy_status"), returnee_occupancy_hosted_free = percent_response(filter(., displacement_status == "returnee")[["occupancy_status"]], filter(., displacement_status == "returnee"), "hosted_free", x_name = "occupancy_status"), returnee_occupancy_other = percent_response(filter(., displacement_status == "returnee")[["occupancy_status"]], filter(., displacement_status == "returnee"), "squatting", "housing_provided_public_authority", "housing_provided_employer", "other", x_name = "occupancy_status"), idp_occupancy_owned = percent_response(filter(., displacement_status == "idp")[["occupancy_status"]], filter(., displacement_status == "idp"), "ownership", x_name = "occupancy_status"), idp_occupancy_rented = percent_response(filter(., displacement_status == "idp")[["occupancy_status"]], filter(., displacement_status == "idp"), "rental_written_contract", "rental_verbal_agreement", x_name = "occupancy_status"), idp_occupancy_hosted_free = percent_response(filter(., displacement_status == "idp")[["occupancy_status"]], filter(., displacement_status == "idp"), "hosted_free", x_name = "occupancy_status"), idp_occupancy_other = percent_response(filter(., displacement_status == "idp")[["occupancy_status"]], filter(., displacement_status == "idp"), "squatting", "housing_provided_public_authority", "housing_provided_employer", "other", x_name = "occupancy_status"), non_displaced_occupancy_owned = percent_response(filter(., displacement_status == "non_displaced")[["occupancy_status"]], filter(., displacement_status == "non_displaced"), "ownership", x_name = "occupancy_status"), non_displaced_occupancy_rented = percent_response(filter(., displacement_status == "non_displaced")[["occupancy_status"]], filter(., displacement_status == "non_displaced"), 
"rental_written_contract", "rental_verbal_agreement", x_name = "occupancy_status"), non_displaced_occupancy_hosted_free = percent_response(filter(., displacement_status == "non_displaced")[["occupancy_status"]], filter(., displacement_status == "non_displaced"), "hosted_free", x_name = "occupancy_status"), non_displaced_occupancy_other = percent_response(filter(., displacement_status == "non_displaced")[["occupancy_status"]], filter(., displacement_status == "non_displaced"), "squatting", "housing_provided_public_authority", "housing_provided_employer", "other", x_name = "occupancy_status"), returnee_rental_cost = weighted_median(filter(., displacement_status == "returnee")[["rental_cost"]], filter(., displacement_status == "returnee"), "rental_cost"), idp_rental_cost = weighted_median(filter(., displacement_status == "idp")[["rental_cost"]], filter(., displacement_status == "idp"), "rental_cost"), non_displaced_rental_cost = weighted_median(filter(., displacement_status == "non_displaced")[["rental_cost"]], filter(., displacement_status == "non_displaced"), "rental_cost"), threatened_eviction = percent_response(eviction_threat, ., "yes_threatened_with_eviction"), recently_evicted = percent_response(eviction_threat, ., "yes_recently_evicted"), eviction_reason_1_name = select_percents(eviction_shelter_reasons, 1, ., questions, choices, "label"), eviction_reason_1_pct = select_percents(eviction_shelter_reasons, 1, ., questions, choices, "percent"), eviction_reason_2_name = select_percents(eviction_shelter_reasons, 2, ., questions, choices, "label"), eviction_reason_2_pct = select_percents(eviction_shelter_reasons, 2, ., questions, choices, "percent"), eviction_reason_3_name = select_percents(eviction_shelter_reasons, 3, ., questions, choices, "label"), eviction_reason_3_pct = select_percents(eviction_shelter_reasons, 3, ., questions, choices, "percent"), nfi_need = percent_response(nfi_need, ., "has_nfi_need"), power_cuts = weighted_median(power_cuts, .), 
phone_coverage_reliable = percent_response(phone_network_coverage, ., "coverage_exists_reliable"), phone_coverage_not_reliable = percent_response(phone_network_coverage, ., "coverage_exists_not_reliable"), phone_coverage_none = percent_response(phone_network_coverage, ., "coverage_doesnt_exist"), water_source_public_network = percent_response(primary_drinkingwater_source, ., "public_network"), water_source_bottles = percent_response(primary_drinkingwater_source, ., "bottled_water"), water_source_trucks = percent_response(primary_drinkingwater_source, ., "water_trucking"), water_source_other = percent_response(primary_drinkingwater_source, ., "public_tap", "protected_well", "unprotected_well", "surface_water", "rainwater", "other"), water_dist_100m = percent_response(distance_drinkingwater, ., "upto_100m"), water_dist_200m = percent_response(distance_drinkingwater, ., "between_101_200m"), water_dist_300m = percent_response(distance_drinkingwater, ., "between_201_300m"), water_dist_400m = percent_response(distance_drinkingwater, ., "between_301_400m"), water_dist_500m = percent_response(distance_drinkingwater, ., "between_401_500m"), water_dist_more_500m = percent_response(distance_drinkingwater, ., "more_500m"), returnee_water_insufficient = percent_response(filter(., displacement_status == "returnee")[["unsufficient_quantity_water"]], filter(., displacement_status == "returnee"), "yes", x_name = "unsufficient_quantity_water"), idp_water_insufficient = percent_response(filter(., displacement_status == "idp")[["unsufficient_quantity_water"]], filter(., displacement_status == "idp"), "yes", x_name = "unsufficient_quantity_water"), non_displaced_water_insufficient = percent_response(filter(., displacement_status == "non_displaced")[["unsufficient_quantity_water"]], filter(., displacement_status == "non_displaced"), "yes", x_name = "unsufficient_quantity_water"), water_access_every_day = percent_response(access_public_water_network, ., "every_day"), 
water_access_most_days = percent_response(access_public_water_network, ., "most_days"), water_access_rarely_none = percent_response(access_public_water_network, ., "rarely", "not_at_all"), designated_waste_disposal = percent_response(trash_disposal, ., "collected_municipality_waste_management_service", "designated_place_waste_disposal"), healthcare_challenge_yes = percent_response(access_healthcare_challenge, ., "yes"), healthcare_challenge_no = percent_response(access_healthcare_challenge, ., "no"), healthcare_challenge_dk_na = percent_response(access_healthcare_challenge, ., "dk", "dwta"), returnee_healthcare_challenge_1_name = select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), returnee_healthcare_challenge_1_pct = select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), returnee_healthcare_challenge_2_name = select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), returnee_healthcare_challenge_2_pct = select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), returnee_healthcare_challenge_3_name = select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), returnee_healthcare_challenge_3_pct = 
select_percents(filter(., displacement_status == "returnee")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_1_name = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_1_pct = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_2_name = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_2_pct = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_3_name = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), idp_healthcare_challenge_3_pct = select_percents(filter(., displacement_status == "idp")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_1_name = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", 
x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_2_name = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_3_name = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "access_healthcare_challenge_reasons"), non_displaced_healthcare_challenge_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["access_healthcare_challenge_reasons"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "access_healthcare_challenge_reasons"), health_time_more_30_min = percent_response(travel_to_healthcare_provider, ., "between_30_59min", "between_1_2hours", "more_2hours"), bottle_fed_child = percent_response(bottle_fed_child, ., "bottle_fed_child"), unable_vaccinate = percent_response(unable_vaccinate, ., "unable_vaccinate"), vaccinate_facility_time_more_30_min = percent_response(time_travel_vaccinations_facility, ., "between_30_59min", "between_1_2hours", "more_2hours"), returnee_chronic_disease = percent_response(filter(., displacement_status == 
"returnee")[["individual_suffering_chronicdisease"]], filter(., displacement_status == "returnee"), "yes", x_name = "individual_suffering_chronicdisease"), returnee_mental_illness = percent_response(filter(., displacement_status == "returnee")[["individual_suffering_mentaldisorder"]], filter(., displacement_status == "returnee"), "yes", x_name = "individual_suffering_mentaldisorder"), returnee_physical_difficulty = percent_response(filter(., displacement_status == "returnee")[["physical_cognitive_difficulties"]], filter(., displacement_status == "returnee"), "yes_male_infants", "yes_male_children", "yes_male_youths", "yes_male_adults", "yes_male_elderly", "yes_female_infants", "yes_female_children", "yes_female_youths", "yes_female_adults", "yes_female_elderly", x_name = "physical_cognitive_difficulties"), returnee_behavior_change = percent_response(filter(., displacement_status == "returnee")[["changes_behaviors_minors"]], filter(., displacement_status == "returnee"), "yes", x_name = "changes_behaviors_minors"), idp_chronic_disease = percent_response(filter(., displacement_status == "idp")[["individual_suffering_chronicdisease"]], filter(., displacement_status == "idp"), "yes", x_name = "individual_suffering_chronicdisease"), idp_mental_illness = percent_response(filter(., displacement_status == "idp")[["individual_suffering_mentaldisorder"]], filter(., displacement_status == "idp"), "yes", x_name = "individual_suffering_mentaldisorder"), idp_physical_difficulty = percent_response(filter(., displacement_status == "idp")[["physical_cognitive_difficulties"]], filter(., displacement_status == "idp"), "yes_male_infants", "yes_male_children", "yes_male_youths", "yes_male_adults", "yes_male_elderly", "yes_female_infants", "yes_female_children", "yes_female_youths", "yes_female_adults", "yes_female_elderly", x_name = "physical_cognitive_difficulties"), idp_behavior_change = percent_response(filter(., displacement_status == "idp")[["changes_behaviors_minors"]], filter(., 
displacement_status == "idp"), "yes", x_name = "changes_behaviors_minors"), non_displaced_chronic_disease = percent_response(filter(., displacement_status == "non_displaced")[["individual_suffering_chronicdisease"]], filter(., displacement_status == "non_displaced"), "yes", x_name = "individual_suffering_chronicdisease"), non_displaced_mental_illness = percent_response(filter(., displacement_status == "non_displaced")[["individual_suffering_mentaldisorder"]], filter(., displacement_status == "non_displaced"), "yes", x_name = "individual_suffering_mentaldisorder"), non_displaced_physical_difficulty = percent_response(filter(., displacement_status == "non_displaced")[["physical_cognitive_difficulties"]], filter(., displacement_status == "non_displaced"), "yes_male_infants", "yes_male_children", "yes_male_youths", "yes_male_adults", "yes_male_elderly", "yes_female_infants", "yes_female_children", "yes_female_youths", "yes_female_adults", "yes_female_elderly", x_name = "physical_cognitive_difficulties"), non_displaced_behavior_change = percent_response(filter(., displacement_status == "non_displaced")[["changes_behaviors_minors"]], filter(., displacement_status == "non_displaced"), "yes", x_name = "changes_behaviors_minors"), disease_1_name = select_percents(type_chronicdisease, 1, ., questions, choices, "label"), disease_1_pct = select_percents(type_chronicdisease, 1, ., questions, choices, "percent"), disease_2_name = select_percents(type_chronicdisease, 2, ., questions, choices, "label"), disease_2_pct = select_percents(type_chronicdisease, 2, ., questions, choices, "percent"), disease_3_name = select_percents(type_chronicdisease, 3, ., questions, choices, "label"), disease_3_pct = select_percents(type_chronicdisease, 3, ., questions, choices, "percent"), total_assistance = weighted_sum(total_assistance, .), no_daily_difficulty = round(100 * weighted_sum(no_daily_difficulty, .) 
/ total_assistance), minor_daily_difficulty = round(100 * weighted_sum(minor_daily_difficulty, .) / total_assistance), daily_needs_assistance = round(100 * weighted_sum(daily_needs_assistance, .) / total_assistance), neg_behavior_0_12_num_1_name = select_percents(negative_behaviour_0_12, 1, ., questions, choices, "label"), neg_behavior_0_12_num_1_pct = select_percents(negative_behaviour_0_12, 1, ., questions, choices, "percent"), neg_behavior_0_12_num_2_name = select_percents(negative_behaviour_0_12, 2, ., questions, choices, "label"), neg_behavior_0_12_num_2_pct = select_percents(negative_behaviour_0_12, 2, ., questions, choices, "percent"), neg_behavior_0_12_num_3_name = select_percents(negative_behaviour_0_12, 3, ., questions, choices, "label"), neg_behavior_0_12_num_3_pct = select_percents(negative_behaviour_0_12, 3, ., questions, choices, "percent"), neg_behavior_0_12_num_4_name = select_percents(negative_behaviour_0_12, 4, ., questions, choices, "label"), neg_behavior_0_12_num_4_pct = select_percents(negative_behaviour_0_12, 4, ., questions, choices, "percent"), neg_behavior_0_12_num_5_name = select_percents(negative_behaviour_0_12, 5, ., questions, choices, "label"), neg_behavior_0_12_num_5_pct = select_percents(negative_behaviour_0_12, 5, ., questions, choices, "percent"), neg_behavior_13_17_num_1_name = select_percents(negative_behaviour_13_17, 1, ., questions, choices, "label"), neg_behavior_13_17_num_1_pct = select_percents(negative_behaviour_13_17, 1, ., questions, choices, "percent"), neg_behavior_13_17_num_2_name = select_percents(negative_behaviour_13_17, 2, ., questions, choices, "label"), neg_behavior_13_17_num_2_pct = select_percents(negative_behaviour_13_17, 2, ., questions, choices, "percent"), neg_behavior_13_17_num_3_name = select_percents(negative_behaviour_13_17, 3, ., questions, choices, "label"), neg_behavior_13_17_num_3_pct = select_percents(negative_behaviour_13_17, 3, ., questions, choices, "percent"), neg_behavior_13_17_num_4_name = 
select_percents(negative_behaviour_13_17, 4, ., questions, choices, "label"), neg_behavior_13_17_num_4_pct = select_percents(negative_behaviour_13_17, 4, ., questions, choices, "percent"), neg_behavior_13_17_num_5_name = select_percents(negative_behaviour_13_17, 5, ., questions, choices, "label"), neg_behavior_13_17_num_5_pct = select_percents(negative_behaviour_13_17, 5, ., questions, choices, "percent"), returnee_child_male = weighted_sum(filter(., displacement_status == "returnee")[["nb_children_male_edu"]], filter(., displacement_status == "returnee"), x_name = "nb_children_male_edu"), returnee_enrolled_child_male = weighted_sum(filter(., displacement_status == "returnee")[["enrolled_school_male_6_14_edu"]], filter(., displacement_status == "returnee"), x_name = "enrolled_school_male_6_14_edu"), returnee_enrolled_child_male_pct = round(100 * returnee_enrolled_child_male / returnee_child_male, 0), returnee_attending_child_male = weighted_sum(filter(., displacement_status == "returnee")[["attended_school_male_6_14"]], filter(., displacement_status == "returnee"), x_name = "attended_school_male_6_14"), returnee_attending_child_male_pct = round(100 * returnee_attending_child_male / returnee_enrolled_child_male, 0), returnee_youth_male = weighted_sum(filter(., displacement_status == "returnee")[["nb_youth_male_edu"]], filter(., displacement_status == "returnee"), x_name = "nb_youth_male_edu"), returnee_enrolled_youth_male = weighted_sum(filter(., displacement_status == "returnee")[["enrolled_school_male_15_17_edu"]], filter(., displacement_status == "returnee"), x_name = "enrolled_school_male_15_17_edu"), returnee_enrolled_youth_male_pct = round(100 * returnee_enrolled_youth_male / returnee_youth_male, 0), returnee_attending_youth_male = weighted_sum(filter(., displacement_status == "returnee")[["attended_school_male_15_17"]], filter(., displacement_status == "returnee"), x_name = "attended_school_male_15_17"), returnee_attending_youth_male_pct = round(100 * 
returnee_attending_youth_male / returnee_enrolled_youth_male, 0), returnee_child_female = weighted_sum(filter(., displacement_status == "returnee")[["nb_children_female_edu"]], filter(., displacement_status == "returnee"), x_name = "nb_children_female_edu"), returnee_enrolled_child_female = weighted_sum(filter(., displacement_status == "returnee")[["enrolled_school_female_6_14_edu"]], filter(., displacement_status == "returnee"), x_name = "enrolled_school_female_6_14_edu"), returnee_enrolled_child_female_pct = round(100 * returnee_enrolled_child_female / returnee_child_female, 0), returnee_attending_child_female = weighted_sum(filter(., displacement_status == "returnee")[["attended_school_female_6_14"]], filter(., displacement_status == "returnee"), x_name = "attended_school_female_6_14"), returnee_attending_child_female_pct = round(100 * returnee_attending_child_female / returnee_enrolled_child_female, 0), returnee_youth_female = weighted_sum(filter(., displacement_status == "returnee")[["nb_youth_female_edu"]], filter(., displacement_status == "returnee"), x_name = "nb_youth_female_edu"), returnee_enrolled_youth_female = weighted_sum(filter(., displacement_status == "returnee")[["enrolled_school_female_15_17_edu"]], filter(., displacement_status == "returnee"), x_name = "enrolled_school_female_15_17_edu"), returnee_enrolled_youth_female_pct = round(100 * returnee_enrolled_youth_female / returnee_youth_female, 0), returnee_attending_youth_female = weighted_sum(filter(., displacement_status == "returnee")[["attended_school_female_15_17"]], filter(., displacement_status == "returnee"), x_name = "attended_school_female_15_17"), returnee_attending_youth_female_pct = round(100 * returnee_attending_youth_female / returnee_enrolled_youth_female, 0), idp_child_male = weighted_sum(filter(., displacement_status == "idp")[["nb_children_male_edu"]], filter(., displacement_status == "idp"), x_name = "nb_children_male_edu"), idp_enrolled_child_male = weighted_sum(filter(., 
displacement_status == "idp")[["enrolled_school_male_6_14_edu"]], filter(., displacement_status == "idp"), x_name = "enrolled_school_male_6_14_edu"), idp_enrolled_child_male_pct = round(100 * idp_enrolled_child_male / idp_child_male, 0), idp_attending_child_male = weighted_sum(filter(., displacement_status == "idp")[["attended_school_male_6_14"]], filter(., displacement_status == "idp"), x_name = "attended_school_male_6_14"), idp_attending_child_male_pct = round(100 * idp_attending_child_male / idp_enrolled_child_male, 0), idp_youth_male = weighted_sum(filter(., displacement_status == "idp")[["nb_youth_male_edu"]], filter(., displacement_status == "idp"), x_name = "nb_youth_male_edu"), idp_enrolled_youth_male = weighted_sum(filter(., displacement_status == "idp")[["enrolled_school_male_15_17_edu"]], filter(., displacement_status == "idp"), x_name = "enrolled_school_male_15_17_edu"), idp_enrolled_youth_male_pct = round(100 * idp_enrolled_youth_male / idp_youth_male, 0), idp_attending_youth_male = weighted_sum(filter(., displacement_status == "idp")[["attended_school_male_15_17"]], filter(., displacement_status == "idp"), x_name = "attended_school_male_15_17"), idp_attending_youth_male_pct = round(100 * idp_attending_youth_male / idp_enrolled_youth_male, 0), idp_child_female = weighted_sum(filter(., displacement_status == "idp")[["nb_children_female_edu"]], filter(., displacement_status == "idp"), x_name = "nb_children_female_edu"), idp_enrolled_child_female = weighted_sum(filter(., displacement_status == "idp")[["enrolled_school_female_6_14_edu"]], filter(., displacement_status == "idp"), x_name = "enrolled_school_female_6_14_edu"), idp_enrolled_child_female_pct = round(100 * idp_enrolled_child_female / idp_child_female, 0), idp_attending_child_female = weighted_sum(filter(., displacement_status == "idp")[["attended_school_female_6_14"]], filter(., displacement_status == "idp"), x_name = "attended_school_female_6_14"), idp_attending_child_female_pct = round(100 * 
idp_attending_child_female / idp_enrolled_child_female, 0), idp_youth_female = weighted_sum(filter(., displacement_status == "idp")[["nb_youth_female_edu"]], filter(., displacement_status == "idp"), x_name = "nb_youth_female_edu"), idp_enrolled_youth_female = weighted_sum(filter(., displacement_status == "idp")[["enrolled_school_female_15_17_edu"]], filter(., displacement_status == "idp"), x_name = "enrolled_school_female_15_17_edu"), idp_enrolled_youth_female_pct = round(100 * idp_enrolled_youth_female / idp_youth_female, 0), idp_attending_youth_female = weighted_sum(filter(., displacement_status == "idp")[["attended_school_female_15_17"]], filter(., displacement_status == "idp"), x_name = "attended_school_female_15_17"), idp_attending_youth_female_pct = round(100 * idp_attending_youth_female / idp_enrolled_youth_female, 0), non_displaced_child_male = weighted_sum(filter(., displacement_status == "non_displaced")[["nb_children_male_edu"]], filter(., displacement_status == "non_displaced"), x_name = "nb_children_male_edu"), non_displaced_enrolled_child_male = weighted_sum(filter(., displacement_status == "non_displaced")[["enrolled_school_male_6_14_edu"]], filter(., displacement_status == "non_displaced"), x_name = "enrolled_school_male_6_14_edu"), non_displaced_enrolled_child_male_pct = round(100 * non_displaced_enrolled_child_male / non_displaced_child_male, 0), non_displaced_attending_child_male = weighted_sum(filter(., displacement_status == "non_displaced")[["attended_school_male_6_14"]], filter(., displacement_status == "non_displaced"), x_name = "attended_school_male_6_14"), non_displaced_attending_child_male_pct = round(100 * non_displaced_attending_child_male / non_displaced_enrolled_child_male, 0), non_displaced_youth_male = weighted_sum(filter(., displacement_status == "non_displaced")[["nb_youth_male_edu"]], filter(., displacement_status == "non_displaced"), x_name = "nb_youth_male_edu"), non_displaced_enrolled_youth_male = weighted_sum(filter(., 
displacement_status == "non_displaced")[["enrolled_school_male_15_17_edu"]], filter(., displacement_status == "non_displaced"), x_name = "enrolled_school_male_15_17_edu"), non_displaced_enrolled_youth_male_pct = round(100 * non_displaced_enrolled_youth_male / non_displaced_youth_male, 0), non_displaced_attending_youth_male = weighted_sum(filter(., displacement_status == "non_displaced")[["attended_school_male_15_17"]], filter(., displacement_status == "non_displaced"), x_name = "attended_school_male_15_17"), non_displaced_attending_youth_male_pct = round(100 * non_displaced_attending_youth_male / non_displaced_enrolled_youth_male, 0), non_displaced_child_female = weighted_sum(filter(., displacement_status == "non_displaced")[["nb_children_female_edu"]], filter(., displacement_status == "non_displaced"), x_name = "nb_children_female_edu"), non_displaced_enrolled_child_female = weighted_sum(filter(., displacement_status == "non_displaced")[["enrolled_school_female_6_14_edu"]], filter(., displacement_status == "non_displaced"), x_name = "enrolled_school_female_6_14_edu"), non_displaced_enrolled_child_female_pct = round(100 * non_displaced_enrolled_child_female / non_displaced_child_female, 0), non_displaced_attending_child_female = weighted_sum(filter(., displacement_status == "non_displaced")[["attended_school_female_6_14"]], filter(., displacement_status == "non_displaced"), x_name = "attended_school_female_6_14"), non_displaced_attending_child_female_pct = round(100 * non_displaced_attending_child_female / non_displaced_enrolled_child_female, 0), non_displaced_youth_female = weighted_sum(filter(., displacement_status == "non_displaced")[["nb_youth_female_edu"]], filter(., displacement_status == "non_displaced"), x_name = "nb_youth_female_edu"), non_displaced_enrolled_youth_female = weighted_sum(filter(., displacement_status == "non_displaced")[["enrolled_school_female_15_17_edu"]], filter(., displacement_status == "non_displaced"), x_name = 
"enrolled_school_female_15_17_edu"), non_displaced_enrolled_youth_female_pct = round(100 * non_displaced_enrolled_youth_female / non_displaced_youth_female, 0), non_displaced_attending_youth_female = weighted_sum(filter(., displacement_status == "non_displaced")[["attended_school_female_15_17"]], filter(., displacement_status == "non_displaced"), x_name = "attended_school_female_15_17"), non_displaced_attending_youth_female_pct = round(100 * non_displaced_attending_youth_female / non_displaced_enrolled_youth_female, 0), attendance_issue_1_name = select_percents(issues_faced_attending_school, 1, ., questions, choices, "label"), attendance_issue_1_pct = select_percents(issues_faced_attending_school, 1, ., questions, choices, "percent"), attendance_issue_2_name = select_percents(issues_faced_attending_school, 2, ., questions, choices, "label"), attendance_issue_2_pct = select_percents(issues_faced_attending_school, 2, ., questions, choices, "percent"), attendance_issue_3_name = select_percents(issues_faced_attending_school, 3, ., questions, choices, "label"), attendance_issue_3_pct = select_percents(issues_faced_attending_school, 3, ., questions, choices, "percent"), no_attendance_reason_1_name = select_percents(droppedout_school_reasons, 1, ., questions, choices, "label"), no_attendance_reason_1_pct = select_percents(droppedout_school_reasons, 1, ., questions, choices, "percent"), no_attendance_reason_2_name = select_percents(droppedout_school_reasons, 2, ., questions, choices, "label"), no_attendance_reason_2_pct = select_percents(droppedout_school_reasons, 2, ., questions, choices, "percent"), no_attendance_reason_3_name = select_percents(droppedout_school_reasons, 3, ., questions, choices, "label"), no_attendance_reason_3_pct = select_percents(droppedout_school_reasons, 3, ., questions, choices, "percent"), no_attendance_reason_4_name = select_percents(droppedout_school_reasons, 4, ., questions, choices, "label"), no_attendance_reason_4_pct = 
select_percents(droppedout_school_reasons, 4, ., questions, choices, "percent"), no_attendance_reason_5_name = select_percents(droppedout_school_reasons, 5, ., questions, choices, "label"), no_attendance_reason_5_pct = select_percents(droppedout_school_reasons, 5, ., questions, choices, "percent"), returnee_nonformal_school = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "yes_nonformal_classes_byschool", x_name = "nonformal_education"), returnee_nonformal_community = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "yes_nonformal_classes_bycommunity", x_name = "nonformal_education"), returnee_nonformal_priv_bus = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "yes_nonformal_privatebusiness", x_name = "nonformal_education"), returnee_nonformal_self = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "yes_selflearning", x_name = "nonformal_education"), returnee_nonformal_ngo = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "yes_assistance_ngos", x_name = "nonformal_education"), returnee_nonformal_other = percent_response(filter(., displacement_status == "returnee")[["nonformal_education"]], filter(., displacement_status == "returnee"), "other", x_name = "nonformal_education"), returnee_nonformal_total = returnee_nonformal_ngo + returnee_nonformal_community + returnee_nonformal_priv_bus + returnee_nonformal_school + returnee_nonformal_self + returnee_nonformal_other, idp_nonformal_school = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "yes_nonformal_classes_byschool", 
x_name = "nonformal_education"), idp_nonformal_community = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "yes_nonformal_classes_bycommunity", x_name = "nonformal_education"), idp_nonformal_priv_bus = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "yes_nonformal_privatebusiness", x_name = "nonformal_education"), idp_nonformal_self = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "yes_selflearning", x_name = "nonformal_education"), idp_nonformal_ngo = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "yes_assistance_ngos", x_name = "nonformal_education"), idp_nonformal_other = percent_response(filter(., displacement_status == "idp")[["nonformal_education"]], filter(., displacement_status == "idp"), "other", x_name = "nonformal_education"), idp_nonformal_total = idp_nonformal_ngo + idp_nonformal_community + idp_nonformal_priv_bus + idp_nonformal_school + idp_nonformal_self + idp_nonformal_other, non_displaced_nonformal_school = percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "yes_nonformal_classes_byschool", x_name = "nonformal_education"), non_displaced_nonformal_community = percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "yes_nonformal_classes_bycommunity", x_name = "nonformal_education"), non_displaced_nonformal_priv_bus = percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "yes_nonformal_privatebusiness", x_name = "nonformal_education"), non_displaced_nonformal_self = 
percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "yes_selflearning", x_name = "nonformal_education"), non_displaced_nonformal_ngo = percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "yes_assistance_ngos", x_name = "nonformal_education"), non_displaced_nonformal_other = percent_response(filter(., displacement_status == "non_displaced")[["nonformal_education"]], filter(., displacement_status == "non_displaced"), "other", x_name = "nonformal_education"), non_displaced_nonformal_total = non_displaced_nonformal_ngo + non_displaced_nonformal_community + non_displaced_nonformal_priv_bus + non_displaced_nonformal_school + non_displaced_nonformal_self + non_displaced_nonformal_other, returnee_document_need_1_name = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "document_needs"), returnee_document_need_1_pct = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 1, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "document_needs"), returnee_document_need_2_name = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 2, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "document_needs"), returnee_document_need_2_pct = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "document_needs"), returnee_document_need_3_name = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 3, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "document_needs"), 
returnee_document_need_3_pct = select_percents(filter(., displacement_status == "returnee")[["document_needs"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "document_needs"), idp_document_need_1_name = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "document_needs"), idp_document_need_1_pct = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 1, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "document_needs"), idp_document_need_2_name = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 2, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "document_needs"), idp_document_need_2_pct = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "document_needs"), idp_document_need_3_name = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "document_needs"), idp_document_need_3_pct = select_percents(filter(., displacement_status == "idp")[["document_needs"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "document_needs"), non_displaced_document_need_1_name = select_percents(filter(., displacement_status == "non_displaced")[["document_needs"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "document_needs"), non_displaced_document_need_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["document_needs"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "document_needs"), non_displaced_document_need_2_name = select_percents(filter(., 
displacement_status == "non_displaced")[["document_needs"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "document_needs"), non_displaced_document_need_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["document_needs"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "document_needs"), non_displaced_document_need_3_name = select_percents(filter(., displacement_status == "non_displaced")[["document_needs"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "document_needs"), non_displaced_document_need_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["document_needs"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "document_needs"), returnee_doc_loss_impact_1_name = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "losing_legal_docs_impact"), returnee_doc_loss_impact_1_pct = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), returnee_doc_loss_impact_2_name = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "losing_legal_docs_impact"), returnee_doc_loss_impact_2_pct = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), returnee_doc_loss_impact_3_name = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 3, filter(., 
displacement_status == "returnee"), questions, choices, "label", x_name = "losing_legal_docs_impact"), returnee_doc_loss_impact_3_pct = select_percents(filter(., displacement_status == "returnee")[["losing_legal_docs_impact"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_1_name = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_1_pct = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_2_name = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_2_pct = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_3_name = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "losing_legal_docs_impact"), idp_doc_loss_impact_3_pct = select_percents(filter(., displacement_status == "idp")[["losing_legal_docs_impact"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), non_displaced_doc_loss_impact_1_name = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "losing_legal_docs_impact"), 
non_displaced_doc_loss_impact_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), non_displaced_doc_loss_impact_2_name = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "losing_legal_docs_impact"), non_displaced_doc_loss_impact_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), non_displaced_doc_loss_impact_3_name = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "losing_legal_docs_impact"), non_displaced_doc_loss_impact_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["losing_legal_docs_impact"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "losing_legal_docs_impact"), aware_explosives = percent_response(presence_explosive_hazards, ., "yes"), explosive_action_1_name = select_percents(presence_explosive_hazards_actions, 1, ., questions, choices, "label"), explosive_action_1_pct = select_percents(presence_explosive_hazards_actions, 1, ., questions, choices, "percent"), explosive_action_2_name = select_percents(presence_explosive_hazards_actions, 2, ., questions, choices, "label"), explosive_action_2_pct = select_percents(presence_explosive_hazards_actions, 2, ., questions, choices, "percent"), explosive_action_3_name = select_percents(presence_explosive_hazards_actions, 3, ., questions, choices, "label"), explosive_action_3_pct = 
select_percents(presence_explosive_hazards_actions, 3, ., questions, choices, "percent"), explosive_action_3_other_pct = 100 - explosive_action_1_pct - explosive_action_2_pct - explosive_action_3_pct, no_explosive_awareness = percent_response(explosive_hazards_risk_awareness, ., "no"), member_killed_injured = percent_response(explosive_hazards_injured, ., "yes_adult_injured", "yes_adult_killes", "yes_children_injured", "yes_children_killed"), missing_members = percent_response(missing_family_members, ., "yes"), movement_restrictions = percent_response(movement_restrictions, ., "yes"), returnee_priority_need_1_name = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "priority_needs"), returnee_priority_need_1_pct = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "priority_needs"), returnee_priority_need_2_name = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "priority_needs"), returnee_priority_need_2_pct = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "priority_needs"), returnee_priority_need_3_name = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 3, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "priority_needs"), returnee_priority_need_3_pct = select_percents(filter(., displacement_status == "returnee")[["priority_needs"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "priority_needs"), idp_priority_need_1_name = select_percents(filter(., displacement_status == 
"idp")[["priority_needs"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "priority_needs"), idp_priority_need_1_pct = select_percents(filter(., displacement_status == "idp")[["priority_needs"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "priority_needs"), idp_priority_need_2_name = select_percents(filter(., displacement_status == "idp")[["priority_needs"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "priority_needs"), idp_priority_need_2_pct = select_percents(filter(., displacement_status == "idp")[["priority_needs"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "priority_needs"), idp_priority_need_3_name = select_percents(filter(., displacement_status == "idp")[["priority_needs"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "priority_needs"), idp_priority_need_3_pct = select_percents(filter(., displacement_status == "idp")[["priority_needs"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "priority_needs"), non_displaced_priority_need_1_name = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "priority_needs"), non_displaced_priority_need_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "priority_needs"), non_displaced_priority_need_2_name = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "priority_needs"), non_displaced_priority_need_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 2, filter(., 
displacement_status == "non_displaced"), questions, choices, "percent", x_name = "priority_needs"), non_displaced_priority_need_3_name = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "priority_needs"), non_displaced_priority_need_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["priority_needs"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "priority_needs"), received_assistance = percent_response(received_humanitarian_assistance, ., "yes"), rec_ass_type_1_name = select_percents(modality_assistance_received, 1, ., questions, choices, "label"), rec_ass_type_1_pct = select_percents(modality_assistance_received, 1, ., questions, choices, "percent"), rec_ass_type_2_name = select_percents(modality_assistance_received, 2, ., questions, choices, "label"), rec_ass_type_2_pct = select_percents(modality_assistance_received, 2, ., questions, choices, "percent"), rec_ass_type_3_name = select_percents(modality_assistance_received, 3, ., questions, choices, "label"), rec_ass_type_3_pct = select_percents(modality_assistance_received, 3, ., questions, choices, "percent"), rec_ass_type_4_name = select_percents(modality_assistance_received, 4, ., questions, choices, "label"), rec_ass_type_4_pct = select_percents(modality_assistance_received, 4, ., questions, choices, "percent"), rec_ass_type_5_name = select_percents(modality_assistance_received, 5, ., questions, choices, "label"), rec_ass_type_5_pct = select_percents(modality_assistance_received, 5, ., questions, choices, "percent"), assistance_satisfied = percent_response(humanitarian_assistance_satisfaction, ., "yes"), returnee_ass_barrier_1_name = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", 
x_name = "humanitarian_assistance_barriers"), returnee_ass_barrier_1_pct = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), returnee_ass_barrier_2_name = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), returnee_ass_barrier_2_pct = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), returnee_ass_barrier_3_name = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 3, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), returnee_ass_barrier_3_pct = select_percents(filter(., displacement_status == "returnee")[["humanitarian_assistance_barriers"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), idp_ass_barrier_1_name = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), idp_ass_barrier_1_pct = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), idp_ass_barrier_2_name = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "idp"), questions, choices, "label", x_name = 
"humanitarian_assistance_barriers"), idp_ass_barrier_2_pct = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), idp_ass_barrier_3_name = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), idp_ass_barrier_3_pct = select_percents(filter(., displacement_status == "idp")[["humanitarian_assistance_barriers"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_1_name = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_2_name = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_3_name = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 3, filter(., 
displacement_status == "non_displaced"), questions, choices, "label", x_name = "humanitarian_assistance_barriers"), non_displaced_ass_barrier_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["humanitarian_assistance_barriers"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "humanitarian_assistance_barriers"), returnee_prefer_ass_1_name = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 1, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "preferred_assistance_modality"), returnee_prefer_ass_1_pct = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 1, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "preferred_assistance_modality"), returnee_prefer_ass_2_name = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "preferred_assistance_modality"), returnee_prefer_ass_2_pct = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "preferred_assistance_modality"), returnee_prefer_ass_3_name = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "returnee"), questions, choices, "label", x_name = "preferred_assistance_modality"), returnee_prefer_ass_3_pct = select_percents(filter(., displacement_status == "returnee")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "returnee"), questions, choices, "percent", x_name = "preferred_assistance_modality"), idp_prefer_ass_1_name = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 1, filter(., 
displacement_status == "idp"), questions, choices, "label", x_name = "preferred_assistance_modality"), idp_prefer_ass_1_pct = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 1, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "preferred_assistance_modality"), idp_prefer_ass_2_name = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "preferred_assistance_modality"), idp_prefer_ass_2_pct = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "preferred_assistance_modality"), idp_prefer_ass_3_name = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "idp"), questions, choices, "label", x_name = "preferred_assistance_modality"), idp_prefer_ass_3_pct = select_percents(filter(., displacement_status == "idp")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "idp"), questions, choices, "percent", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_1_name = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_1_pct = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 1, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_2_name = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "non_displaced"), questions, 
choices, "label", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_2_pct = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 2, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_3_name = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "label", x_name = "preferred_assistance_modality"), non_displaced_prefer_ass_3_pct = select_percents(filter(., displacement_status == "non_displaced")[["preferred_assistance_modality"]], 3, filter(., displacement_status == "non_displaced"), questions, choices, "percent", x_name = "preferred_assistance_modality")) %>% mutate(returnee_adult_perm_job = round(100 * returnee_adult_perm_job / returnee_adults, 1), returnee_adult_temp_job = round(100 * returnee_adult_temp_job / returnee_adults, 1), returnee_adult_daily_labour = round(100 * returnee_adult_daily_labour / returnee_adults, 1), returnee_adult_gvt_payroll = round(100 * returnee_adult_gvt_payroll / returnee_adults, 1), non_displaced_adult_perm_job = round(100 * non_displaced_adult_perm_job / non_displaced_adults, 1), non_displaced_adult_temp_job = round(100 * non_displaced_adult_temp_job / non_displaced_adults, 1), non_displaced_adult_daily_labour = round(100 * non_displaced_adult_daily_labour / non_displaced_adults, 1), non_displaced_adult_gvt_payroll = round(100 * non_displaced_adult_gvt_payroll / non_displaced_adults, 1), idp_adult_perm_job = round(100 * idp_adult_perm_job / idp_adults, 1), idp_adult_temp_job = round(100 * idp_adult_temp_job / idp_adults, 1), idp_adult_daily_labour = round(100 * idp_adult_daily_labour / idp_adults, 1), idp_adult_gvt_payroll = round(100 * idp_adult_gvt_payroll / idp_adults, 1), returnee_minor_perm_job = round(100 * returnee_minor_perm_job / 
returnee_minors, 1), returnee_minor_temp_job = round(100 * returnee_minor_temp_job / returnee_minors, 1), returnee_minor_daily_labour = round(100 * returnee_minor_daily_labour / returnee_minors, 1), idp_minor_perm_job = round(100 * idp_minor_perm_job / idp_minors, 1), idp_minor_temp_job = round(100 * idp_minor_temp_job / idp_minors, 1), idp_minor_daily_labour = round(100 * idp_minor_daily_labour / idp_minors, 1), non_displaced_minor_perm_job = round(100 * non_displaced_minor_perm_job / non_displaced_minors, 1), non_displaced_minor_temp_job = round(100 * non_displaced_minor_temp_job / non_displaced_minors, 1), non_displaced_minor_daily_labour = round(100 * non_displaced_minor_daily_labour / non_displaced_minors, 1)) %>% select(-returnee_adults, -idp_adults, -non_displaced_adults, -returnee_minors, -idp_minors, -non_displaced_minors, -total_assistance, -returnee_child_male, -returnee_enrolled_child_male, -returnee_attending_child_male, -returnee_youth_male, -returnee_enrolled_youth_male, -returnee_attending_youth_male, -returnee_child_female, -returnee_enrolled_child_female, -returnee_attending_child_female, -returnee_youth_female, -returnee_enrolled_youth_female, -returnee_attending_youth_female, -idp_child_male, -idp_enrolled_child_male, -idp_attending_child_male, -idp_youth_male, -idp_enrolled_youth_male, -idp_attending_youth_male, -idp_child_female, -idp_enrolled_child_female, -idp_attending_child_female, -idp_youth_female, -idp_enrolled_youth_female, -idp_attending_youth_female, -non_displaced_child_male, -non_displaced_enrolled_child_male, -non_displaced_attending_child_male, -non_displaced_youth_male, -non_displaced_enrolled_youth_male, -non_displaced_attending_youth_male, -non_displaced_child_female, -non_displaced_enrolled_child_female, -non_displaced_attending_child_female, -non_displaced_youth_female, -non_displaced_enrolled_youth_female, -non_displaced_attending_youth_female, -total_people, -total_working_male, -total_working_female) # we will analyze 
# mantika data for the main FS separately, then merge them into `dm_file`.
# Each mantika_* object below is a one-row wide tibble (one column per
# mantika x indicator) so they can all be bound onto `dm_file` with bind_cols().

# Percent of households per mantika reporting each shelter-damage level.
mantika_shelter_damage <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(
    no_shelter_damage = percent_response(shelter_condition, ., "no_negligible_damage", group = !!get_group(.)),
    light_shelter_damage = percent_response(shelter_condition, ., "light_damage", group = !!get_group(.)),
    medium_shelter_damage = percent_response(shelter_condition, ., "medium_damage", group = !!get_group(.)),
    heavy_shelter_damage = percent_response(shelter_condition, ., "heavy_damage", group = !!get_group(.)),
    shelter_destroyed = percent_response(shelter_condition, ., "shelter_destroyed", group = !!get_group(.))
  ) %>%
  big_spread(mantika_label, c(no_shelter_damage, light_shelter_damage, medium_shelter_damage, heavy_shelter_damage, shelter_destroyed)) %>%
  rename_all(tolower) %>%
  rename_all(str_replace_all, " ", "_")

# Median number of power cuts per mantika, one "<mantika>_power_cuts" column each.
mantika_public_network <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(power_cuts = weighted_median(power_cuts, ., group = !!get_group(.))) %>%
  spread(mantika_label, power_cuts) %>%
  rename_all(tolower) %>%
  rename_all(str_replace_all, " ", "_") %>%
  rename_all(paste0, "_power_cuts")

# Percent of households per mantika in each distance-to-drinking-water bracket.
mantika_water_distance <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(
    water_dist_100m = percent_response(distance_drinkingwater, ., "upto_100m", group = !!get_group(.)),
    water_dist_200m = percent_response(distance_drinkingwater, ., "between_101_200m", group = !!get_group(.)),
    water_dist_300m = percent_response(distance_drinkingwater, ., "between_201_300m", group = !!get_group(.)),
    water_dist_400m = percent_response(distance_drinkingwater, ., "between_301_400m", group = !!get_group(.)),
    water_dist_500m = percent_response(distance_drinkingwater, ., "between_401_500m", group = !!get_group(.)),
    water_dist_more_500m = percent_response(distance_drinkingwater, ., "more_500m", group = !!get_group(.))
  ) %>%
  big_spread(mantika_label, c(water_dist_100m, water_dist_200m, water_dist_300m, water_dist_400m, water_dist_500m, water_dist_more_500m)) %>%
  rename_all(tolower) %>%
  rename_all(str_replace_all, " ", "_")

# Percent aware of explosive hazards, one "<mantika>_aware_explosives" column each.
mantika_explosives_awareness <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(aware_explosives = percent_response(presence_explosive_hazards, ., "yes", group = !!get_group(.))) %>%
  spread(mantika_label, aware_explosives) %>%
  rename_all(tolower) %>%
  rename_all(str_replace_all, " ", "_") %>%
  rename_all(paste0, "_aware_explosives")

# Top 5 mantikas by share of households reporting an INSUFFICIENT quantity of
# drinking water ("yes" to unsufficient_quantity_water).
# FIX: the summary column was previously named `sufficient_water`, which is the
# opposite of what it measures; renamed to `insufficient_water`. Behavior is
# unchanged: all downstream access (below) is by position, not by name.
mantika_water_insufficient <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(insufficient_water = percent_response(unsufficient_quantity_water, ., "yes", group = !!get_group(.))) %>%
  arrange(desc(insufficient_water)) %>%
  slice(1:5) %>%
  t  # transpose: row 1 = mantika names, row 2 = percentages (coerced to character)

# Flatten the top-5 matrix into a single named row for the factsheet.
mantika_water_insufficient_row <- tibble(
  insufficient_water_mantika_1_name = mantika_water_insufficient[1, 1],
  insufficient_water_mantika_1_pct = mantika_water_insufficient[2, 1],
  insufficient_water_mantika_2_name = mantika_water_insufficient[1, 2],
  insufficient_water_mantika_2_pct = mantika_water_insufficient[2, 2],
  insufficient_water_mantika_3_name = mantika_water_insufficient[1, 3],
  insufficient_water_mantika_3_pct = mantika_water_insufficient[2, 3],
  insufficient_water_mantika_4_name = mantika_water_insufficient[1, 4],
  insufficient_water_mantika_4_pct = mantika_water_insufficient[2, 4],
  insufficient_water_mantika_5_name = mantika_water_insufficient[1, 5],
  insufficient_water_mantika_5_pct = mantika_water_insufficient[2, 5]
)

# Top 3 mantikas by share of households reporting movement restrictions.
mantika_movement_restrictions <- dm_data %>%
  group_by(mantika_label) %>%
  summarize(movement_restrictions = percent_response(movement_restrictions, ., "yes", group = !!get_group(.))) %>%
  arrange(desc(movement_restrictions)) %>%
  slice(1:3) %>%
  t  # transpose: row 1 = mantika names, row 2 = percentages (coerced to character)

mantika_movement_restrictions_row <- tibble(
  movement_restriction_mantika_1_name = mantika_movement_restrictions[1, 1],
  movement_restriction_mantika_1_pct = mantika_movement_restrictions[2, 1],
  movement_restriction_mantika_2_name = mantika_movement_restrictions[1, 2],
  movement_restriction_mantika_2_pct = mantika_movement_restrictions[2, 2],
  movement_restriction_mantika_3_name = mantika_movement_restrictions[1, 3],
  movement_restriction_mantika_3_pct = mantika_movement_restrictions[2, 3]
)

## now need to merge this data with the other mantika data
dm_file <- bind_cols(
  dm_file,
  mantika_shelter_damage,
  mantika_public_network,
  mantika_water_distance,
  mantika_explosives_awareness,
  mantika_water_insufficient_row,
  mantika_movement_restrictions_row
)

write_csv(dm_file, "output/sector_data_merge.csv")
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=-=-=-= # Article: # Integrative meta-analysis of epigenome-wide association studies # identifies genomic and # epigenomics differences in the brain and the blood in Alzheimer’s disease #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=-=-=-= # Authors: # - Tiago C. silva # - Lily Wang #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=-=-=-= # Date: 12 July 2021 #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=-=-=-= library(readr) library(MethReg) library(readxl) library(ReMapEnrich) library(writexl) #----------------------------------------------------------------------------- # MethReg analysis # target gene ~ TF_activity (dorothea) + CpG + CpG * TF #----------------------------------------------------------------------------- path.mathReg <- "analysis_results/methReg/Blood" path.mathReg.plot <- file.path(path.mathReg, "plots/") for(p in grep("dir",ls(),value = T)) dir.create(get(p),recursive = TRUE,showWarnings = FALSE) #----------------------------------------------------------------------------- # Select cpgs #----------------------------------------------------------------------------- # CpGs with P<1E- 5 in AD vs. 
CN comparison AD_vs_CN <- readxl::read_xlsx( "DRAFT-TABLES_FIGURES_4-17-2021/_Supp Table 2 final_AD_vs_CN-selcted-columns-formatted-V2.xlsx",skip = 3 ) cpgs.ad.cn <- AD_vs_CN$cpg length(cpgs.ad.cn) # 50 cpgs.prioritized <- readxl::read_xlsx( "DRAFT-TABLES_FIGURES_4-17-2021/_Supp Table 3 prioritized-CpGs-crossTissue_brain_blood.xlsx",skip = 3 ) cpgs.prioritized <- cpgs.prioritized$CpG %>% na.omit() %>% as.character length(cpgs.prioritized) cpgs.all <- c( cpgs.prioritized, cpgs.ad.cn ) %>% unique # need to add TF activity load("datasets/Aux/ADNI_matched_rna_dnam_residuals.rda") #------------------------------------------------------------------------------- # Analysis #------------------------------------------------------------------------------- # Get triplets using remap dir.base <- "." dir.data.aux <- file.path(dir.base,"datasets/Aux/") remapCatalog2018hg19 <- downloadRemapCatalog(dir.data.aux, assembly = "hg19") remapCatalog <- bedToGranges(remapCatalog2018hg19) #------------------------------------------------------------------------------- # Aux functions #------------------------------------------------------------------------------- update_met_IQR <- function(results, dnam){ iqr <- calculate_IQR(dnam) results$met.IQR <- iqr$IQR[match(results$regionID, iqr$ID)] results <- results %>% dplyr::relocate(dplyr::contains("IQR"), .after = last_col()) return(results) } add_annot_cpgs <- function(results){ results <- cbind( results, AD_vs_CN[ match(results$probeID,AD_vs_CN$cpg), c("Islands.UCSC.Relation_to_Island","UCSC_RefGene_Name","UCSC_RefGene_Group","GREAT_annotation","E073_15_coreMarks_segments_state","sig.in.brain") ] ) results } add_percent_zero_q1_q4 <- function(results, dnam, exp){ aux <- plyr::adply( unique(results[,c("probeID","target")]), .margins = 1, .fun = function(row) { rna.target <- exp[rownames(exp) == row$target, , drop = FALSE] met <- dnam[rownames(dnam) == as.character(row$probeID), ] data <- data.frame( rna.target = rna.target %>% as.numeric, 
met = met %>% as.numeric ) quant.met <- quantile(data$met,na.rm = TRUE) low.cutoff <- quant.met[2] upper.cutoff <- quant.met[4] data.high.low <- data %>% filter(.data$met <= low.cutoff | .data$met >= upper.cutoff) data.high.low$metGrp <- ifelse(data.high.low$met <= low.cutoff, 0, 1) pct.zeros.in.quant.samples <- sum( data.high.low$rna.target == 0, na.rm = TRUE) / nrow(data.high.low) data.frame("% of 0 target genes (Q1 and Q4)" = paste0(round(pct.zeros.in.quant.samples * 100,digits = 2)," %")) } ) results$`% of 0 residual target genes (Q1 and Q4)` <- results$`% of 0 target genes (Q1 and Q4)` results$`% of 0 target genes (Q1 and Q4)` <- aux$X..of.0.target.genes..Q1.and.Q4.[ match(paste0(results$probeID,results$target),paste0(aux$probeID,aux$target)) ] return(results) } #------------------------------------------------------------------------------- # Promoter analysis, triplets using remap #------------------------------------------------------------------------------- EPIC.hg19 <- MethReg:::get_met_probes_info(genome = "hg19", arrayType = "EPIC") triplet.promoter.ewas <- create_triplet_distance_based( region = EPIC.hg19[rownames(residuals.matched.met),], genome = "hg19", target.method = "genes.promoter.overlap", TF.peaks.gr = remapCatalog, motif.search.window.size = 500 ) triplet.promoter.ewas <- triplet.promoter.ewas[!is.na(triplet.promoter.ewas$TF),] nrow(triplet.promoter.ewas) # 4642 triplets triplet.promoter.ewas <- triplet.promoter.ewas %>% dplyr::filter(.data$TF %in% rownames(rnaseq.tf.es)) triplet.promoter.ewas <- triplet.promoter.ewas %>% dplyr::filter(.data$target %in% rownames(residuals.matched.exp)) nrow(triplet.promoter.ewas) # 1995 triplets triplet.promoter.ewas$probeID <- names(EPIC.hg19)[match(triplet.promoter.ewas$regionID,make_names_from_granges(EPIC.hg19))] triplet.promoter.ewas$TF %>% unique %>% length # 239 triplet.promoter.ewas$regionID %>% unique %>% length # 36 triplet.promoter.ewas$target %>% unique %>% length # 31 file.promoter <- 
file.path(path.mathReg, "promoter/ADNI_and_remap_promoter_analysis_using_TF_es_dorothea_all_triplet.csv") dir.create(dirname(file.promoter),recursive = TRUE,showWarnings = FALSE) dnam <- MethReg:::map_probes_to_regions(residuals.matched.met,genome = "hg19",arrayType = "EPIC") cores <- 1 results.promoter.analysis <- triplet.promoter.ewas %>% cor_tf_target_gene( exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores ) %>% cor_dnam_target_gene( dnam = dnam, exp = residuals.matched.exp, cores = cores, filter.results = FALSE, min.cor.estimate = 0.2, min.cor.pval = 0.05 ) %>% interaction_model( dnam = dnam, exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores, filter.correlated.tf.exp.dnam = FALSE, filter.triplet.by.sig.term = FALSE, sig.threshold = 0.05, fdr = TRUE, stage.wise.analysis = TRUE ) %>% stratified_model( dnam = dnam, exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores ) results.promoter.analysis <- results.promoter.analysis %>% add_annot_cpgs() %>% add_percent_zero_q1_q4(dnam = residuals.matched.met, exp = residuals.matched.exp) %>% update_met_IQR(dnam = residuals.matched.met) results.promoter.analysis$RLM_TF_fdr <- p.adjust(results.promoter.analysis$RLM_TF_pvalue,method = "fdr") results.promoter.analysis$RLM_DNAmGroup_fdr <- p.adjust(results.promoter.analysis$RLM_DNAmGroup_pvalue,method = "fdr") results.promoter.analysis$`RLM_DNAmGroup:TF_fdr` <- p.adjust(results.promoter.analysis$`RLM_DNAmGroup:TF_pvalue`,method = "fdr") readr:::write_csv( x = results.promoter.analysis, file = file.promoter ) results.promoter.analysis.sig.fdr.int <- results.promoter.analysis %>% dplyr::filter(`RLM_DNAmGroup:TF_fdr` < 0.05) readr:::write_csv( x = results.promoter.analysis.sig.fdr.int, file = gsub("all_triplet","sig_fdr_int_triplet",file.promoter) ) results.promoter.analysis.sig.int <- results.promoter.analysis %>% dplyr::filter(`RLM_DNAmGroup:TF_triplet_stage_wise_adj_pvalue` < 0.05) readr:::write_csv( x = 
results.promoter.analysis.sig.int, file = gsub("all_triplet","sig_stage_wise_fdr_int_triplet",file.promoter) ) results.promoter.analysis.sig <- results.promoter.analysis %>% filter_at(vars(contains("triplet_stage")), any_vars(. < 0.05)) readr:::write_csv( x = results.promoter.analysis.sig, file = gsub("all_triplet", "sig_any_stage_wise_triplet", file.promoter) ) #------------------------------------------------------------------------------- # Distal analysis, triplets using remap #------------------------------------------------------------------------------- triplet.distal.ewas <- create_triplet_distance_based( region = EPIC.hg19[rownames(se.selected.cpgs),], genome = "hg19", target.method = "nearby.genes", TF.peaks.gr = remapCatalog, motif.search.window.size = 500, target.rm.promoter.regions.from.distal.linking = TRUE ) triplet.distal.ewas <- triplet.distal.ewas[!is.na(triplet.distal.ewas$TF),] dim(triplet.distal.ewas) # 57730 triplets triplet.distal.ewas <- triplet.distal.ewas %>% dplyr::filter(.data$TF %in% rownames(rnaseq.tf.es)) dim(triplet.distal.ewas) # 39360 triplets triplet.distal.ewas$probeID <- names(EPIC.hg19)[match(triplet.distal.ewas$regionID,make_names_from_granges(EPIC.hg19))] triplet.distal.ewas$TF %>% unique %>% length # 265 triplet.distal.ewas$regionID %>% unique %>% length # 92 triplet.distal.ewas$target %>% unique %>% length # 862 file.distal <- file.path(path.mathReg, "distal/ADNI_and_remap_distal_analysis_using_TF_es_dorothea_all_triplet.csv") dir.create(dirname(file.distal),recursive = TRUE) cores <- 4 results.distal.analysis <- triplet.distal.ewas %>% cor_tf_target_gene( exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores ) %>% cor_dnam_target_gene( dnam = dnam, exp = residuals.matched.exp, cores = cores, filter.results = FALSE, min.cor.estimate = 0.2, min.cor.pval = 0.05 ) %>% interaction_model( dnam = dnam, exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores, filter.correlated.tf.exp.dnam = 
FALSE, filter.triplet.by.sig.term = FALSE, sig.threshold = 0.05, fdr = TRUE, stage.wise.analysis = TRUE ) %>% stratified_model( dnam = dnam, exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores ) #results.distal.analysis <- results.distal.analysis[results.distal.analysis$probeID %in% cpgs.iqr.higher.0.03,] results.distal.analysis <- results.distal.analysis %>% add_annot_cpgs() %>% add_percent_zero_q1_q4(dnam = residuals.matched.met, exp = residuals.matched.exp) %>% update_met_IQR(dnam = residuals.matched.met) results.distal.analysis$RLM_TF_fdr <- p.adjust(results.distal.analysis$RLM_TF_pvalue,method = "fdr") results.distal.analysis$RLM_DNAmGroup_fdr <- p.adjust(results.distal.analysis$RLM_DNAmGroup_pvalue,method = "fdr") results.distal.analysis$`RLM_DNAmGroup:TF_fdr` <- p.adjust(results.distal.analysis$`RLM_DNAmGroup:TF_pvalue`,method = "fdr") readr:::write_csv( x = results.distal.analysis, file = file.distal ) results.distal.analysis.sig.fdr.int <- results.distal.analysis %>% dplyr::filter(`RLM_DNAmGroup:TF_fdr` < 0.05) results.distal.analysis.sig.fdr.int <- results.distal.analysis.sig.fdr.int[order(results.distal.analysis.sig.fdr.int$`RLM_DNAmGroup:TF_fdr`),] readr:::write_csv( x = results.distal.analysis.sig.fdr.int, file = gsub("all_triplet","sig_fdr_int_triplet",file.distal) ) results.distal.analysis.sig.int <- results.distal.analysis %>% dplyr::filter(results.distal.analysis$`RLM_DNAmGroup:TF_triplet_stage_wise_adj_pvalue` < 0.05) readr:::write_csv( x = results.distal.analysis.sig.int, file = gsub("all_triplet","sig_stage_wise_fdr_int_triplet",file.distal) ) results.distal.analysis.sig <- results.distal.analysis %>% filter_at(vars(contains("triplet_stage")), any_vars(. 
< 0.05)) readr:::write_csv( x = results.distal.analysis.sig, file = gsub("all_triplet", "sig_any_stage_wise_triplet", file.distal) ) #------------------------------------------------------------------------------------------- # Save table #------------------------------------------------------------------------------------------ writexl::write_xlsx( list( "Methreg_promoter.analysis_all" = results.promoter.analysis, "Methreg_distal.analysis_all" = results.distal.analysis ), path = file.path(path.mathReg,"cpg/MethReg_distal_promoter.xlsx") ) #------------------------------------------------------------------------------------------- # Plot triplets #------------------------------------------------------------------------------------------ triplets <- plyr::rbind.fill(results.promoter.analysis,results.distal.analysis) triplets <- triplets[which(triplets$`RLM_DNAmGroup:TF_triplet_stage_wise_adj_pvalue` < 0.05),] plots <- plot_interaction_model( triplet.results = triplets, dnam = dnam, label.dnam = "residuals", label.exp = "residuals", exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, genome = "hg19" ) # Merge plots into one file plots.one.page <- gridExtra::marrangeGrob(plots, nrow = 1, ncol = 1) ggplot2::ggsave( filename = file.path(path.mathReg, paste0("plots/Distal_promoter_RLM_DNAmGroup_TF_triplet_stage_wise_adj_pvalue_less_than_005.pdf")), plot = plots.one.page, width = 11, height = 13 )
/code/MethReg/Blood_MethReg_cpg.R
no_license
TransBioInfoLab/AD-meta-analysis-blood
R
false
false
12,996
r
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=-=-=-= # Article: # Integrative meta-analysis of epigenome-wide association studies # identifies genomic and # epigenomics differences in the brain and the blood in Alzheimer’s disease #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=-=-=-= # Authors: # - Tiago C. silva # - Lily Wang #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=-=-=-= # Date: 12 July 2021 #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-=-=-=-=-=-=-=-=-=-=-=-= library(readr) library(MethReg) library(readxl) library(ReMapEnrich) library(writexl) #----------------------------------------------------------------------------- # MethReg analysis # target gene ~ TF_activity (dorothea) + CpG + CpG * TF #----------------------------------------------------------------------------- path.mathReg <- "analysis_results/methReg/Blood" path.mathReg.plot <- file.path(path.mathReg, "plots/") for(p in grep("dir",ls(),value = T)) dir.create(get(p),recursive = TRUE,showWarnings = FALSE) #----------------------------------------------------------------------------- # Select cpgs #----------------------------------------------------------------------------- # CpGs with P<1E- 5 in AD vs. 
CN comparison AD_vs_CN <- readxl::read_xlsx( "DRAFT-TABLES_FIGURES_4-17-2021/_Supp Table 2 final_AD_vs_CN-selcted-columns-formatted-V2.xlsx",skip = 3 ) cpgs.ad.cn <- AD_vs_CN$cpg length(cpgs.ad.cn) # 50 cpgs.prioritized <- readxl::read_xlsx( "DRAFT-TABLES_FIGURES_4-17-2021/_Supp Table 3 prioritized-CpGs-crossTissue_brain_blood.xlsx",skip = 3 ) cpgs.prioritized <- cpgs.prioritized$CpG %>% na.omit() %>% as.character length(cpgs.prioritized) cpgs.all <- c( cpgs.prioritized, cpgs.ad.cn ) %>% unique # need to add TF activity load("datasets/Aux/ADNI_matched_rna_dnam_residuals.rda") #------------------------------------------------------------------------------- # Analysis #------------------------------------------------------------------------------- # Get triplets using remap dir.base <- "." dir.data.aux <- file.path(dir.base,"datasets/Aux/") remapCatalog2018hg19 <- downloadRemapCatalog(dir.data.aux, assembly = "hg19") remapCatalog <- bedToGranges(remapCatalog2018hg19) #------------------------------------------------------------------------------- # Aux functions #------------------------------------------------------------------------------- update_met_IQR <- function(results, dnam){ iqr <- calculate_IQR(dnam) results$met.IQR <- iqr$IQR[match(results$regionID, iqr$ID)] results <- results %>% dplyr::relocate(dplyr::contains("IQR"), .after = last_col()) return(results) } add_annot_cpgs <- function(results){ results <- cbind( results, AD_vs_CN[ match(results$probeID,AD_vs_CN$cpg), c("Islands.UCSC.Relation_to_Island","UCSC_RefGene_Name","UCSC_RefGene_Group","GREAT_annotation","E073_15_coreMarks_segments_state","sig.in.brain") ] ) results } add_percent_zero_q1_q4 <- function(results, dnam, exp){ aux <- plyr::adply( unique(results[,c("probeID","target")]), .margins = 1, .fun = function(row) { rna.target <- exp[rownames(exp) == row$target, , drop = FALSE] met <- dnam[rownames(dnam) == as.character(row$probeID), ] data <- data.frame( rna.target = rna.target %>% as.numeric, 
met = met %>% as.numeric ) quant.met <- quantile(data$met,na.rm = TRUE) low.cutoff <- quant.met[2] upper.cutoff <- quant.met[4] data.high.low <- data %>% filter(.data$met <= low.cutoff | .data$met >= upper.cutoff) data.high.low$metGrp <- ifelse(data.high.low$met <= low.cutoff, 0, 1) pct.zeros.in.quant.samples <- sum( data.high.low$rna.target == 0, na.rm = TRUE) / nrow(data.high.low) data.frame("% of 0 target genes (Q1 and Q4)" = paste0(round(pct.zeros.in.quant.samples * 100,digits = 2)," %")) } ) results$`% of 0 residual target genes (Q1 and Q4)` <- results$`% of 0 target genes (Q1 and Q4)` results$`% of 0 target genes (Q1 and Q4)` <- aux$X..of.0.target.genes..Q1.and.Q4.[ match(paste0(results$probeID,results$target),paste0(aux$probeID,aux$target)) ] return(results) } #------------------------------------------------------------------------------- # Promoter analysis, triplets using remap #------------------------------------------------------------------------------- EPIC.hg19 <- MethReg:::get_met_probes_info(genome = "hg19", arrayType = "EPIC") triplet.promoter.ewas <- create_triplet_distance_based( region = EPIC.hg19[rownames(residuals.matched.met),], genome = "hg19", target.method = "genes.promoter.overlap", TF.peaks.gr = remapCatalog, motif.search.window.size = 500 ) triplet.promoter.ewas <- triplet.promoter.ewas[!is.na(triplet.promoter.ewas$TF),] nrow(triplet.promoter.ewas) # 4642 triplets triplet.promoter.ewas <- triplet.promoter.ewas %>% dplyr::filter(.data$TF %in% rownames(rnaseq.tf.es)) triplet.promoter.ewas <- triplet.promoter.ewas %>% dplyr::filter(.data$target %in% rownames(residuals.matched.exp)) nrow(triplet.promoter.ewas) # 1995 triplets triplet.promoter.ewas$probeID <- names(EPIC.hg19)[match(triplet.promoter.ewas$regionID,make_names_from_granges(EPIC.hg19))] triplet.promoter.ewas$TF %>% unique %>% length # 239 triplet.promoter.ewas$regionID %>% unique %>% length # 36 triplet.promoter.ewas$target %>% unique %>% length # 31 file.promoter <- 
file.path(path.mathReg, "promoter/ADNI_and_remap_promoter_analysis_using_TF_es_dorothea_all_triplet.csv") dir.create(dirname(file.promoter),recursive = TRUE,showWarnings = FALSE) dnam <- MethReg:::map_probes_to_regions(residuals.matched.met,genome = "hg19",arrayType = "EPIC") cores <- 1 results.promoter.analysis <- triplet.promoter.ewas %>% cor_tf_target_gene( exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores ) %>% cor_dnam_target_gene( dnam = dnam, exp = residuals.matched.exp, cores = cores, filter.results = FALSE, min.cor.estimate = 0.2, min.cor.pval = 0.05 ) %>% interaction_model( dnam = dnam, exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores, filter.correlated.tf.exp.dnam = FALSE, filter.triplet.by.sig.term = FALSE, sig.threshold = 0.05, fdr = TRUE, stage.wise.analysis = TRUE ) %>% stratified_model( dnam = dnam, exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores ) results.promoter.analysis <- results.promoter.analysis %>% add_annot_cpgs() %>% add_percent_zero_q1_q4(dnam = residuals.matched.met, exp = residuals.matched.exp) %>% update_met_IQR(dnam = residuals.matched.met) results.promoter.analysis$RLM_TF_fdr <- p.adjust(results.promoter.analysis$RLM_TF_pvalue,method = "fdr") results.promoter.analysis$RLM_DNAmGroup_fdr <- p.adjust(results.promoter.analysis$RLM_DNAmGroup_pvalue,method = "fdr") results.promoter.analysis$`RLM_DNAmGroup:TF_fdr` <- p.adjust(results.promoter.analysis$`RLM_DNAmGroup:TF_pvalue`,method = "fdr") readr:::write_csv( x = results.promoter.analysis, file = file.promoter ) results.promoter.analysis.sig.fdr.int <- results.promoter.analysis %>% dplyr::filter(`RLM_DNAmGroup:TF_fdr` < 0.05) readr:::write_csv( x = results.promoter.analysis.sig.fdr.int, file = gsub("all_triplet","sig_fdr_int_triplet",file.promoter) ) results.promoter.analysis.sig.int <- results.promoter.analysis %>% dplyr::filter(`RLM_DNAmGroup:TF_triplet_stage_wise_adj_pvalue` < 0.05) readr:::write_csv( x = 
results.promoter.analysis.sig.int, file = gsub("all_triplet","sig_stage_wise_fdr_int_triplet",file.promoter) ) results.promoter.analysis.sig <- results.promoter.analysis %>% filter_at(vars(contains("triplet_stage")), any_vars(. < 0.05)) readr:::write_csv( x = results.promoter.analysis.sig, file = gsub("all_triplet", "sig_any_stage_wise_triplet", file.promoter) ) #------------------------------------------------------------------------------- # Distal analysis, triplets using remap #------------------------------------------------------------------------------- triplet.distal.ewas <- create_triplet_distance_based( region = EPIC.hg19[rownames(se.selected.cpgs),], genome = "hg19", target.method = "nearby.genes", TF.peaks.gr = remapCatalog, motif.search.window.size = 500, target.rm.promoter.regions.from.distal.linking = TRUE ) triplet.distal.ewas <- triplet.distal.ewas[!is.na(triplet.distal.ewas$TF),] dim(triplet.distal.ewas) # 57730 triplets triplet.distal.ewas <- triplet.distal.ewas %>% dplyr::filter(.data$TF %in% rownames(rnaseq.tf.es)) dim(triplet.distal.ewas) # 39360 triplets triplet.distal.ewas$probeID <- names(EPIC.hg19)[match(triplet.distal.ewas$regionID,make_names_from_granges(EPIC.hg19))] triplet.distal.ewas$TF %>% unique %>% length # 265 triplet.distal.ewas$regionID %>% unique %>% length # 92 triplet.distal.ewas$target %>% unique %>% length # 862 file.distal <- file.path(path.mathReg, "distal/ADNI_and_remap_distal_analysis_using_TF_es_dorothea_all_triplet.csv") dir.create(dirname(file.distal),recursive = TRUE) cores <- 4 results.distal.analysis <- triplet.distal.ewas %>% cor_tf_target_gene( exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores ) %>% cor_dnam_target_gene( dnam = dnam, exp = residuals.matched.exp, cores = cores, filter.results = FALSE, min.cor.estimate = 0.2, min.cor.pval = 0.05 ) %>% interaction_model( dnam = dnam, exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores, filter.correlated.tf.exp.dnam = 
FALSE, filter.triplet.by.sig.term = FALSE, sig.threshold = 0.05, fdr = TRUE, stage.wise.analysis = TRUE ) %>% stratified_model( dnam = dnam, exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, cores = cores ) #results.distal.analysis <- results.distal.analysis[results.distal.analysis$probeID %in% cpgs.iqr.higher.0.03,] results.distal.analysis <- results.distal.analysis %>% add_annot_cpgs() %>% add_percent_zero_q1_q4(dnam = residuals.matched.met, exp = residuals.matched.exp) %>% update_met_IQR(dnam = residuals.matched.met) results.distal.analysis$RLM_TF_fdr <- p.adjust(results.distal.analysis$RLM_TF_pvalue,method = "fdr") results.distal.analysis$RLM_DNAmGroup_fdr <- p.adjust(results.distal.analysis$RLM_DNAmGroup_pvalue,method = "fdr") results.distal.analysis$`RLM_DNAmGroup:TF_fdr` <- p.adjust(results.distal.analysis$`RLM_DNAmGroup:TF_pvalue`,method = "fdr") readr:::write_csv( x = results.distal.analysis, file = file.distal ) results.distal.analysis.sig.fdr.int <- results.distal.analysis %>% dplyr::filter(`RLM_DNAmGroup:TF_fdr` < 0.05) results.distal.analysis.sig.fdr.int <- results.distal.analysis.sig.fdr.int[order(results.distal.analysis.sig.fdr.int$`RLM_DNAmGroup:TF_fdr`),] readr:::write_csv( x = results.distal.analysis.sig.fdr.int, file = gsub("all_triplet","sig_fdr_int_triplet",file.distal) ) results.distal.analysis.sig.int <- results.distal.analysis %>% dplyr::filter(results.distal.analysis$`RLM_DNAmGroup:TF_triplet_stage_wise_adj_pvalue` < 0.05) readr:::write_csv( x = results.distal.analysis.sig.int, file = gsub("all_triplet","sig_stage_wise_fdr_int_triplet",file.distal) ) results.distal.analysis.sig <- results.distal.analysis %>% filter_at(vars(contains("triplet_stage")), any_vars(. 
< 0.05)) readr:::write_csv( x = results.distal.analysis.sig, file = gsub("all_triplet", "sig_any_stage_wise_triplet", file.distal) ) #------------------------------------------------------------------------------------------- # Save table #------------------------------------------------------------------------------------------ writexl::write_xlsx( list( "Methreg_promoter.analysis_all" = results.promoter.analysis, "Methreg_distal.analysis_all" = results.distal.analysis ), path = file.path(path.mathReg,"cpg/MethReg_distal_promoter.xlsx") ) #------------------------------------------------------------------------------------------- # Plot triplets #------------------------------------------------------------------------------------------ triplets <- plyr::rbind.fill(results.promoter.analysis,results.distal.analysis) triplets <- triplets[which(triplets$`RLM_DNAmGroup:TF_triplet_stage_wise_adj_pvalue` < 0.05),] plots <- plot_interaction_model( triplet.results = triplets, dnam = dnam, label.dnam = "residuals", label.exp = "residuals", exp = residuals.matched.exp, tf.activity.es = rnaseq.tf.es, genome = "hg19" ) # Merge plots into one file plots.one.page <- gridExtra::marrangeGrob(plots, nrow = 1, ncol = 1) ggplot2::ggsave( filename = file.path(path.mathReg, paste0("plots/Distal_promoter_RLM_DNAmGroup_TF_triplet_stage_wise_adj_pvalue_less_than_005.pdf")), plot = plots.one.page, width = 11, height = 13 )
library(emdi) data(eusilcA_smp) data(eusilcA_pop) # Get weighted direct estimates and the corresponding variance direct_estim <- direct("eqIncome", smp_data = eusilcA_smp, smp_domains = "district", weights = "weight", var = TRUE) # Reduce the data set to mean and headcount ratio eusilcA_smpAgg <- direct_estim$ind[, 1:3] eusilcA_smpAgg <- data.frame(eusilcA_smpAgg, direct_estim$MSE[, 2:3]) names(eusilcA_smpAgg) <- c("Domain", "Mean", "Head_Count", "Var_Mean", "Var_Head_Count") # Add sample sizes for effective sample size eusilcA_smpAgg$n <- table(direct_estim$framework$smp_domains_vec) # Get covariate information from the population data pop_eqsize <- direct("eqsize", smp_data = eusilcA_pop, smp_domains = "district") pop_cash <- direct("cash", smp_data = eusilcA_pop, smp_domains = "district") pop_self_empl <- direct("self_empl", smp_data = eusilcA_pop, smp_domains = "district") pop_unempl_ben <- direct("unempl_ben", smp_data = eusilcA_pop, smp_domains = "district") pop_age_ben <- direct("age_ben", smp_data = eusilcA_pop, smp_domains = "district") pop_surv_ben <- direct("surv_ben", smp_data = eusilcA_pop, smp_domains = "district") pop_sick_ben <- direct("sick_ben", smp_data = eusilcA_pop, smp_domains = "district") pop_dis_ben <- direct("dis_ben", smp_data = eusilcA_pop, smp_domains = "district") pop_rent <- direct("rent", smp_data = eusilcA_pop, smp_domains = "district") pop_fam_allow <- direct("fam_allow", smp_data = eusilcA_pop, smp_domains = "district") pop_house_allow <- direct("house_allow", smp_data = eusilcA_pop, smp_domains = "district") pop_cap_inv <- direct("cap_inv", smp_data = eusilcA_pop, smp_domains = "district") pop_tax_adj <- direct("tax_adj", smp_data = eusilcA_pop, smp_domains = "district") eusilcA_popAgg <- data.frame(Domain = pop_eqsize$ind$Domain, eqsize = pop_eqsize$ind$Mean, cash = pop_cash$ind$Mean, self_empl = pop_self_empl$ind$Mean, unempl_ben = pop_unempl_ben$ind$Mean, age_ben = pop_age_ben$ind$Mean, surv_ben = pop_surv_ben$ind$Mean, 
sick_ben = pop_sick_ben$ind$Mean, dis_ben = pop_dis_ben$ind$Mean, rent = pop_rent$ind$Mean, fam_allow = pop_fam_allow$ind$Mean, house_allow = pop_house_allow$ind$Mean, cap_inv = pop_cap_inv$ind$Mean, tax_adj = pop_tax_adj$ind$Mean) save(eusilcA_smpAgg, file = "./data/eusilcA_smpAgg.rda") save(eusilcA_popAgg, file = "./data/eusilcA_popAgg.rda")
/Spielplatz/get_aggregated_data.R
no_license
akreutzmann/fayherriot
R
false
false
2,732
r
library(emdi) data(eusilcA_smp) data(eusilcA_pop) # Get weighted direct estimates and the corresponding variance direct_estim <- direct("eqIncome", smp_data = eusilcA_smp, smp_domains = "district", weights = "weight", var = TRUE) # Reduce the data set to mean and headcount ratio eusilcA_smpAgg <- direct_estim$ind[, 1:3] eusilcA_smpAgg <- data.frame(eusilcA_smpAgg, direct_estim$MSE[, 2:3]) names(eusilcA_smpAgg) <- c("Domain", "Mean", "Head_Count", "Var_Mean", "Var_Head_Count") # Add sample sizes for effective sample size eusilcA_smpAgg$n <- table(direct_estim$framework$smp_domains_vec) # Get covariate information from the population data pop_eqsize <- direct("eqsize", smp_data = eusilcA_pop, smp_domains = "district") pop_cash <- direct("cash", smp_data = eusilcA_pop, smp_domains = "district") pop_self_empl <- direct("self_empl", smp_data = eusilcA_pop, smp_domains = "district") pop_unempl_ben <- direct("unempl_ben", smp_data = eusilcA_pop, smp_domains = "district") pop_age_ben <- direct("age_ben", smp_data = eusilcA_pop, smp_domains = "district") pop_surv_ben <- direct("surv_ben", smp_data = eusilcA_pop, smp_domains = "district") pop_sick_ben <- direct("sick_ben", smp_data = eusilcA_pop, smp_domains = "district") pop_dis_ben <- direct("dis_ben", smp_data = eusilcA_pop, smp_domains = "district") pop_rent <- direct("rent", smp_data = eusilcA_pop, smp_domains = "district") pop_fam_allow <- direct("fam_allow", smp_data = eusilcA_pop, smp_domains = "district") pop_house_allow <- direct("house_allow", smp_data = eusilcA_pop, smp_domains = "district") pop_cap_inv <- direct("cap_inv", smp_data = eusilcA_pop, smp_domains = "district") pop_tax_adj <- direct("tax_adj", smp_data = eusilcA_pop, smp_domains = "district") eusilcA_popAgg <- data.frame(Domain = pop_eqsize$ind$Domain, eqsize = pop_eqsize$ind$Mean, cash = pop_cash$ind$Mean, self_empl = pop_self_empl$ind$Mean, unempl_ben = pop_unempl_ben$ind$Mean, age_ben = pop_age_ben$ind$Mean, surv_ben = pop_surv_ben$ind$Mean, 
sick_ben = pop_sick_ben$ind$Mean, dis_ben = pop_dis_ben$ind$Mean, rent = pop_rent$ind$Mean, fam_allow = pop_fam_allow$ind$Mean, house_allow = pop_house_allow$ind$Mean, cap_inv = pop_cap_inv$ind$Mean, tax_adj = pop_tax_adj$ind$Mean) save(eusilcA_smpAgg, file = "./data/eusilcA_smpAgg.rda") save(eusilcA_popAgg, file = "./data/eusilcA_popAgg.rda")
########################################################################## # Copyright (c) 2014 Andrew Yates # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ########################################################################## # -------------------------------------------------- # Internal debugging tools to verify function. # -------------------------------------------------- sse <- function(v) sum((v-mean(v))^2) ## compute sum squared error per step location "i" ## 1:i -> low step ## i+1:n -> high step fit.upstep <- function(v) { R <- list() v <- sort(v) n <- length(v) R$v.sse <- sapply(1:(n-1), function(i) sse(v[1:i])+sse(v[(i+1):n])) R$idx <- which.min(R$v.sse) R$sum.sse <- R$v.sse[R$idx] R$th <- (v[R$idx]+v[R$idx+1])/2 R$low <- mean(v[1:R$idx]) R$high <- mean(v[(R$idx+1):n]) R$mean <- mean(v) R$mean.idx <- tail(which(v<=mean(v)), 1) R$median <- median(v) R$median.idx <- max(floor(length(v)/2),1) R } plot.sse <- function(R, add.mean.median=FALSE, ...) 
{ plot(R$v.sse, xlab="Highest Index In Low Step", ylab="Sum of Sum Squared Errors", ...) legend("topright",pch=c(15,16,17),col=c("#cc0000","#0000cc","#009900"),legend=c("Stepfit","Mean","Median")) abline(h=R$sum.sse, col="#cc0000", lty=1) abline(v=R$idx, col="#cc0000", lty=3) points(R$idx, R$v.sse[R$idx], col="#ff0000", pch=15, cex=3) if(add.mean.median) { # mean abline(v=R$mean.idx, col="#0000cc", lty=3) abline(h=R$v.sse[R$mean.idx], col="#0000cc", lty=1) points(R$mean.idx, R$v.sse[R$mean.idx], col="#0000cc", pch=16, cex=3) # median abline(v=R$median.idx, col="#009900", lty=3) abline(h=R$v.sse[R$median.idx], col="#009900", lty=1) points(R$median.idx, R$v.sse[R$median.idx], col="#009900", pch=17, cex=3) } } plot.stepfit <- function(R, v, add.mean.median=FALSE, ...) { v <- sort(v) plot(v, xlab="Rank", ylab="Log Expression", ...) legend("topleft",pch=c(15,16,17),col=c("#cc0000","#0000cc","#009900"),legend=c("Stepfit","Mean","Median")) abline(h=R$th, col="#cc0000", lty=2, lwd=2) abline(v=R$idx, col="#990000", lty=3) abline(h=R$high, col="#990000", lwd=1, lty=3) abline(h=R$low, col="#990000", lwd=1, lty=3) if(add.mean.median) { abline(h=R$mean, col="#0000cc", lty=2, lwd=2) #abline(v=R$mean.idx, col="#0000cc", lty=3, lwd=1) points(R$mean.idx, R$mean, col="#0000cc", pch=16, cex=3) abline(h=R$median, col="#009900", lty=2, lwd=2) #abline(v=R$median.idx, col="#009900", lty=3, lwd=1) points(R$median.idx, R$median, col="#009900", pch=17, cex=3) } segments(x0=0, y0=R$low, x1=R$idx, y1=R$low, col="#ff0000", lwd=6) segments(x0=R$idx, y0=R$low, x1=R$idx, y1=R$high, col="#ff0000", lwd=6) segments(x0=R$idx, y0=R$high, x1=length(v)+1, y1=R$high, col="#ff0000", lwd=6) points(R$idx+0.5, R$th, col="#ff0000", pch=15, cex=3) }
/gsplom.Rcheck/00_pkg_src/gsplom/R/util.R
permissive
andrewdyates/gsplom.rpackage
R
false
false
3,826
r
########################################################################## # Copyright (c) 2014 Andrew Yates # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ########################################################################## # -------------------------------------------------- # Internal debugging tools to verify function. # -------------------------------------------------- sse <- function(v) sum((v-mean(v))^2) ## compute sum squared error per step location "i" ## 1:i -> low step ## i+1:n -> high step fit.upstep <- function(v) { R <- list() v <- sort(v) n <- length(v) R$v.sse <- sapply(1:(n-1), function(i) sse(v[1:i])+sse(v[(i+1):n])) R$idx <- which.min(R$v.sse) R$sum.sse <- R$v.sse[R$idx] R$th <- (v[R$idx]+v[R$idx+1])/2 R$low <- mean(v[1:R$idx]) R$high <- mean(v[(R$idx+1):n]) R$mean <- mean(v) R$mean.idx <- tail(which(v<=mean(v)), 1) R$median <- median(v) R$median.idx <- max(floor(length(v)/2),1) R } plot.sse <- function(R, add.mean.median=FALSE, ...) 
{ plot(R$v.sse, xlab="Highest Index In Low Step", ylab="Sum of Sum Squared Errors", ...) legend("topright",pch=c(15,16,17),col=c("#cc0000","#0000cc","#009900"),legend=c("Stepfit","Mean","Median")) abline(h=R$sum.sse, col="#cc0000", lty=1) abline(v=R$idx, col="#cc0000", lty=3) points(R$idx, R$v.sse[R$idx], col="#ff0000", pch=15, cex=3) if(add.mean.median) { # mean abline(v=R$mean.idx, col="#0000cc", lty=3) abline(h=R$v.sse[R$mean.idx], col="#0000cc", lty=1) points(R$mean.idx, R$v.sse[R$mean.idx], col="#0000cc", pch=16, cex=3) # median abline(v=R$median.idx, col="#009900", lty=3) abline(h=R$v.sse[R$median.idx], col="#009900", lty=1) points(R$median.idx, R$v.sse[R$median.idx], col="#009900", pch=17, cex=3) } } plot.stepfit <- function(R, v, add.mean.median=FALSE, ...) { v <- sort(v) plot(v, xlab="Rank", ylab="Log Expression", ...) legend("topleft",pch=c(15,16,17),col=c("#cc0000","#0000cc","#009900"),legend=c("Stepfit","Mean","Median")) abline(h=R$th, col="#cc0000", lty=2, lwd=2) abline(v=R$idx, col="#990000", lty=3) abline(h=R$high, col="#990000", lwd=1, lty=3) abline(h=R$low, col="#990000", lwd=1, lty=3) if(add.mean.median) { abline(h=R$mean, col="#0000cc", lty=2, lwd=2) #abline(v=R$mean.idx, col="#0000cc", lty=3, lwd=1) points(R$mean.idx, R$mean, col="#0000cc", pch=16, cex=3) abline(h=R$median, col="#009900", lty=2, lwd=2) #abline(v=R$median.idx, col="#009900", lty=3, lwd=1) points(R$median.idx, R$median, col="#009900", pch=17, cex=3) } segments(x0=0, y0=R$low, x1=R$idx, y1=R$low, col="#ff0000", lwd=6) segments(x0=R$idx, y0=R$low, x1=R$idx, y1=R$high, col="#ff0000", lwd=6) segments(x0=R$idx, y0=R$high, x1=length(v)+1, y1=R$high, col="#ff0000", lwd=6) points(R$idx+0.5, R$th, col="#ff0000", pch=15, cex=3) }
\name{lmer} \alias{lmer} \title{Fit Linear Mixed-Effects Models} \usage{ lmer(formula, data = NULL, REML = TRUE, sparseX = FALSE, control = list(), start = NULL, verbose = 0L, subset, weights, na.action, offset, contrasts = NULL, devFunOnly = FALSE, optimizer = "Nelder_Mead", ...) } \arguments{ \item{formula}{a two-sided linear formula object describing both the fixed-effects and fixed-effects part of the model, with the response on the left of a \code{~} operator and the terms, separated by \code{+} operators, on the right. Random-effects terms are distinguished by vertical bars (\code{"|"}) separating expressions for design matrices from grouping factors.} \item{data}{an optional data frame containing the variables named in \code{formula}. By default the variables are taken from the environment from which \code{lmer} is called. While \code{data} is optional, the package authors \emph{strongly} recommend its use, especially when later applying methods such as \code{update} and \code{drop1} to the fitted model (\emph{such methods are not guaranteed to work properly if \code{data} is omitted}). If \code{data} is omitted, variables will be taken from the environment of \code{formula} (if specified as a formula) or from the parent frame (if specified as a character vector).} \item{REML}{logical scalar - Should the estimates be chosen to optimize the REML criterion (as opposed to the log-likelihood)? Defaults to \code{TRUE}.} \item{sparseX}{logical - should a sparse model matrix be used for the fixed-effects terms? Defaults to \code{FALSE}. Currently inactive.} \item{control}{a named list of control parameters for the estimation algorithm, specifying only the ones to be changed from their default values. Hence defaults to an empty list.\cr Possible control options and their default values are: \describe{ \item{\code{msVerbose}:}{a logical value passed as the \code{trace} argument to \code{nlminb} (see documentation on that function). 
Default is \code{getOption("verbose")}.} \item{\code{maxIter}:}{a positive integer passed as the \code{maxIter} argument to \code{nlminb} (see documentation on that function). Default is \code{300}.} \item{\code{maxFN}:}{a positive integer specifying the maximum number of evaluations of the deviance function allowed during the optimization. Default is \code{900}.} \item{\code{tol}:}{a positive number specifying the convergence tolerance, currently only for the PWRSS iterations in \code{\link{glmer}}. Default is \code{0.000001}.} }} \item{start}{a named list of starting values for the parameters in the model. For \code{lmer} this can be a numeric vector or a list with one component named \code{"theta"}. Infrequently used.} \item{verbose}{integer scalar. If \code{> 0} verbose output is generated during the optimization of the parameter estimates. If \code{> 1} verbose output is generated during the individual PIRLS steps.} \item{subset}{an optional expression indicating the subset of the rows of \code{data} that should be used in the fit. This can be a logical vector, or a numeric vector indicating which observation numbers are to be included, or a character vector of the row names to be included. All observations are included by default.} \item{weights}{an optional vector of \sQuote{prior weights} to be used in the fitting process. Should be \code{NULL} or a numeric vector.} \item{na.action}{a function that indicates what should happen when the data contain \code{NA}s. The default action (\code{na.fail}) prints an error message and terminates if there are any incomplete observations.} \item{offset}{this can be used to specify an \emph{a priori} known component to be included in the linear predictor during fitting. This should be \code{NULL} or a numeric vector of length equal to the number of cases. One or more \code{\link{offset}} terms can be included in the formula instead or as well, and if more than one is specified their sum is used. 
See \code{\link{model.offset}}.} \item{contrasts}{an optional list. See the \code{contrasts.arg} of \code{model.matrix.default}.} \item{devFunOnly}{logical - return only the deviance evaluation function.} \item{optimizer}{character - name of optimizing function. The built-in optimizers are \code{\link{Nelder_Mead}} and \code{\link[minqa]{bobyqa}} (from the \pkg{minqa} package. Any minimizing function that allows box constraints can be used provided that it (1) takes input parameters \code{fn} (function to be optimized), \code{par} (starting parameter values), \code{lower} (lower bounds) and \code{control} (control parameters, passed through from the \code{control} argument) and (2) returns a list with (at least) elements \code{par} (best-fit parameters), \code{fval} (best-fit function value), \code{conv} (convergence code) and (optionally) \code{message} (informational message, or explanation of convergence failure). Special provisions are made for \code{\link{bobyqa}}, \code{\link{Nelder_Mead}}, and optimizers wrapped in the \pkg{optimx} package; to use \pkg{optimx} optimizers (including \code{L-BFGS-B} from base \code{\link{optim}} and \code{\link{nlminb}}), pass the \code{method} argument to \code{optim} in the \code{control} argument.} \item{\dots}{other potential arguments. A \code{method} argument was used in earlier versions of the package. Its functionality has been replaced by the \code{REML} argument.} } \value{ An object of class \code{"\linkS4class{merMod}"}, for which many methods are available. See there for details. } \description{ Fit a linear mixed model (LMM) } \details{ \itemize{ \item{If the \code{formula} argument is specified as a character vector, the function will attempt to coerce it to a formula. 
However, this is not recommended (users who want to construct formulas by pasting together components are advised to use \code{\link{as.formula}}); model fits will work but subsequent methods such as \code{\link{drop1}}, \code{\link{update}} may fail.} \item{Unlike some simpler modeling frameworks such as \code{\link{lm}} and \code{\link{glm}} which automatically detect perfectly collinear predictor variables, \code{[gn]lmer} cannot handle design matrices of less than full rank. For example, in cases of models with interactions that have unobserved combinations of levels, it is up to the user to define a new variable (for example creating \code{ab} within the data from the results of \code{droplevels(interaction(a,b))}). } } } \examples{ ## linear mixed models - reference values from older code (fm1 <- lmer(Reaction ~ Days + (Days|Subject), sleepstudy)) (fm2 <- lmer(Reaction ~ Days + (1|Subject) + (0+Days|Subject), sleepstudy)) anova(fm1, fm2) } \seealso{ The \code{\linkS4class{merMod}} class, \code{\link[stats]{lm}} } \concept{ LMM } \keyword{models}
/man/lmer.Rd
no_license
stevencarlislewalker/lme4
R
false
false
7,050
rd
\name{lmer} \alias{lmer} \title{Fit Linear Mixed-Effects Models} \usage{ lmer(formula, data = NULL, REML = TRUE, sparseX = FALSE, control = list(), start = NULL, verbose = 0L, subset, weights, na.action, offset, contrasts = NULL, devFunOnly = FALSE, optimizer = "Nelder_Mead", ...) } \arguments{ \item{formula}{a two-sided linear formula object describing both the fixed-effects and fixed-effects part of the model, with the response on the left of a \code{~} operator and the terms, separated by \code{+} operators, on the right. Random-effects terms are distinguished by vertical bars (\code{"|"}) separating expressions for design matrices from grouping factors.} \item{data}{an optional data frame containing the variables named in \code{formula}. By default the variables are taken from the environment from which \code{lmer} is called. While \code{data} is optional, the package authors \emph{strongly} recommend its use, especially when later applying methods such as \code{update} and \code{drop1} to the fitted model (\emph{such methods are not guaranteed to work properly if \code{data} is omitted}). If \code{data} is omitted, variables will be taken from the environment of \code{formula} (if specified as a formula) or from the parent frame (if specified as a character vector).} \item{REML}{logical scalar - Should the estimates be chosen to optimize the REML criterion (as opposed to the log-likelihood)? Defaults to \code{TRUE}.} \item{sparseX}{logical - should a sparse model matrix be used for the fixed-effects terms? Defaults to \code{FALSE}. Currently inactive.} \item{control}{a named list of control parameters for the estimation algorithm, specifying only the ones to be changed from their default values. Hence defaults to an empty list.\cr Possible control options and their default values are: \describe{ \item{\code{msVerbose}:}{a logical value passed as the \code{trace} argument to \code{nlminb} (see documentation on that function). 
Default is \code{getOption("verbose")}.} \item{\code{maxIter}:}{a positive integer passed as the \code{maxIter} argument to \code{nlminb} (see documentation on that function). Default is \code{300}.} \item{\code{maxFN}:}{a positive integer specifying the maximum number of evaluations of the deviance function allowed during the optimization. Default is \code{900}.} \item{\code{tol}:}{a positive number specifying the convergence tolerance, currently only for the PWRSS iterations in \code{\link{glmer}}. Default is \code{0.000001}.} }} \item{start}{a named list of starting values for the parameters in the model. For \code{lmer} this can be a numeric vector or a list with one component named \code{"theta"}. Infrequently used.} \item{verbose}{integer scalar. If \code{> 0} verbose output is generated during the optimization of the parameter estimates. If \code{> 1} verbose output is generated during the individual PIRLS steps.} \item{subset}{an optional expression indicating the subset of the rows of \code{data} that should be used in the fit. This can be a logical vector, or a numeric vector indicating which observation numbers are to be included, or a character vector of the row names to be included. All observations are included by default.} \item{weights}{an optional vector of \sQuote{prior weights} to be used in the fitting process. Should be \code{NULL} or a numeric vector.} \item{na.action}{a function that indicates what should happen when the data contain \code{NA}s. The default action (\code{na.fail}) prints an error message and terminates if there are any incomplete observations.} \item{offset}{this can be used to specify an \emph{a priori} known component to be included in the linear predictor during fitting. This should be \code{NULL} or a numeric vector of length equal to the number of cases. One or more \code{\link{offset}} terms can be included in the formula instead or as well, and if more than one is specified their sum is used. 
See \code{\link{model.offset}}.} \item{contrasts}{an optional list. See the \code{contrasts.arg} of \code{model.matrix.default}.} \item{devFunOnly}{logical - return only the deviance evaluation function.} \item{optimizer}{character - name of optimizing function. The built-in optimizers are \code{\link{Nelder_Mead}} and \code{\link[minqa]{bobyqa}} (from the \pkg{minqa} package. Any minimizing function that allows box constraints can be used provided that it (1) takes input parameters \code{fn} (function to be optimized), \code{par} (starting parameter values), \code{lower} (lower bounds) and \code{control} (control parameters, passed through from the \code{control} argument) and (2) returns a list with (at least) elements \code{par} (best-fit parameters), \code{fval} (best-fit function value), \code{conv} (convergence code) and (optionally) \code{message} (informational message, or explanation of convergence failure). Special provisions are made for \code{\link{bobyqa}}, \code{\link{Nelder_Mead}}, and optimizers wrapped in the \pkg{optimx} package; to use \pkg{optimx} optimizers (including \code{L-BFGS-B} from base \code{\link{optim}} and \code{\link{nlminb}}), pass the \code{method} argument to \code{optim} in the \code{control} argument.} \item{\dots}{other potential arguments. A \code{method} argument was used in earlier versions of the package. Its functionality has been replaced by the \code{REML} argument.} } \value{ An object of class \code{"\linkS4class{merMod}"}, for which many methods are available. See there for details. } \description{ Fit a linear mixed model (LMM) } \details{ \itemize{ \item{If the \code{formula} argument is specified as a character vector, the function will attempt to coerce it to a formula. 
However, this is not recommended (users who want to construct formulas by pasting together components are advised to use \code{\link{as.formula}}); model fits will work but subsequent methods such as \code{\link{drop1}}, \code{\link{update}} may fail.} \item{Unlike some simpler modeling frameworks such as \code{\link{lm}} and \code{\link{glm}} which automatically detect perfectly collinear predictor variables, \code{[gn]lmer} cannot handle design matrices of less than full rank. For example, in cases of models with interactions that have unobserved combinations of levels, it is up to the user to define a new variable (for example creating \code{ab} within the data from the results of \code{droplevels(interaction(a,b))}). } } } \examples{ ## linear mixed models - reference values from older code (fm1 <- lmer(Reaction ~ Days + (Days|Subject), sleepstudy)) (fm2 <- lmer(Reaction ~ Days + (1|Subject) + (0+Days|Subject), sleepstudy)) anova(fm1, fm2) } \seealso{ The \code{\linkS4class{merMod}} class, \code{\link[stats]{lm}} } \concept{ LMM } \keyword{models}
library(glmnet) mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/central_nervous_system.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.95,family="gaussian",standardize=TRUE) sink('./central_nervous_system_093.txt',append=TRUE) print(glm$glmnet.fit) sink()
/Model/EN/AvgRank/central_nervous_system/central_nervous_system_093.R
no_license
esbgkannan/QSMART
R
false
false
378
r
library(glmnet) mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/central_nervous_system.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.95,family="gaussian",standardize=TRUE) sink('./central_nervous_system_093.txt',append=TRUE) print(glm$glmnet.fit) sink()
# Load the bayes net functions source("BayesianNetworks-template.r") # Risk factors collected in the survey cdc_survey_variables = list("income", "exercise", "smoke", "bmi", "bp", "cholesterol", "angina", "stroke", "attack", "diabetes") survey_variables_states = c(8, 2, 2, 4, 4, 2, 2, 2, 2, 4) # Health outcomes and habits health_outcomes = c("diabetes", "stroke", "attack", "angina") habit_variables = c("smoke", "exercise") bad_habit_values = c("1", "2") good_habit_values = c("2", "1") health_variables = c("bp", "cholesterol", "bmi") bad_health_values = c("1", "1", "3") good_health_values = c("3", "2", "2") diabetes_codes = c("1", "2", "3", "4") diabetes_values = c("Diabetic", "Diabetes only during pregnancy", "Not diabetic", "Pre-diabetic") stroke_codes = c("1", "2") stroke_values = c("Will have stroke", "Will not have stroke") attack_codes = c("1", "2") attack_values = c("Will have heart attack", "Will not have heart attack") angina_codes = c("1", "2") angina_values = c("Will have angina", "Will not have angina") positive_health_outcomes_codes = c(diabetes_values[1], stroke_values[1], attack_values[1], angina_values[1]) outcome_codes = list(diabetes_codes, stroke_codes, attack_codes, angina_codes) names(outcome_codes) = health_outcomes outcome_values = list(diabetes_values, stroke_values, attack_values, angina_values) names(outcome_values) = health_outcomes # Load data collected by the CDC in the 2015 Behavioral Risk Factor Surveillance System (BRFSS) survey. 
cdc_survey_data = read.csv(file="RiskFactors.csv", header=TRUE, sep=",") ## load libraries load_libraries = function() { library(knitr) } ## Change table headings for display purposes ## factor_table: factor tables that needs heading changed ## headings_to_change: headings to be changed ## new_headings: new headings ## ## Will return the table with changed headings change_table_headings = function(factor_table, headings_to_change, new_headings) { if (!is.null(headings_to_change)) { for (heading_index in 1:length(headings_to_change)) { names(factor_table)[which(names(factor_table) == headings_to_change[heading_index])] = new_headings[heading_index] } } return(factor_table) } ## Compute number of probabilities that need to be stored for bayes net node ## variables_in_node: variables that affect the node plus the node itself ## ## Will return number of probabilities that need to be stored for bayes net node probabilities_in_node = function(variables_in_node) { probabilities_to_store = 1.0 for (survey_variable in variables_in_node) { probabilities_to_store = probabilities_to_store * survey_variables_states[which(cdc_survey_variables == survey_variable)] } return(probabilities_to_store) } ## Drop columns from table ## factor_table: factor tables that needs columns dropped ## columns_to_drop: columms to be dropped ## ## Will return the table with dropped columns drop_table_columns = function(factor_table, columns_to_drop) { if (!is.null(columns_to_drop)) { for (column_index in 1:length(columns_to_drop)) { factor_table = factor_table[-which(names(factor_table) == columns_to_drop[column_index])] } } return(factor_table) } ## Change column values to be meaningful ## factor_table: factor tables that needs columns dropped ## columns_name: name of the columm ## from_column_codes: codes to change ## to_column_values: values to change codes into ## ## Will return the table with column values changed from codes to meaningful ones change_to_meaningful_values = 
function(factor_table, columns_name, from_column_codes, to_column_values) { if (!is.null(factor_table[[columns_name]])) { for (column_value_index in 1:length(from_column_codes)) { indexes_to_change = which(factor_table[[columns_name]] == from_column_codes[column_value_index]) factor_table[[columns_name]] = replace(factor_table[[columns_name]], indexes_to_change, rep(c(to_column_values[column_value_index]), each=length(indexes_to_change))) } } return(factor_table) } ## Show formatted table ## factor_table: factor tables that needs heading changed ## headings_to_change: headings to be changed ## new_headings: new headings ## ## Will print the formatted table show_formatted_table = function(factor_table, headings_to_change, new_headings) { factor_table = change_table_headings(factor_table, headings_to_change, new_headings) grid.table(head(factor_table), rows=NULL) } ## Create and return the Bayes Net ## ## Will return the Bayes Net create_bayes_net = function() { risk_factors_bayes_net = list() # Add income. 
risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("income")) # Add smoke status given income risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("smoke","income")) # Add exercise given income risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("exercise","income")) # Add bmi given income and exercise risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("bmi","income", "exercise")) # Add blood pressure given exercise, income and smoking risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("bp","exercise", "income", "smoke")) # Add cholesterol given exercise, income and smoking risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("cholesterol","exercise", "income", "smoke")) # Add diabetes given bmi risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("diabetes","bmi")) # Add stroke given bmi, bp and cholesterol risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("stroke","bmi", "bp", "cholesterol")) # Add attack given bmi, bp and cholesterol risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("attack","bmi", "bp", "cholesterol")) # Add angina given bmi, bp and cholesterol risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("angina","bmi", "bp", "cholesterol")) return(risk_factors_bayes_net) } ## Get health outcomes probability tables ## factor_table: factor tables that needs heading changed ## headings_to_change: headings to be changed ## new_headings: new headings ## ## Will return the health outcomes probability tables get_health_outcomes = function(bayes_net, health_outcomes, observe_variables, 
observe_values) { health_outcomes_tables = list() # Find probability of health outcomes with observed variables like habits for (my_outcome in health_outcomes) { # Do not marginalize the observed variables and the outcome being considered outcomes_to_not_marginalize = c(observe_variables, my_outcome) outcomes_to_marginalize = cdc_survey_variables[!(cdc_survey_variables %in% outcomes_to_not_marginalize)] # Get inference table my_outcome_table = infer(bayesnet=bayes_net, margVars=outcomes_to_marginalize, obsVars=observe_variables, obsVals=observe_values) # Drop columns for observed variables my_outcome_table = drop_table_columns(factor_table=my_outcome_table, columns_to_drop=observe_variables) names(my_outcome_table)[[1]]="Probability" # Change values from codes to meaningful names my_outcome_table = change_to_meaningful_values(factor_table=my_outcome_table, columns_name=my_outcome, from_column_codes=outcome_codes[[my_outcome]], to_column_values=outcome_values[[my_outcome]]) health_outcomes_tables[[length(health_outcomes_tables) + 1]] = my_outcome_table } return(health_outcomes_tables) } ## Print the impact of habits on outcomes ## bayes_net: the bayes net ## ## Will print the impact of habits on outcomes impact_of_habits_on_outcomes = function(bayes_net) { # What is the probability of the outcome if I have bad habits (smoke and don’t exercise)? outcome_table_1 = get_health_outcomes(bayes_net=bayes_net, health_outcomes=health_outcomes, observe_variables=habit_variables, observe_values=bad_habit_values) # How about if I have good habits (don’t smoke and do exercise)? 
outcome_table_2 = get_health_outcomes(bayes_net=bayes_net, health_outcomes=health_outcomes, observe_variables=habit_variables, observe_values=good_habit_values) print_outcome_probabilities(outcome_table_1=outcome_table_1, outcome_table_1_observation="Bad Habits", outcome_table_2=outcome_table_2, outcome_table_2_observation="Good Habits", caption_text="Health Outcome Probabilities based on Habits") } ## Print the impact of health on outcomes ## bayes_net: the bayes net ## ## Will print the impact of health on outcomes impact_of_health_on_outcomes = function(bayes_net) { # What is the probability of the outcome if I have poor health (high blood pressure, high cholesterol, and overweight)? outcome_table_1 = get_health_outcomes(bayes_net=bayes_net, health_outcomes=health_outcomes, observe_variables=health_variables, observe_values=bad_health_values) # What if I have good health (low blood pressure, low cholesterol, and normal weight)? outcome_table_2 = get_health_outcomes(bayes_net=bayes_net, health_outcomes=health_outcomes, observe_variables=health_variables, observe_values=good_health_values) print_outcome_probabilities(outcome_table_1=outcome_table_1, outcome_table_1_observation="Poor Health", outcome_table_2=outcome_table_2, outcome_table_2_observation="Good Health", caption_text="Health Outcome Probabilities based on Health") } ## Print outcome probability table ## ## outcome_table_1: Tables for health outcome 1 ## outcome_table_1_observation: Description for health outcome 1 observed value ## outcome_table_2: Tables for health outcome 2 ## outcome_table_2_observation: Description for health outcome 2 observed value ## ## Print formatted table with each outcome on a row with probability under various observed vales print_outcome_probabilities = function(outcome_table_1, outcome_table_1_observation, outcome_table_2, outcome_table_2_observation, caption_text) { table_counter = 0 table_1_probabilities = numeric() for (factor_table in outcome_table_1) { table_counter 
= table_counter + 1 table_1_probabilities[length(table_1_probabilities) + 1] = factor_table[["Probability"]][which(factor_table[[health_outcomes[table_counter]]] == positive_health_outcomes_codes[table_counter])] } table_counter = 0 table_2_probabilities = numeric() for (factor_table in outcome_table_2) { table_counter = table_counter + 1 table_2_probabilities[length(table_2_probabilities) + 1] = factor_table[["Probability"]][which(factor_table[[health_outcomes[table_counter]]] == positive_health_outcomes_codes[table_counter])] } table_to_print = data.frame(health_outcomes, table_1_probabilities, table_2_probabilities) names(table_to_print) = c("Health Outcome", outcome_table_1_observation, outcome_table_2_observation) print(kable(table_to_print, caption = caption_text)) } ## Create a bayes net where habits affect outcomes ## ## Will return a bayes net with edges from smoking to each of the four outcomes and edges from ## exercise to each of the four outcomes. make_habits_impact_outcomes = function() { risk_factors_bayes_net = list() # Add income. 
risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("income")) # Add smoke status given income risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("smoke","income")) # Add exercise given income risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("exercise","income")) # Add bmi given income and exercise risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("bmi","income", "exercise")) # Add blood pressure given exercise, income and smoking risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("bp","exercise", "income", "smoke")) # Add cholesterol given exercise, income and smoking risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("cholesterol","exercise", "income", "smoke")) # Add diabetes given bmi, smoking and exercise risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("diabetes","bmi", "smoke", "exercise")) # Add stroke given bmi, bp, cholesterol, smoking and exercise risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("stroke","bmi", "bp", "cholesterol", "smoke", "exercise")) # Add attack given bmi, bp, cholesterol, smoking and exercise risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("attack","bmi", "bp", "cholesterol", "smoke", "exercise")) # Add angina given bmi, bp, cholesterol, smoking and exercise risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("angina","bmi", "bp", "cholesterol", "smoke", "exercise")) return(risk_factors_bayes_net) } ## Create a bayes net where diabetes affects stroke ## ## Will a bayes net with edges from diabetes to stroke make_diabetes_impact_stroke_outcome = 
function() { risk_factors_bayes_net = list() # Add income. risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("income")) # Add smoke status given income risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("smoke","income")) # Add exercise given income risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("exercise","income")) # Add bmi given income and exercise risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("bmi","income", "exercise")) # Add blood pressure given exercise, income and smoking risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("bp","exercise", "income", "smoke")) # Add cholesterol given exercise, income and smoking risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("cholesterol","exercise", "income", "smoke")) # Add diabetes given bmi, smoking and exercise risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("diabetes","bmi", "smoke", "exercise")) # Add stroke given bmi, bp, cholesterol, smoking, exercise and diabetes risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("stroke","bmi", "bp", "cholesterol", "smoke", "exercise", "diabetes")) # Add attack given bmi, bp, cholesterol, smoking and exercise risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("attack","bmi", "bp", "cholesterol", "smoke", "exercise")) # Add angina given bmi, bp, cholesterol, smoking and exercise risk_factors_bayes_net[[length(risk_factors_bayes_net) + 1]] = createCPTfromData(cdc_survey_data, c("angina","bmi", "bp", "cholesterol", "smoke", "exercise")) return(risk_factors_bayes_net) } ## Print the impact of diabetes on stroke outcome ## bayes_net: 
the bayes net
## link_from_diabetes_to_stroke: TRUE when bayes_net contains a
##   diabetes -> stroke edge (affects only the printed caption)
##
## Will print the probability table for stroke twice: once with diabetes
## observed as present (code "1") and once as absent (code "3").
impact_of_diabetes_on_stroke = function(bayes_net, link_from_diabetes_to_stroke) {
  # Qualify the captions so tables from the two network variants can be
  # told apart in the rendered report.
  if (isTRUE(link_from_diabetes_to_stroke)) {
    caption_text_qualifier = "- edge from diabetes to stroke"
  } else {
    caption_text_qualifier = "- no edge from diabetes to stroke"
  }

  # Observed diabetes value paired with the caption describing it; looping
  # avoids duplicating the inference/printing code for each scenario.
  scenarios = list(
    list(observed_value = c("1"),
         caption = "Probability for stroke when diabetes is present"),
    list(observed_value = c("3"),
         caption = "Probability for stroke when diabetes is not present"))

  for (scenario in scenarios) {
    outcome_tables = get_health_outcomes(bayes_net = bayes_net,
                                         health_outcomes = c("stroke"),
                                         observe_variables = c("diabetes"),
                                         observe_values = scenario$observed_value)
    for (factor_table in outcome_tables) {
      print(kable(factor_table,
                  caption = paste(scenario$caption, caption_text_qualifier)))
    }
  }
}
/BRFSS Bayes Net.R
no_license
gopalmenon/Bayesian-Networks
R
false
false
18,393
r
# Load the bayes net functions
source("BayesianNetworks-template.r")

# Risk factors collected in the survey, with the number of discrete states
# each variable can take (parallel vectors).
cdc_survey_variables <- list("income", "exercise", "smoke", "bmi", "bp",
                             "cholesterol", "angina", "stroke", "attack",
                             "diabetes")
survey_variables_states <- c(8, 2, 2, 4, 4, 2, 2, 2, 2, 4)

# Health outcomes and habits
health_outcomes <- c("diabetes", "stroke", "attack", "angina")
habit_variables <- c("smoke", "exercise")
bad_habit_values <- c("1", "2")
good_habit_values <- c("2", "1")
health_variables <- c("bp", "cholesterol", "bmi")
bad_health_values <- c("1", "1", "3")
good_health_values <- c("3", "2", "2")

# Survey answer codes and their human-readable labels for each outcome.
diabetes_codes <- c("1", "2", "3", "4")
diabetes_values <- c("Diabetic", "Diabetes only during pregnancy",
                     "Not diabetic", "Pre-diabetic")
stroke_codes <- c("1", "2")
stroke_values <- c("Will have stroke", "Will not have stroke")
attack_codes <- c("1", "2")
attack_values <- c("Will have heart attack", "Will not have heart attack")
angina_codes <- c("1", "2")
angina_values <- c("Will have angina", "Will not have angina")

# Label representing the adverse result for each outcome, in the same order
# as health_outcomes.
positive_health_outcomes_codes <- c(diabetes_values[1], stroke_values[1],
                                    attack_values[1], angina_values[1])

# Lookup tables keyed by outcome name.
outcome_codes <- setNames(list(diabetes_codes, stroke_codes, attack_codes,
                               angina_codes), health_outcomes)
outcome_values <- setNames(list(diabetes_values, stroke_values, attack_values,
                                angina_values), health_outcomes)

# Load data collected by the CDC in the 2015 Behavioral Risk Factor
# Surveillance System (BRFSS) survey.
cdc_survey_data <- read.csv(file = "RiskFactors.csv", header = TRUE, sep = ",")

## Load libraries used for report formatting.
load_libraries <- function() {
  library(knitr)
}

## Rename selected columns of a factor table for display purposes.
## factor_table: factor table whose headings need changing
## headings_to_change: current headings to be replaced
## new_headings: replacement headings (parallel to headings_to_change)
##
## Returns the table with the changed headings. A NULL or empty
## headings_to_change leaves the table unchanged (the original used
## 1:length(), which misbehaves on zero-length input).
change_table_headings <- function(factor_table, headings_to_change, new_headings) {
  for (heading_index in seq_along(headings_to_change)) {
    heading_position <- which(names(factor_table) == headings_to_change[heading_index])
    names(factor_table)[heading_position] <- new_headings[heading_index]
  }
  factor_table
}

## Compute the number of probabilities that must be stored for a bayes net
## node.
## variables_in_node: the node's variable plus its parent variables
##
## Returns the product of the state counts of all listed variables.
probabilities_in_node <- function(variables_in_node) {
  probabilities_to_store <- 1.0
  for (survey_variable in variables_in_node) {
    variable_index <- which(cdc_survey_variables == survey_variable)
    probabilities_to_store <- probabilities_to_store * survey_variables_states[variable_index]
  }
  probabilities_to_store
}

## Drop the named columns from a factor table.
## factor_table: factor table that needs columns dropped
## columns_to_drop: names of the columns to be dropped
##
## Returns the table without the dropped columns. Names not present in the
## table are skipped; the original indexed with -which(...), and
## x[-integer(0)] selects zero columns, silently emptying the table when a
## name was missing.
drop_table_columns <- function(factor_table, columns_to_drop) {
  for (column_name in columns_to_drop) {
    column_position <- which(names(factor_table) == column_name)
    if (length(column_position) > 0) {
      factor_table <- factor_table[-column_position]
    }
  }
  factor_table
}

## Replace coded column values with meaningful labels.
## factor_table: factor table to relabel
## columns_name: name of the column to relabel
## from_column_codes: codes to change
## to_column_values: labels to change the codes into (parallel vector)
##
## Returns the table with the column values changed from codes to labels.
## Tables without the named column are returned unchanged.
change_to_meaningful_values <- function(factor_table, columns_name,
                                        from_column_codes, to_column_values) {
  if (!is.null(factor_table[[columns_name]])) {
    for (code_index in seq_along(from_column_codes)) {
      indexes_to_change <- which(factor_table[[columns_name]] == from_column_codes[code_index])
      factor_table[[columns_name]][indexes_to_change] <- to_column_values[code_index]
    }
  }
  factor_table
}

## Show the first rows of a factor table as a formatted grid.
## factor_table: factor table to display
## headings_to_change: headings to be changed before display
## new_headings: new headings
##
## Renders via grid.table (gridExtra), which must already be loaded.
show_formatted_table <- function(factor_table, headings_to_change, new_headings) {
  factor_table <- change_table_headings(factor_table, headings_to_change, new_headings)
  grid.table(head(factor_table), rows = NULL)
}

## Build a bayes net (a list of CPTs estimated from the survey data) from
## node specifications.
## node_specs: list of character vectors; in each vector the first entry is
##             the node variable and the remaining entries are its parents.
##
## Shared by the three network builders below, which previously triplicated
## the same ten createCPTfromData calls.
build_bayes_net_from_specs <- function(node_specs) {
  lapply(node_specs, function(node_variables) {
    createCPTfromData(cdc_survey_data, node_variables)
  })
}

## Node specifications common to all three network variants: income is the
## root, and the habit/health variables depend on it directly or indirectly.
common_node_specs <- list(
  c("income"),
  c("smoke", "income"),
  c("exercise", "income"),
  c("bmi", "income", "exercise"),
  c("bp", "exercise", "income", "smoke"),
  c("cholesterol", "exercise", "income", "smoke"))

## Create and return the baseline Bayes Net, in which the four health
## outcomes depend only on bmi, bp and cholesterol (no direct habit edges).
create_bayes_net <- function() {
  build_bayes_net_from_specs(c(common_node_specs, list(
    c("diabetes", "bmi"),
    c("stroke", "bmi", "bp", "cholesterol"),
    c("attack", "bmi", "bp", "cholesterol"),
    c("angina", "bmi", "bp", "cholesterol"))))
}

## Get health outcome probability tables.
## bayes_net: the bayes net (list of CPTs) to run inference on
## health_outcomes: outcome variables to evaluate, one table each
## observe_variables: variables held fixed during inference
## observe_values: observed values, parallel to observe_variables
##
## For each outcome, marginalizes out every other variable, drops the
## observed columns, labels the probability column and replaces outcome
## codes with readable values. Returns the list of tables.
get_health_outcomes <- function(bayes_net, health_outcomes, observe_variables,
                                observe_values) {
  health_outcomes_tables <- list()
  for (my_outcome in health_outcomes) {
    # Keep only the observed variables and the outcome under evaluation.
    outcomes_to_not_marginalize <- c(observe_variables, my_outcome)
    outcomes_to_marginalize <- cdc_survey_variables[!(cdc_survey_variables %in% outcomes_to_not_marginalize)]
    my_outcome_table <- infer(bayesnet = bayes_net,
                              margVars = outcomes_to_marginalize,
                              obsVars = observe_variables,
                              obsVals = observe_values)
    # Observed columns are constant, so drop them from the display.
    my_outcome_table <- drop_table_columns(factor_table = my_outcome_table,
                                           columns_to_drop = observe_variables)
    # infer() returns the probability in the first column.
    names(my_outcome_table)[[1]] <- "Probability"
    my_outcome_table <- change_to_meaningful_values(
      factor_table = my_outcome_table,
      columns_name = my_outcome,
      from_column_codes = outcome_codes[[my_outcome]],
      to_column_values = outcome_values[[my_outcome]])
    health_outcomes_tables[[length(health_outcomes_tables) + 1]] <- my_outcome_table
  }
  health_outcomes_tables
}

## Compute outcome tables for two observed scenarios and print them as a
## single comparison table.
## bayes_net: the bayes net
## observe_variables: variables observed in both scenarios
## observe_values_1 / observation_1: values and display label for scenario 1
## observe_values_2 / observation_2: values and display label for scenario 2
## caption_text: caption of the printed comparison table
report_outcome_contrast <- function(bayes_net, observe_variables,
                                    observe_values_1, observation_1,
                                    observe_values_2, observation_2,
                                    caption_text) {
  outcome_table_1 <- get_health_outcomes(bayes_net = bayes_net,
                                         health_outcomes = health_outcomes,
                                         observe_variables = observe_variables,
                                         observe_values = observe_values_1)
  outcome_table_2 <- get_health_outcomes(bayes_net = bayes_net,
                                         health_outcomes = health_outcomes,
                                         observe_variables = observe_variables,
                                         observe_values = observe_values_2)
  print_outcome_probabilities(outcome_table_1 = outcome_table_1,
                              outcome_table_1_observation = observation_1,
                              outcome_table_2 = outcome_table_2,
                              outcome_table_2_observation = observation_2,
                              caption_text = caption_text)
}

## Print the impact of habits (smoking and exercise) on the four outcomes,
## contrasting bad habits (smoke, don't exercise) against good habits.
impact_of_habits_on_outcomes <- function(bayes_net) {
  report_outcome_contrast(bayes_net, habit_variables,
                          bad_habit_values, "Bad Habits",
                          good_habit_values, "Good Habits",
                          "Health Outcome Probabilities based on Habits")
}

## Print the impact of health status (bp, cholesterol, bmi) on the four
## outcomes, contrasting poor health against good health.
impact_of_health_on_outcomes <- function(bayes_net) {
  report_outcome_contrast(bayes_net, health_variables,
                          bad_health_values, "Poor Health",
                          good_health_values, "Good Health",
                          "Health Outcome Probabilities based on Health")
}

## Print outcome probability table.
## outcome_table_1: tables for the first observed scenario
## outcome_table_1_observation: column label for the first scenario
## outcome_table_2: tables for the second observed scenario
## outcome_table_2_observation: column label for the second scenario
## caption_text: caption for the printed table
##
## Prints one row per outcome with the probability of the adverse result
## under each scenario.
print_outcome_probabilities <- function(outcome_table_1, outcome_table_1_observation,
                                        outcome_table_2, outcome_table_2_observation,
                                        caption_text) {
  # Pull the probability of the adverse result from each outcome table;
  # shared by both scenarios (the original duplicated this loop).
  extract_positive_probabilities <- function(outcome_tables) {
    probabilities <- numeric(length(outcome_tables))
    for (table_index in seq_along(outcome_tables)) {
      factor_table <- outcome_tables[[table_index]]
      positive_row <- which(factor_table[[health_outcomes[table_index]]] ==
                              positive_health_outcomes_codes[table_index])
      probabilities[table_index] <- factor_table[["Probability"]][positive_row]
    }
    probabilities
  }
  table_to_print <- data.frame(health_outcomes,
                               extract_positive_probabilities(outcome_table_1),
                               extract_positive_probabilities(outcome_table_2))
  names(table_to_print) <- c("Health Outcome", outcome_table_1_observation,
                             outcome_table_2_observation)
  print(kable(table_to_print, caption = caption_text))
}

## Create a bayes net with edges from smoking to each of the four outcomes
## and edges from exercise to each of the four outcomes.
make_habits_impact_outcomes <- function() {
  build_bayes_net_from_specs(c(common_node_specs, list(
    c("diabetes", "bmi", "smoke", "exercise"),
    c("stroke", "bmi", "bp", "cholesterol", "smoke", "exercise"),
    c("attack", "bmi", "bp", "cholesterol", "smoke", "exercise"),
    c("angina", "bmi", "bp", "cholesterol", "smoke", "exercise"))))
}

## Create a bayes net like make_habits_impact_outcomes() but with an
## additional edge from diabetes to stroke.
make_diabetes_impact_stroke_outcome <- function() {
  build_bayes_net_from_specs(c(common_node_specs, list(
    c("diabetes", "bmi", "smoke", "exercise"),
    c("stroke", "bmi", "bp", "cholesterol", "smoke", "exercise", "diabetes"),
    c("attack", "bmi", "bp", "cholesterol", "smoke", "exercise"),
    c("angina", "bmi", "bp", "cholesterol", "smoke", "exercise"))))
}

## Print the impact of diabetes on stroke outcome.
## bayes_net: the bayes net
## link_from_diabetes_to_stroke: TRUE when bayes_net contains a
##   diabetes -> stroke edge (affects only the printed caption)
##
## Prints the stroke probability table twice: once with diabetes observed
## as present (code "1") and once as absent (code "3").
impact_of_diabetes_on_stroke <- function(bayes_net, link_from_diabetes_to_stroke) {
  if (isTRUE(link_from_diabetes_to_stroke)) {
    caption_text_qualifier <- "- edge from diabetes to stroke"
  } else {
    caption_text_qualifier <- "- no edge from diabetes to stroke"
  }
  scenarios <- list(
    list(observed_value = c("1"),
         caption = "Probability for stroke when diabetes is present"),
    list(observed_value = c("3"),
         caption = "Probability for stroke when diabetes is not present"))
  for (scenario in scenarios) {
    outcome_tables <- get_health_outcomes(bayes_net = bayes_net,
                                          health_outcomes = c("stroke"),
                                          observe_variables = c("diabetes"),
                                          observe_values = scenario$observed_value)
    for (factor_table in outcome_tables) {
      print(kable(factor_table,
                  caption = paste(scenario$caption, caption_text_qualifier)))
    }
  }
}
#"File Name: HW03.r
# Student: Yeuh-Jung Tsou
# Date: 03, 09, 2017

# Using R

# Load the following CSV file to your R environment:
# http://www.math.smith.edu/sasr/datasets/help.csv
# NOTE(review): attach()/detach() and rm(list = ls()) were removed.
# attach() creates copies of the columns, so the NA imputation below
# modified a detached copy rather than the data frame, and rm(list = ls())
# silently wipes the user's entire workspace when the script is sourced.
help_data <- read.csv("http://www.math.smith.edu/sasr/datasets/help.csv")

# Create a dataframe of: id, pcs1, mcs1, substance, and race group
dataset <- help_data[, c("id", "pcs1", "mcs1", "substance", "racegrp")]
View(dataset)

# Calculate: Mean, Med, STD, Max, Min of mcs1 (ignoring missing values)
mcs1_mean <- mean(help_data$mcs1, na.rm = TRUE)
mcs1_med <- median(help_data$mcs1, na.rm = TRUE)
mcs1_std <- sd(help_data$mcs1, na.rm = TRUE)
mcs1_max <- max(help_data$mcs1, na.rm = TRUE)
mcs1_min <- min(help_data$mcs1, na.rm = TRUE)

# Create a frequency table of substance vs. racegroup
with(help_data, table(substance, racegrp))

# Substitute the missing values of mcs1 by the overall average
mcs1_imputed <- help_data$mcs1
mcs1_imputed[is.na(mcs1_imputed)] <- mcs1_mean
dataset2 <- data.frame(id = help_data$id,
                       pcs1 = help_data$pcs1,
                       mcs1 = mcs1_imputed,
                       substance = help_data$substance,
                       racegrp = help_data$racegrp)
View(dataset2)
/HW03.R
no_license
heeroshinn/Data-Mining
R
false
false
915
r
#"File Name: HW03.r
# Student: Yeuh-Jung Tsou
# Date: 03, 09, 2017

# Using R

# Load the following CSV file to your R environment:
# http://www.math.smith.edu/sasr/datasets/help.csv
# NOTE(review): attach()/detach() and rm(list = ls()) were removed.
# attach() creates copies of the columns, so the NA imputation below
# modified a detached copy rather than the data frame, and rm(list = ls())
# silently wipes the user's entire workspace when the script is sourced.
help_data <- read.csv("http://www.math.smith.edu/sasr/datasets/help.csv")

# Create a dataframe of: id, pcs1, mcs1, substance, and race group
dataset <- help_data[, c("id", "pcs1", "mcs1", "substance", "racegrp")]
View(dataset)

# Calculate: Mean, Med, STD, Max, Min of mcs1 (ignoring missing values)
mcs1_mean <- mean(help_data$mcs1, na.rm = TRUE)
mcs1_med <- median(help_data$mcs1, na.rm = TRUE)
mcs1_std <- sd(help_data$mcs1, na.rm = TRUE)
mcs1_max <- max(help_data$mcs1, na.rm = TRUE)
mcs1_min <- min(help_data$mcs1, na.rm = TRUE)

# Create a frequency table of substance vs. racegroup
with(help_data, table(substance, racegrp))

# Substitute the missing values of mcs1 by the overall average
mcs1_imputed <- help_data$mcs1
mcs1_imputed[is.na(mcs1_imputed)] <- mcs1_mean
dataset2 <- data.frame(id = help_data$id,
                       pcs1 = help_data$pcs1,
                       mcs1 = mcs1_imputed,
                       substance = help_data$substance,
                       racegrp = help_data$racegrp)
View(dataset2)
#' Example data for a meta-analysis of dichotomous outcomes: Exercise-based cardiac rehabilitation
#'
#' A dataset consisting of 14 empirical studies used for a meta-analysis in
#' Anderson et al. (2016). The outcome of interest was the risk of hospital
#' admission of patients with coronary heart disease within the follow-up
#' duration. Exercise-based cardiac rehabilitation (treatment) was compared
#' with usual care (control).
#'
#' @format A data frame with 14 rows and 15 variables:
#'  \describe{
#'    \item{study_name}{short name of each study}
#'    \item{year}{publication year of each study}
#'    \item{ai}{number of patients in the treatment group with an event (hospital admission) for each study.}
#'    \item{bi}{number of patients in the treatment group with no event for each study.}
#'    \item{ci}{number of patients in the control group with an event (hospital admission) for each study.}
#'    \item{di}{number of patients in the control group with no event for each study.}
#'    \item{n1i}{number of patients in the treatment group for each study, (ai + bi).}
#'    \item{n2i}{number of patients in the control group for each study, (ci + di).}
#'    \item{rr}{relative risk of an event for treatment vs. control, (ai/n1i)/(ci/n2i).}
#'    \item{or}{odds ratio of an event for treatment vs. control, (ai*di)/(bi*ci).}
#'    \item{logrr}{natural logarithm of the relative risk (\code{rr}) for meta-analysis.}
#'    \item{logrr_se}{standard error of the natural logarithm of the relative risk for meta-analysis, sqrt(1/ai + 1/ci - 1/(ai + bi) - 1/(ci + di)).}
#'    \item{logor}{natural logarithm of the odds ratio (\code{or}) for meta-analysis.}
#'    \item{logor_se}{standard error of the natural logarithm of the odds ratio for meta-analysis, sqrt(1/ai + 1/bi + 1/ci + 1/di).}
#'    \item{followup}{dichotomous moderator: follow-up duration.}
#'  }
#' @references Anderson, L., Oldridge, N., Thompson, D. R., Zwisler, A. D., Rees, K.,
#' Martin, N., & Taylor, R. S. (2016). Exercise-based cardiac rehabilitation for coronary
#' heart disease: Cochrane systematic review and meta-analysis.
#' \emph{Journal of the American College of Cardiology}, \emph{67}, 1-12.
"exrehab"
/R/exrehab.R
no_license
Mkossmeier/metaviz
R
false
false
2,175
r
#' Example data for a meta-analysis of dichotomous outcomes: Exercise-based cardiac rehabilitation
#'
#' A dataset consisting of 14 empirical studies used for a meta-analysis in
#' Anderson et al. (2016). The outcome of interest was the risk of hospital
#' admission of patients with coronary heart disease within the follow-up
#' duration. Exercise-based cardiac rehabilitation (treatment) was compared
#' with usual care (control).
#'
#' @format A data frame with 14 rows and 15 variables:
#'  \describe{
#'    \item{study_name}{short name of each study}
#'    \item{year}{publication year of each study}
#'    \item{ai}{number of patients in the treatment group with an event (hospital admission) for each study.}
#'    \item{bi}{number of patients in the treatment group with no event for each study.}
#'    \item{ci}{number of patients in the control group with an event (hospital admission) for each study.}
#'    \item{di}{number of patients in the control group with no event for each study.}
#'    \item{n1i}{number of patients in the treatment group for each study, (ai + bi).}
#'    \item{n2i}{number of patients in the control group for each study, (ci + di).}
#'    \item{rr}{relative risk of an event for treatment vs. control, (ai/n1i)/(ci/n2i).}
#'    \item{or}{odds ratio of an event for treatment vs. control, (ai*di)/(bi*ci).}
#'    \item{logrr}{natural logarithm of the relative risk (\code{rr}) for meta-analysis.}
#'    \item{logrr_se}{standard error of the natural logarithm of the relative risk for meta-analysis, sqrt(1/ai + 1/ci - 1/(ai + bi) - 1/(ci + di)).}
#'    \item{logor}{natural logarithm of the odds ratio (\code{or}) for meta-analysis.}
#'    \item{logor_se}{standard error of the natural logarithm of the odds ratio for meta-analysis, sqrt(1/ai + 1/bi + 1/ci + 1/di).}
#'    \item{followup}{dichotomous moderator: follow-up duration.}
#'  }
#' @references Anderson, L., Oldridge, N., Thompson, D. R., Zwisler, A. D., Rees, K.,
#' Martin, N., & Taylor, R. S. (2016). Exercise-based cardiac rehabilitation for coronary
#' heart disease: Cochrane systematic review and meta-analysis.
#' \emph{Journal of the American College of Cardiology}, \emph{67}, 1-12.
"exrehab"
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/connect_inbo_dbase.R \name{connect_inbo_dbase-defunct} \alias{connect_inbo_dbase-defunct} \title{Connect to an INBO database} \usage{ connect_inbo_dbase(database_name) } \arguments{ \item{database_name}{char Name of the INBO database you want to connect} } \value{ odbc connection } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#defunct}{\figure{lifecycle-defunct.svg}{options: alt='[Defunct]'}}}{\strong{[Defunct]}} Connects to an INBO database by simply providing the database's name as an argument. The function can only be used from within the INBO network. For more information, refer to \href{https://inbo.github.io/tutorials/tutorials/r_database_access/}{this tutorial}. } \examples{ \dontrun{ connection <- connect_inbo_dbase("D0021_00_userFlora") connection <- connect_inbo_dbase("W0003_00_Lims") } } \seealso{ \code{\link{inborutils-defunct}} } \keyword{internal}
/man/connect_inbo_dbase-defunct.Rd
permissive
kdmulligan/inborutils
R
false
true
991
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/connect_inbo_dbase.R \name{connect_inbo_dbase-defunct} \alias{connect_inbo_dbase-defunct} \title{Connect to an INBO database} \usage{ connect_inbo_dbase(database_name) } \arguments{ \item{database_name}{char Name of the INBO database you want to connect} } \value{ odbc connection } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#defunct}{\figure{lifecycle-defunct.svg}{options: alt='[Defunct]'}}}{\strong{[Defunct]}} Connects to an INBO database by simply providing the database's name as an argument. The function can only be used from within the INBO network. For more information, refer to \href{https://inbo.github.io/tutorials/tutorials/r_database_access/}{this tutorial}. } \examples{ \dontrun{ connection <- connect_inbo_dbase("D0021_00_userFlora") connection <- connect_inbo_dbase("W0003_00_Lims") } } \seealso{ \code{\link{inborutils-defunct}} } \keyword{internal}
#Part 1
#Write a function named 'pollutantmean' that calculates the mean of a pollutant
#(sulfate or nitrate) across a specified list of monitors.
#The function 'pollutantmean' takes three arguments: 'directory', 'pollutant', and 'id'.
#Given a vector monitor ID numbers, 'pollutantmean' reads that monitors'
#particulate matter data from the directory specified in the 'directory' argument
#and returns the mean of the pollutant across all of the monitors,
#ignoring any missing values coded as NA.

## Compute the mean of a pollutant across a set of monitor files.
##
## directory: path prefix for the monitor files; file names are built as
##            paste0(directory, "001.csv"), so the prefix must include the
##            trailing path separator
## pollutant: name of the column to average ("sulfate" or "nitrate")
## id: integer vector of monitor IDs (zero-padded to 3 digits in filenames)
##
## Returns the mean over all observations from the listed monitors, with
## NA values ignored. Returns NaN when no observations are available.
pollutantmean <- function(directory, pollutant, id) {
  # Build all file names at once; formatC is vectorized over id.
  station_files <- paste(directory, formatC(id, flag = 0, width = 3),
                         ".csv", sep = "")
  # Collect each monitor's pollutant column; avoids growing a vector with
  # c() inside the loop as the original did.
  readings <- lapply(station_files, function(station_file) {
    read.csv(station_file, header = TRUE, sep = ",")[[pollutant]]
  })
  # as.numeric(NULL) yields numeric(0), so an empty id vector produces
  # mean(numeric(0), na.rm = TRUE) == NaN, matching the original behavior.
  mean(as.numeric(unlist(readings, use.names = FALSE)), na.rm = TRUE)
}
/Part1.R
no_license
mguevara1986/Scripts_R_Programming
R
false
false
898
r
#Part 1 #Write a function named 'pollutantmean' that calculates the mean of a pollutant #(sulfate or nitrate) across a specified list of monitors. #The function 'pollutantmean' takes three arguments: 'directory', 'pollutant', and 'id'. #Given a vector monitor ID numbers, 'pollutantmean' reads that monitors' #particulate matter data from the directory specified in the 'directory' argument #and returns the mean of the pollutant across all of the monitors, #ignoring any missing values coded as NA. A prototype of the function is as follows pollutantmean <- function(directory, pollutant, id){ id_f <- formatC(id, flag = 0, width = 3) means <- numeric() for (i in seq_along(id)){ station <- id_f[i] file <- paste(directory, station,".csv",sep = "") values <- read.csv(file, header = TRUE, sep = ",") means<-c(means, values[,pollutant]) } mean(means, na.rm = TRUE) }
library(readr) library("dplyr") rating_data = read.delim("C:/Users/User/Documents/GitHub/ml-100k/u.data", sep="\t", header=FALSE) rating_data = rating_data[,-4] colnames(rating_data)=c("user_id", "item_id", "rating") user_data = read.delim("C:/Users/User/Documents/GitHub/ml-100k/u.user", sep="|", header=FALSE) colnames(user_data) = c("user_id", "age", "gender", "occupation", "zipcode") joined = inner_join(user_data, rating_data) men_rate = dplyr::filter(joined, gender=="M") women_rate = dplyr::filter(joined, gender=="F") best_ps = c() best_ids = c() p_threshold = 0.05 for (i in unique(joined$item_id)){ mens = dplyr::filter(men_rate, item_id == i) mens = mens$rating womens = dplyr::filter(women_rate, item_id==i) womens = womens$rating print (i) if (length(womens)<=1 || length(mens)<=1){ print(paste("err....", i, "is broken")) next() } if (sd(womens)<0.01 && sd(mens)<=0.01){ print(paste("err....", i, "is broken2")) next() } t = t.test(mens, womens, alternative="less") p = t$p.value if (p<p_threshold){ best_ps = c(p, best_ps) best_ids = c(i, best_ids) print (paste("Best ID has changed to ", i, " with p value", p)) } } movie_data = read.delim("C:/Users/User/Documents/GitHub/ml-100k/u.item", sep="|", header=FALSE, stringsAsFactors = FALSE) names(movie_data)[1:2] = c("movie_id", "movie_title") results = cbind.data.frame(best_titles = movie_data$movie_title[movie_data$movie_id %in% best_ids], best_ps) results = results[order(best_ps),] # These are the results results #--------------------------------------------------------- library("lattice") singer_df = singer head(singer_df) unique(singer_df$voice.part) singer_df$voice.part = as.character(singer_df$voice.part) replace_vec = c("Soprano", "Tenor", "Alto", "Bass") for (i in replace_vec){ singer_df$voice.part[grepl(i, singer_df$voice.part)] = i } singer_df = group_by(singer_df, voice.part) heights = mutate(singer_df, mean(height)) pairwise.t.test(singer_df$height, singer_df$voice.part, pool.sd=FALSE) # differences are 
significant :)
/t_test.R
no_license
sylvansecrets/signal-coursework
R
false
false
2,153
r
library(readr) library("dplyr") rating_data = read.delim("C:/Users/User/Documents/GitHub/ml-100k/u.data", sep="\t", header=FALSE) rating_data = rating_data[,-4] colnames(rating_data)=c("user_id", "item_id", "rating") user_data = read.delim("C:/Users/User/Documents/GitHub/ml-100k/u.user", sep="|", header=FALSE) colnames(user_data) = c("user_id", "age", "gender", "occupation", "zipcode") joined = inner_join(user_data, rating_data) men_rate = dplyr::filter(joined, gender=="M") women_rate = dplyr::filter(joined, gender=="F") best_ps = c() best_ids = c() p_threshold = 0.05 for (i in unique(joined$item_id)){ mens = dplyr::filter(men_rate, item_id == i) mens = mens$rating womens = dplyr::filter(women_rate, item_id==i) womens = womens$rating print (i) if (length(womens)<=1 || length(mens)<=1){ print(paste("err....", i, "is broken")) next() } if (sd(womens)<0.01 && sd(mens)<=0.01){ print(paste("err....", i, "is broken2")) next() } t = t.test(mens, womens, alternative="less") p = t$p.value if (p<p_threshold){ best_ps = c(p, best_ps) best_ids = c(i, best_ids) print (paste("Best ID has changed to ", i, " with p value", p)) } } movie_data = read.delim("C:/Users/User/Documents/GitHub/ml-100k/u.item", sep="|", header=FALSE, stringsAsFactors = FALSE) names(movie_data)[1:2] = c("movie_id", "movie_title") results = cbind.data.frame(best_titles = movie_data$movie_title[movie_data$movie_id %in% best_ids], best_ps) results = results[order(best_ps),] # These are the results results #--------------------------------------------------------- library("lattice") singer_df = singer head(singer_df) unique(singer_df$voice.part) singer_df$voice.part = as.character(singer_df$voice.part) replace_vec = c("Soprano", "Tenor", "Alto", "Bass") for (i in replace_vec){ singer_df$voice.part[grepl(i, singer_df$voice.part)] = i } singer_df = group_by(singer_df, voice.part) heights = mutate(singer_df, mean(height)) pairwise.t.test(singer_df$height, singer_df$voice.part, pool.sd=FALSE) # differences are 
significant :)
## Extract chlorophyll library(raster) library(lubridate) library(ggplot2) library(mgcv) setwd("D:/UCSC/Analysis/sasiHumpbacks/") # Source the function to get chlorophyll file source("./scripts/99_build_chlorophyll_dataset.R") # Get fitted data dat <- readRDS("./output/fitted_tracks_w_env_02.RDS") #------------------------ # Get chlorophyll # Assign index for later reassembly dat$dx <- 1:nrow(dat) # Date - unique year & month dat$ymd <- paste(year(dat$date), month(dat$date), 15, sep = "-") dts <- sort(unique(dat$ymd)) #----------- hold <- data.frame() for (i in 1:length(dts)) { print(paste(i, "of", length(dts)), sep = " ") this.date <- dts[i] # Get this month's data this.data <- dat[dat$ymd == this.date, ] # Geth the chla this.chla <- get_chla(date = this.date, lag = 0, datadir = "../mega/data_chlorophyll_download/") this.chla.lag1 <- get_chla(date = this.date, lag = 1, datadir = "../mega/data_chlorophyll_download/") this.chla.lag2 <- get_chla(date = this.date, lag = 2, datadir = "../mega/data_chlorophyll_download/") this.chla.lag3 <- get_chla(date = this.date, lag = 3, datadir = "../mega/data_chlorophyll_download/") this.data$CHLA_0m <- raster::extract(this.chla, this.data[ , c("lon", "lat")]) this.data$CHLA_1m <- raster::extract(this.chla.lag1, this.data[ , c("lon", "lat")]) this.data$CHLA_2m <- raster::extract(this.chla.lag2, this.data[ , c("lon", "lat")]) this.data$CHLA_3m <- raster::extract(this.chla.lag3, this.data[ , c("lon", "lat")]) hold <- rbind(hold, this.data) rm(this.date, this.data, this.chla, this.chla.lag1, this.chla.lag2, this.chla.lag3) } #------------------------ # Reorganise and save output # Reorder hold <- hold[order(hold$dx), ] # Drop unneccessary columns hold$dx <- NULL hold$ymd <- NULL # Save saveRDS(hold, "./output/fitted_tracks_w_env_03.RDS") #------------------------ # Check the two lags m0 <- gam(g ~ s(CHLA_0m), data = hold, family = "betar", REML = F) m1 <- gam(g ~ s(CHLA_1m), data = hold, family = "betar", REML = F) m2 <- gam(g ~ 
s(CHLA_2m), data = hold, family = "betar", REML = F) m3 <- gam(g ~ s(CHLA_3m), data = hold, family = "betar", REML = F) AIC(m0, m1, m2, m3) #------------------------ # Plot chlrophyll by breeding stock png("./output/figs/CHLA.png", height = 1200, width = 1200) ggplot(data = hold, aes(x = CHLA_0m, y = g, group = stock)) + geom_point(alpha = 0.1) + scale_y_continuous(limits = c(0, 1)) + scale_x_log10() + facet_wrap(~ stock, ncol = 2) + geom_rug(alpha = 0.1) + geom_smooth() + labs(title = "CHLA", x = "Chlorophyll a concentration (mg/m^3)", y = "Move persistence") dev.off() if (FALSE) { #------------------------ # Plot example chlorophyll for supplement library(SOmap) library(pals) # Get example chlorophyll example_chla <- get_chla(date = "2016-03-15", lag = 0) # Crop example_chla <- crop(example_chla, extent(-180, +180, -90, -40)) # And log example_chla <- log(example_chla) # Basemap mp <- SOmap( trim = -40, bathy_legend = F, border_col = c("white", "white"), border_width = 0.01, straight = TRUE, graticules = TRUE) # Get rid of bathymetry and its legend raster::values(mp$bathy[[1]]$plotargs$x) <- NA_real_ mp$bathy_legend <- NULL # Plot chlorophyll to file png("./out/figs/envar_examples/CHLA.png", height = 4, width = 6, units = "in", res = 300) mp SOplot(SOproj(example_chla), col = rev(ocean.algae(125)), add = T, legend.args = list(text = "log10(CHLA)")) dev.off() }
/scripts/04_extractEnvars_Chlorophyll.R
no_license
ryanreisinger/sasiHumpbacks
R
false
false
3,530
r
## Extract chlorophyll library(raster) library(lubridate) library(ggplot2) library(mgcv) setwd("D:/UCSC/Analysis/sasiHumpbacks/") # Source the function to get chlorophyll file source("./scripts/99_build_chlorophyll_dataset.R") # Get fitted data dat <- readRDS("./output/fitted_tracks_w_env_02.RDS") #------------------------ # Get chlorophyll # Assign index for later reassembly dat$dx <- 1:nrow(dat) # Date - unique year & month dat$ymd <- paste(year(dat$date), month(dat$date), 15, sep = "-") dts <- sort(unique(dat$ymd)) #----------- hold <- data.frame() for (i in 1:length(dts)) { print(paste(i, "of", length(dts)), sep = " ") this.date <- dts[i] # Get this month's data this.data <- dat[dat$ymd == this.date, ] # Geth the chla this.chla <- get_chla(date = this.date, lag = 0, datadir = "../mega/data_chlorophyll_download/") this.chla.lag1 <- get_chla(date = this.date, lag = 1, datadir = "../mega/data_chlorophyll_download/") this.chla.lag2 <- get_chla(date = this.date, lag = 2, datadir = "../mega/data_chlorophyll_download/") this.chla.lag3 <- get_chla(date = this.date, lag = 3, datadir = "../mega/data_chlorophyll_download/") this.data$CHLA_0m <- raster::extract(this.chla, this.data[ , c("lon", "lat")]) this.data$CHLA_1m <- raster::extract(this.chla.lag1, this.data[ , c("lon", "lat")]) this.data$CHLA_2m <- raster::extract(this.chla.lag2, this.data[ , c("lon", "lat")]) this.data$CHLA_3m <- raster::extract(this.chla.lag3, this.data[ , c("lon", "lat")]) hold <- rbind(hold, this.data) rm(this.date, this.data, this.chla, this.chla.lag1, this.chla.lag2, this.chla.lag3) } #------------------------ # Reorganise and save output # Reorder hold <- hold[order(hold$dx), ] # Drop unneccessary columns hold$dx <- NULL hold$ymd <- NULL # Save saveRDS(hold, "./output/fitted_tracks_w_env_03.RDS") #------------------------ # Check the two lags m0 <- gam(g ~ s(CHLA_0m), data = hold, family = "betar", REML = F) m1 <- gam(g ~ s(CHLA_1m), data = hold, family = "betar", REML = F) m2 <- gam(g ~ 
s(CHLA_2m), data = hold, family = "betar", REML = F) m3 <- gam(g ~ s(CHLA_3m), data = hold, family = "betar", REML = F) AIC(m0, m1, m2, m3) #------------------------ # Plot chlrophyll by breeding stock png("./output/figs/CHLA.png", height = 1200, width = 1200) ggplot(data = hold, aes(x = CHLA_0m, y = g, group = stock)) + geom_point(alpha = 0.1) + scale_y_continuous(limits = c(0, 1)) + scale_x_log10() + facet_wrap(~ stock, ncol = 2) + geom_rug(alpha = 0.1) + geom_smooth() + labs(title = "CHLA", x = "Chlorophyll a concentration (mg/m^3)", y = "Move persistence") dev.off() if (FALSE) { #------------------------ # Plot example chlorophyll for supplement library(SOmap) library(pals) # Get example chlorophyll example_chla <- get_chla(date = "2016-03-15", lag = 0) # Crop example_chla <- crop(example_chla, extent(-180, +180, -90, -40)) # And log example_chla <- log(example_chla) # Basemap mp <- SOmap( trim = -40, bathy_legend = F, border_col = c("white", "white"), border_width = 0.01, straight = TRUE, graticules = TRUE) # Get rid of bathymetry and its legend raster::values(mp$bathy[[1]]$plotargs$x) <- NA_real_ mp$bathy_legend <- NULL # Plot chlorophyll to file png("./out/figs/envar_examples/CHLA.png", height = 4, width = 6, units = "in", res = 300) mp SOplot(SOproj(example_chla), col = rev(ocean.algae(125)), add = T, legend.args = list(text = "log10(CHLA)")) dev.off() }
# Import libraries library(shiny) library(shinythemes) library(data.table) library(RCurl) library(randomForest) # Read data weather <- read.csv("D:\\Other Documents\\R language Autodidacticism\\4_Web apps in R\\Data-Driven Web Application in R\\weather-weka.csv", stringsAsFactors = T ) # Build model model <- randomForest(play ~ ., data = weather, ntree = 500, mtry = 4, importance = TRUE) # Save model to RDS file # saveRDS(model, "model.rds") # Read in the RF model #model <- readRDS("model.rds") #################################### # User interface # #################################### ui <- fluidPage(theme = shinytheme("united"), # Page header headerPanel('Play Golf?'), # Input values sidebarPanel( HTML("<h3>Input parameters</h3>"), selectInput("outlook", label = "Outlook:", choices = list("Sunny" = "sunny", "Overcast" = "overcast", "Rainy" = "rainy"), selected = "Rainy"), sliderInput("temperature", "Temperature:", min = 64, max = 86, value = 70), sliderInput("humidity", "Humidity:", min = 65, max = 96, value = 90), selectInput("windy", label = "Windy:", choices = list("Yes" = "TRUE", "No" = "FALSE"), selected = "TRUE"), actionButton("submitbutton", "Submit", class = "btn btn-primary") ), mainPanel( tags$label(h3('Status/Output')), # Status/Output Text Box verbatimTextOutput('contents'), tableOutput('tabledata') # Prediction results table ) ) #################################### # Server # #################################### server <- function(input, output, session) { # Input Data datasetInput <- reactive({ # outlook,temperature,humidity,windy,play df <- data.frame( Name = c("outlook", "temperature", "humidity", "windy"), Value = as.character(c(input$outlook, input$temperature, input$humidity, input$windy)), stringsAsFactors = FALSE) play <- "play" df <- rbind(df, play) input <- transpose(df) write.table(input,"input.csv", sep=",", quote = FALSE, row.names = FALSE, col.names = FALSE) test <- read.csv(paste("input", ".csv", sep=""), header = TRUE) test$outlook <- 
factor(test$outlook, levels = c("overcast", "rainy", "sunny")) Output <- data.frame(Prediction=predict(model,test), round(predict(model,test,type="prob"), 3)) print(Output) }) # Status/Output Text Box output$contents <- renderPrint({ if (input$submitbutton>0) { isolate("Calculation complete.") } else { return("Server is ready for calculation.") } }) # Prediction results table output$tabledata <- renderTable({ if (input$submitbutton>0) { isolate(datasetInput()) } }) } #################################### # Create the shiny app # #################################### shinyApp(ui = ui, server = server)
/4_Web apps in R/Data-Driven Web Application in R/Data-Driven Web Application in R.R
no_license
Rodhanp/R-language-Autodidacticism
R
false
false
3,596
r
# Import libraries library(shiny) library(shinythemes) library(data.table) library(RCurl) library(randomForest) # Read data weather <- read.csv("D:\\Other Documents\\R language Autodidacticism\\4_Web apps in R\\Data-Driven Web Application in R\\weather-weka.csv", stringsAsFactors = T ) # Build model model <- randomForest(play ~ ., data = weather, ntree = 500, mtry = 4, importance = TRUE) # Save model to RDS file # saveRDS(model, "model.rds") # Read in the RF model #model <- readRDS("model.rds") #################################### # User interface # #################################### ui <- fluidPage(theme = shinytheme("united"), # Page header headerPanel('Play Golf?'), # Input values sidebarPanel( HTML("<h3>Input parameters</h3>"), selectInput("outlook", label = "Outlook:", choices = list("Sunny" = "sunny", "Overcast" = "overcast", "Rainy" = "rainy"), selected = "Rainy"), sliderInput("temperature", "Temperature:", min = 64, max = 86, value = 70), sliderInput("humidity", "Humidity:", min = 65, max = 96, value = 90), selectInput("windy", label = "Windy:", choices = list("Yes" = "TRUE", "No" = "FALSE"), selected = "TRUE"), actionButton("submitbutton", "Submit", class = "btn btn-primary") ), mainPanel( tags$label(h3('Status/Output')), # Status/Output Text Box verbatimTextOutput('contents'), tableOutput('tabledata') # Prediction results table ) ) #################################### # Server # #################################### server <- function(input, output, session) { # Input Data datasetInput <- reactive({ # outlook,temperature,humidity,windy,play df <- data.frame( Name = c("outlook", "temperature", "humidity", "windy"), Value = as.character(c(input$outlook, input$temperature, input$humidity, input$windy)), stringsAsFactors = FALSE) play <- "play" df <- rbind(df, play) input <- transpose(df) write.table(input,"input.csv", sep=",", quote = FALSE, row.names = FALSE, col.names = FALSE) test <- read.csv(paste("input", ".csv", sep=""), header = TRUE) test$outlook <- 
factor(test$outlook, levels = c("overcast", "rainy", "sunny")) Output <- data.frame(Prediction=predict(model,test), round(predict(model,test,type="prob"), 3)) print(Output) }) # Status/Output Text Box output$contents <- renderPrint({ if (input$submitbutton>0) { isolate("Calculation complete.") } else { return("Server is ready for calculation.") } }) # Prediction results table output$tabledata <- renderTable({ if (input$submitbutton>0) { isolate(datasetInput()) } }) } #################################### # Create the shiny app # #################################### shinyApp(ui = ui, server = server)
# Type of variables # Quantitative x <- 3 # Constant - [0.. inf ) y <- 1.33 # Continuous z <- -1 # Interval - Numbers(-,+) # Qualitative person <- 'Girl' # or Boy - Nominal trust_level <- 'low' # middle or high - Ordinal
/02-Variables/01-variables.R
no_license
TheGlitchCat/probability-and-statistics-R
R
false
false
226
r
# Type of variables # Quantitative x <- 3 # Constant - [0.. inf ) y <- 1.33 # Continuous z <- -1 # Interval - Numbers(-,+) # Qualitative person <- 'Girl' # or Boy - Nominal trust_level <- 'low' # middle or high - Ordinal
############################################################## # Prediction of shedding scores using random forest regression ############################################################## # Set seed set.seed(4957936) # Load required libraries source("scr/functions/general/load_abs_install_pkg.R") load_abs_install_pkg(c("phyloseq", "tidyverse", "foreach", "doParallel", "randomForest")) # Create output directory dir.create("output/4_RF_regression_model") # 1. Extract and format data for the RF regression ################################################## # Read in filtered and normalized phyloseq object ps.tf.css.01 <- readRDS("output/3_filt_norm_phyl/ps_tf2_css.RDS") # Load custom function to format data from phyloseq for RF analysis source("scr/functions/data_for_rf.R") # Format data for RF regression analysis rf.data.css.01 <- data_for_rf(phyloseq = ps.tf.css.01, class.column = 'WeightedScoreII', remove.taxa.prev.less.than = 1, return.df = TRUE) # Convert the WeightedScoreII column into numeric rf.data.css.01$WeightedScoreII <- as.numeric(as.character(rf.data.css.01$WeightedScoreII)) # 2. Find the optimal number of trees necessary for the model ############################################################# # Make an RF regression model RF.tree <- randomForest(WeightedScoreII ~ . , data = rf.data.css.01, ntree = 15001) # Visualize and save error rate in response to the number of used trees pdf("output/4_RF_regression_model/Figure_S4A.pdf") plot(RF.tree) dev.off() # 3. 
Mtry parameter tuning ########################### # Load custom function for Mtry tuning and visualization source("scr/functions/Tree_Mtry_Plot.R") # Make a computing cluster (number of cores) cl <- makeCluster(10) # Register the cluster registerDoParallel(cl) # Visualize Mtry tunning attempts tree.mtry.plot.all <- Tree_Mtry_Plot(data = rf.data.css.01, ntrees = 7501, start_val = ncol(rf.data.css.01/3), stepF = 0.5, class_colum = "WeightedScoreII", ntimes = 3) # Stop the computing cluster stopCluster(cl) # Save the plot ggsave(plot = tree.mtry.plot.all, filename = "output/4_RF_regression_model/Figure_S4B.pdf") # 4. Prediction of Shedding values could be regression mode of RF ################################################################# # Load the function for creating matrices with randomly drawn not overlapping samples source("scr/functions/rand_draw_mat.R") # Load the custom function for building RF models without a set of samples # and consequently predict their values source("scr/functions/rf_and_test.R") # Make a computing cluster (number of cores) cl <- makeCluster(10) # Register the cluster registerDoParallel(cl) # Create an empty list rf.reg.res <- list() # Draw consequntly samples randomly substructed from from the datased # and predict their values. Samples will be drawn in five separate runs. for (i in 1:5) { # Create the matrix with randomly drawn not overlapping samples (10 samples per draw) rand.samp.reg <- rand_draw_mat(Samples_list = rownames(rf.data.css.01), Number_of_samp = 10) # Build an RF model without the randomly drawn set of samples and consequently predict their values. 
res.for <- foreach(i=1:nrow(rand.samp.reg),.packages = "randomForest") %dopar% { rf_and_test(rf_data = rf.data.css.01, samples_to_test = rand.samp.reg[i,], n_samples_training = 230, mtry = ncol(rf.data.css.01/3), ntree = 7501, variable_column = "WeightedScoreII", regression_TorF = TRUE)} # Combine results rf.reg.res[[i]] <- unlist(res.for) } # Stop the computing cluster stopCluster(cl) # Save the object save(rf.reg.res, file = "output/4_RF_regression_model/samples_reg_out.Rdata") # 5. Extract and optimize data for visualization ################################################ # Extract a part of metadata p.reg.meta <- data.frame(ps.tf.css.01@sam_data[, c("CowN", "WeightedScoreII", "AgeMonth")]) # Adjust row names p.reg.meta$ID <- rownames(p.reg.meta) # Create an empty dataframe for formatted data reg.pd <- data.frame() # Extract data from the list and add metadata (loop) # i is a list object number for(i in 1:length(rf.reg.res)) { # Extract individual object reg.r <- data.frame(rf.reg.res[[i]]) # Add an ID column containing rownames reg.r$ID <- rownames(reg.r) # Add metadata and bind into a long dataframe reg.pd <- rbind(reg.pd, left_join(reg.r, p.reg.meta, by="ID")) } # Prepare data for the barchart (first layer) by # leaving one row per cow reg.pd.bar <- reg.pd[!duplicated(reg.pd$CowN), ] # Create vector with ordered Cow ID ord.id <- reg.pd.bar$CowN[order(as.numeric(sub(",", ".", as.character(reg.pd.bar$WeightedScoreII))))] # Arrange levels order for Cow ID reg.pd$CowN <- factor(reg.pd$CowN, levels = ord.id) # Find mean value of predicted shedding scores per animal reg.pd.mean <- data.frame(aggregate(reg.pd$rf.reg.res..i.., list(reg.pd$CowN), mean)) # Adjust columns names colnames(reg.pd.mean) <- c("CowN", "PredV") # 6. 
Visualize results of RF regression model prediction ######################################################## # Plot the results pred.val.plot <- ggplot() + geom_bar(data = reg.pd[!duplicated(reg.pd$CowN), ], aes(x = CowN, y = WeightedScoreII), fill ="grey60", stat = "identity") + geom_jitter(data = reg.pd, aes(x = CowN, y = rf.reg.res..i.., color=AgeMonth), width = 0.2) + geom_smooth(data = reg.pd, aes(x = as.numeric(CowN), y = rf.reg.res..i..), method = "loess") + theme_bw() + theme(axis.text.x = element_text(angle = 90, hjust = 0.5)) + ylab("Weighted Score") + xlab("Cow ID") # Save plots ggsave(filename = "output/4_RF_regression_model/Figure_S5.pdf", pred.val.plot, width = 7, height = 4) ggsave(filename = "output/4_RF_regression_model/Figure_S5.png", pred.val.plot, width = 7, height = 4, dpi=400) # 7. Summary of the RF regression results ######################################### # Summarize differences between predicted and actual value predv.dif.summ <- summary(abs(reg.pd$rf.reg.res..i.. - reg.pd$WeightedScoreII)) # Format the summary table predv.dif.summ <- as.data.frame(cbind(names(predv.dif.summ), as.vector(predv.dif.summ))) # Write summary table into file write.csv(predv.dif.summ, "output/4_RF_regression_model/predv_dif_summ.csv") # 8. Correlation analysis between predicted and actual shedding values ###################################################################### # Prepare data for correlation analysis # Combine mean predicted values dataframe and data frame contained actual values cor.d <- inner_join(reg.pd.mean, reg.pd, by = "CowN") # Remove rows with not unique cows names cor.d1 <- cor.d[!duplicated(cor.d$CowN), ] # Correlation test between predicted and actual values. c.pred.score <- cor.test(cor.d1$PredV, cor.d1$WeightedScoreII) # Correlation between differences in predicted and actual values and animals age. c.sdiv.age <- cor.test(reg.pd$rf.reg.res..i.. 
- reg.pd$WeightedScoreII, reg.pd$AgeMonth) # Correlation between differences in predicted and actual values and individual cows. c.sdiv.cow <- cor.test(reg.pd$rf.reg.res..i.. - reg.pd$WeightedScoreII, as.numeric(reg.pd$CowN)) # Combine results of correlation analysis into a single dataframe. all.cor <- rbind(c(c.pred.score$conf.int, c.pred.score$estimate, c.pred.score$p.value), c(c.sdiv.age$conf.int, c.sdiv.age$estimate, c.sdiv.age$p.value), c(c.sdiv.cow$conf.int, c.sdiv.cow$estimate, c.sdiv.cow$p.value)) # Adjust columns names in the combined table. colnames(all.cor) <- c("ci_low", "ci_high", "r(cor)", "p_val") rownames(all.cor) <- c("Predicte & Real", "Age & Difference in values", "Cow ID & Difference in values") # Write the combined table into a file. write.table(as.data.frame(all.cor), "output/4_RF_regression_model/correlations.txt")
/4_RF_regression_model.R
no_license
AlexanderUm/WBVR_MAP_Microbiota
R
false
false
8,502
r
############################################################## # Prediction of shedding scores using random forest regression ############################################################## # Set seed set.seed(4957936) # Load required libraries source("scr/functions/general/load_abs_install_pkg.R") load_abs_install_pkg(c("phyloseq", "tidyverse", "foreach", "doParallel", "randomForest")) # Create output directory dir.create("output/4_RF_regression_model") # 1. Extract and format data for the RF regression ################################################## # Read in filtered and normalized phyloseq object ps.tf.css.01 <- readRDS("output/3_filt_norm_phyl/ps_tf2_css.RDS") # Load custom function to format data from phyloseq for RF analysis source("scr/functions/data_for_rf.R") # Format data for RF regression analysis rf.data.css.01 <- data_for_rf(phyloseq = ps.tf.css.01, class.column = 'WeightedScoreII', remove.taxa.prev.less.than = 1, return.df = TRUE) # Convert the WeightedScoreII column into numeric rf.data.css.01$WeightedScoreII <- as.numeric(as.character(rf.data.css.01$WeightedScoreII)) # 2. Find the optimal number of trees necessary for the model ############################################################# # Make an RF regression model RF.tree <- randomForest(WeightedScoreII ~ . , data = rf.data.css.01, ntree = 15001) # Visualize and save error rate in response to the number of used trees pdf("output/4_RF_regression_model/Figure_S4A.pdf") plot(RF.tree) dev.off() # 3. 
Mtry parameter tuning ########################### # Load custom function for Mtry tuning and visualization source("scr/functions/Tree_Mtry_Plot.R") # Make a computing cluster (number of cores) cl <- makeCluster(10) # Register the cluster registerDoParallel(cl) # Visualize Mtry tunning attempts tree.mtry.plot.all <- Tree_Mtry_Plot(data = rf.data.css.01, ntrees = 7501, start_val = ncol(rf.data.css.01/3), stepF = 0.5, class_colum = "WeightedScoreII", ntimes = 3) # Stop the computing cluster stopCluster(cl) # Save the plot ggsave(plot = tree.mtry.plot.all, filename = "output/4_RF_regression_model/Figure_S4B.pdf") # 4. Prediction of Shedding values could be regression mode of RF ################################################################# # Load the function for creating matrices with randomly drawn not overlapping samples source("scr/functions/rand_draw_mat.R") # Load the custom function for building RF models without a set of samples # and consequently predict their values source("scr/functions/rf_and_test.R") # Make a computing cluster (number of cores) cl <- makeCluster(10) # Register the cluster registerDoParallel(cl) # Create an empty list rf.reg.res <- list() # Draw consequntly samples randomly substructed from from the datased # and predict their values. Samples will be drawn in five separate runs. for (i in 1:5) { # Create the matrix with randomly drawn not overlapping samples (10 samples per draw) rand.samp.reg <- rand_draw_mat(Samples_list = rownames(rf.data.css.01), Number_of_samp = 10) # Build an RF model without the randomly drawn set of samples and consequently predict their values. 
res.for <- foreach(i=1:nrow(rand.samp.reg),.packages = "randomForest") %dopar% { rf_and_test(rf_data = rf.data.css.01, samples_to_test = rand.samp.reg[i,], n_samples_training = 230, mtry = ncol(rf.data.css.01/3), ntree = 7501, variable_column = "WeightedScoreII", regression_TorF = TRUE)} # Combine results rf.reg.res[[i]] <- unlist(res.for) } # Stop the computing cluster stopCluster(cl) # Save the object save(rf.reg.res, file = "output/4_RF_regression_model/samples_reg_out.Rdata") # 5. Extract and optimize data for visualization ################################################ # Extract a part of metadata p.reg.meta <- data.frame(ps.tf.css.01@sam_data[, c("CowN", "WeightedScoreII", "AgeMonth")]) # Adjust row names p.reg.meta$ID <- rownames(p.reg.meta) # Create an empty dataframe for formatted data reg.pd <- data.frame() # Extract data from the list and add metadata (loop) # i is a list object number for(i in 1:length(rf.reg.res)) { # Extract individual object reg.r <- data.frame(rf.reg.res[[i]]) # Add an ID column containing rownames reg.r$ID <- rownames(reg.r) # Add metadata and bind into a long dataframe reg.pd <- rbind(reg.pd, left_join(reg.r, p.reg.meta, by="ID")) } # Prepare data for the barchart (first layer) by # leaving one row per cow reg.pd.bar <- reg.pd[!duplicated(reg.pd$CowN), ] # Create vector with ordered Cow ID ord.id <- reg.pd.bar$CowN[order(as.numeric(sub(",", ".", as.character(reg.pd.bar$WeightedScoreII))))] # Arrange levels order for Cow ID reg.pd$CowN <- factor(reg.pd$CowN, levels = ord.id) # Find mean value of predicted shedding scores per animal reg.pd.mean <- data.frame(aggregate(reg.pd$rf.reg.res..i.., list(reg.pd$CowN), mean)) # Adjust columns names colnames(reg.pd.mean) <- c("CowN", "PredV") # 6. 
Visualize results of RF regression model prediction ######################################################## # Plot the results pred.val.plot <- ggplot() + geom_bar(data = reg.pd[!duplicated(reg.pd$CowN), ], aes(x = CowN, y = WeightedScoreII), fill ="grey60", stat = "identity") + geom_jitter(data = reg.pd, aes(x = CowN, y = rf.reg.res..i.., color=AgeMonth), width = 0.2) + geom_smooth(data = reg.pd, aes(x = as.numeric(CowN), y = rf.reg.res..i..), method = "loess") + theme_bw() + theme(axis.text.x = element_text(angle = 90, hjust = 0.5)) + ylab("Weighted Score") + xlab("Cow ID") # Save plots ggsave(filename = "output/4_RF_regression_model/Figure_S5.pdf", pred.val.plot, width = 7, height = 4) ggsave(filename = "output/4_RF_regression_model/Figure_S5.png", pred.val.plot, width = 7, height = 4, dpi=400) # 7. Summary of the RF regression results ######################################### # Summarize differences between predicted and actual value predv.dif.summ <- summary(abs(reg.pd$rf.reg.res..i.. - reg.pd$WeightedScoreII)) # Format the summary table predv.dif.summ <- as.data.frame(cbind(names(predv.dif.summ), as.vector(predv.dif.summ))) # Write summary table into file write.csv(predv.dif.summ, "output/4_RF_regression_model/predv_dif_summ.csv") # 8. Correlation analysis between predicted and actual shedding values ###################################################################### # Prepare data for correlation analysis # Combine mean predicted values dataframe and data frame contained actual values cor.d <- inner_join(reg.pd.mean, reg.pd, by = "CowN") # Remove rows with not unique cows names cor.d1 <- cor.d[!duplicated(cor.d$CowN), ] # Correlation test between predicted and actual values. c.pred.score <- cor.test(cor.d1$PredV, cor.d1$WeightedScoreII) # Correlation between differences in predicted and actual values and animals age. c.sdiv.age <- cor.test(reg.pd$rf.reg.res..i.. 
- reg.pd$WeightedScoreII, reg.pd$AgeMonth) # Correlation between differences in predicted and actual values and individual cows. c.sdiv.cow <- cor.test(reg.pd$rf.reg.res..i.. - reg.pd$WeightedScoreII, as.numeric(reg.pd$CowN)) # Combine results of correlation analysis into a single dataframe. all.cor <- rbind(c(c.pred.score$conf.int, c.pred.score$estimate, c.pred.score$p.value), c(c.sdiv.age$conf.int, c.sdiv.age$estimate, c.sdiv.age$p.value), c(c.sdiv.cow$conf.int, c.sdiv.cow$estimate, c.sdiv.cow$p.value)) # Adjust columns names in the combined table. colnames(all.cor) <- c("ci_low", "ci_high", "r(cor)", "p_val") rownames(all.cor) <- c("Predicte & Real", "Age & Difference in values", "Cow ID & Difference in values") # Write the combined table into a file. write.table(as.data.frame(all.cor), "output/4_RF_regression_model/correlations.txt")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dummy_rows.R \name{dummy_rows} \alias{dummy_rows} \title{Fast creation of dummy rows} \usage{ dummy_rows(.data, select_columns = NULL, dummy_value = NA, dummy_indicator = FALSE) } \arguments{ \item{.data}{An object with the data set you want to make dummy columns from.} \item{select_columns}{If NULL (default), uses all character, factor, and Date columns to produce categories to make the dummy rows by. If not NULL, you manually enter a string or vector of strings of columns name(s).} \item{dummy_value}{Value of the row for columns that are not selected. Default is a value of NA.} \item{dummy_indicator}{Adds binary column to say if row is dummy or not (i.e. included in original data or not)} } \value{ A data.frame (or tibble or data.table, depending on input data type) with same number of columns as inputted data and original rows plus the newly created dummy rows } \description{ dummy_rows() quickly creates dummy rows to fill in missing rows based on all combinations of available character, factor, and date columns (if not otherwise specified). This is useful for creating balanced panel data. Columns that are not character, factor, or dates are filled in with NA (or whatever value you specify). } \examples{ crime <- data.frame(city = c("SF", "SF", "NYC"), year = c(1990, 2000, 1990), crime = 1:3) dummy_rows(crime) # Include year column dummy_rows(crime, select_columns = c("city", "year")) # m=Make dummy value 0 dummy_rows(crime, select_columns = c("city", "year"), dummy_value = 0) # Add a dummy indicator dummy_rows(crime, select_columns = c("city", "year"), dummy_indicator = TRUE) } \seealso{ \code{\link{dummy_cols}} For creating dummy columns Other dummy functions: \code{\link{dummy_cols}}, \code{\link{dummy_columns}} } \concept{dummy functions}
/man/dummy_rows.Rd
permissive
mlamias/fastDummies
R
false
true
1,879
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dummy_rows.R \name{dummy_rows} \alias{dummy_rows} \title{Fast creation of dummy rows} \usage{ dummy_rows(.data, select_columns = NULL, dummy_value = NA, dummy_indicator = FALSE) } \arguments{ \item{.data}{An object with the data set you want to make dummy columns from.} \item{select_columns}{If NULL (default), uses all character, factor, and Date columns to produce categories to make the dummy rows by. If not NULL, you manually enter a string or vector of strings of columns name(s).} \item{dummy_value}{Value of the row for columns that are not selected. Default is a value of NA.} \item{dummy_indicator}{Adds binary column to say if row is dummy or not (i.e. included in original data or not)} } \value{ A data.frame (or tibble or data.table, depending on input data type) with same number of columns as inputted data and original rows plus the newly created dummy rows } \description{ dummy_rows() quickly creates dummy rows to fill in missing rows based on all combinations of available character, factor, and date columns (if not otherwise specified). This is useful for creating balanced panel data. Columns that are not character, factor, or dates are filled in with NA (or whatever value you specify). } \examples{ crime <- data.frame(city = c("SF", "SF", "NYC"), year = c(1990, 2000, 1990), crime = 1:3) dummy_rows(crime) # Include year column dummy_rows(crime, select_columns = c("city", "year")) # m=Make dummy value 0 dummy_rows(crime, select_columns = c("city", "year"), dummy_value = 0) # Add a dummy indicator dummy_rows(crime, select_columns = c("city", "year"), dummy_indicator = TRUE) } \seealso{ \code{\link{dummy_cols}} For creating dummy columns Other dummy functions: \code{\link{dummy_cols}}, \code{\link{dummy_columns}} } \concept{dummy functions}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/attcpo.R \name{attcpo} \alias{attcpo} \title{attcpo} \usage{ attcpo(formla, t, tmin1, tmin2, tname, data, idname, Y0tqteobj, h = NULL, yseq = NULL, yseqlen = 100, se = TRUE, iters = 100, method = "level") } \arguments{ \item{formla}{e.g. y ~ treat} \item{t}{the last time period} \item{tmin1}{the middle time period} \item{tmin2}{the first time period} \item{tname}{the name of the column containing time periods in the data} \item{data}{a data.frame} \item{idname}{the name of the column containing an individual identifier over time} \item{Y0tqteobj}{a qte object (from the qte package) containing the the counterfactual distribution of untreated potential outcomes for the treated group} \item{h}{optional bandwidth} \item{yseq}{optional sequence of y values, default is to use all unique yvalues in the data, though this can increase computation time} \item{yseqlen}{optional length of y values to use, aids in automatically generating yseq if desired} \item{se}{whether or not to compute standard errors} \item{iters}{how many bootstrap iterations to use if computing standard errors; default is 100.} \item{method}{should be either "levels" or "rank"; whether to compute the ATT-CPO using based on the levels of Y0tmin1 or the ranks of Y0tmin1; "levels" is the default.} } \value{ att-cpo } \description{ compute the Average Treatment Effect on the Treated Conditional on the previous outcome (ATT-CPO) } \examples{ data(displacements) cc <- qte::CiC(learn ~ treat, t=2011, tmin1=2007, tname="year", idname="id", panel=TRUE, data=displacements, probs=seq(.05,.95,.01),se=FALSE) cc$F.treated.tmin1 <- ecdf(subset(displacements, year==2007 & treat==1)$learn) cc$F.treated.tmin2 <- ecdf(subset(displacements, year==2003 & treat==1)$learn) ac <- attcpo(learn ~ treat, 2011, 2007, 2003, "year", displacements, "id", cc, method="rank", yseqlen=10) ac ggattcpo(ac) }
/man/attcpo.Rd
no_license
u200915986/csabounds
R
false
true
2,016
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/attcpo.R \name{attcpo} \alias{attcpo} \title{attcpo} \usage{ attcpo(formla, t, tmin1, tmin2, tname, data, idname, Y0tqteobj, h = NULL, yseq = NULL, yseqlen = 100, se = TRUE, iters = 100, method = "level") } \arguments{ \item{formla}{e.g. y ~ treat} \item{t}{the last time period} \item{tmin1}{the middle time period} \item{tmin2}{the first time period} \item{tname}{the name of the column containing time periods in the data} \item{data}{a data.frame} \item{idname}{the name of the column containing an individual identifier over time} \item{Y0tqteobj}{a qte object (from the qte package) containing the the counterfactual distribution of untreated potential outcomes for the treated group} \item{h}{optional bandwidth} \item{yseq}{optional sequence of y values, default is to use all unique yvalues in the data, though this can increase computation time} \item{yseqlen}{optional length of y values to use, aids in automatically generating yseq if desired} \item{se}{whether or not to compute standard errors} \item{iters}{how many bootstrap iterations to use if computing standard errors; default is 100.} \item{method}{should be either "levels" or "rank"; whether to compute the ATT-CPO using based on the levels of Y0tmin1 or the ranks of Y0tmin1; "levels" is the default.} } \value{ att-cpo } \description{ compute the Average Treatment Effect on the Treated Conditional on the previous outcome (ATT-CPO) } \examples{ data(displacements) cc <- qte::CiC(learn ~ treat, t=2011, tmin1=2007, tname="year", idname="id", panel=TRUE, data=displacements, probs=seq(.05,.95,.01),se=FALSE) cc$F.treated.tmin1 <- ecdf(subset(displacements, year==2007 & treat==1)$learn) cc$F.treated.tmin2 <- ecdf(subset(displacements, year==2003 & treat==1)$learn) ac <- attcpo(learn ~ treat, 2011, 2007, 2003, "year", displacements, "id", cc, method="rank", yseqlen=10) ac ggattcpo(ac) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vm_sev_complete_unstd_fit.ols.R \name{vm_sev_fit.ols_simulation_summary} \alias{vm_sev_fit.ols_simulation_summary} \title{Fit Simple Mediation Model for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 3, Kurtosis = 21) - Ordinary Least Squares (Simulation Summary)} \usage{ vm_sev_fit.ols_simulation_summary( dir = getwd(), all = TRUE, taskid = NULL, par = TRUE, ncores = NULL, blas_threads = TRUE, mc = TRUE, lb = FALSE, cl_eval = FALSE, cl_export = FALSE, cl_expr, cl_vars ) } \arguments{ \item{dir}{Character string. Directory where results of \verb{vm_sev_dat_*} are stored.} \item{all}{Logical. Process all results.} \item{taskid}{Numeric. Task ID.} \item{par}{Logical. If \code{TRUE}, use multiple cores. If \code{FALSE}, use \code{\link[=lapply]{lapply()}}.} \item{ncores}{Integer. Number of cores to use if \code{par = TRUE}. If unspecified, defaults to \code{detectCores() - 1}.} \item{blas_threads}{Logical. If \code{TRUE}, set BLAS threads using \code{blas_set_num_threads(threads = blas_get_num_procs())}. If \code{FALSE}, set BLAS threads using \code{blas_set_num_threads(threads = 1)}. If \code{par = TRUE}, \code{blas_threads} is automatically set to \code{FALSE} to prevent conflicts in parallel processing. This argument is useful when \code{FUN} can handle implicit parallelism when \code{par = FALSE}, for example linear algebra operations.} \item{mc}{Logical. If \code{TRUE}, use \code{\link[parallel:mclapply]{parallel::mclapply()}}. If \code{FALSE}, use \code{\link[parallel:clusterApply]{parallel::parLapply()}} or \code{\link[parallel:clusterApply]{parallel::parLapplyLB()}}. Ignored if \code{par = FALSE}.} \item{lb}{Logical. If \code{TRUE} use \code{\link[parallel:clusterApply]{parallel::parLapplyLB()}}. If \code{FALSE}, use \code{\link[parallel:clusterApply]{parallel::parLapply()}}. Ignored if \code{par = FALSE} and \code{mc = TRUE}.} \item{cl_eval}{Logical. 
Execute \code{\link[parallel:clusterApply]{parallel::clusterEvalQ()}} using \code{cl_expr}. Ignored if \code{mc = TRUE}.} \item{cl_export}{Logical. Execute \code{\link[parallel:clusterApply]{parallel::clusterExport()}} using \code{cl_vars}. Ignored if \code{mc = TRUE}.} \item{cl_expr}{Expression. Expression passed to \code{\link[parallel:clusterApply]{parallel::clusterEvalQ()}} Ignored if \code{mc = TRUE}.} \item{cl_vars}{Character vector. Names of objects to pass to \code{\link[parallel:clusterApply]{parallel::clusterExport()}} Ignored if \code{mc = TRUE}.} } \description{ Fit Simple Mediation Model for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 3, Kurtosis = 21) - Ordinary Least Squares (Simulation Summary) } \seealso{ Other model fit functions: \code{\link{beta_fit.ols_simulation_summary}()}, \code{\link{beta_fit.ols_simulation}()}, \code{\link{beta_fit.ols_task_summary}()}, \code{\link{beta_fit.ols_task}()}, \code{\link{beta_fit.ols}()}, \code{\link{beta_fit.sem.mlr_simulation_summary}()}, \code{\link{beta_fit.sem.mlr_simulation}()}, \code{\link{beta_fit.sem.mlr_task_summary}()}, \code{\link{beta_fit.sem.mlr_task}()}, \code{\link{beta_fit.sem.mlr}()}, \code{\link{beta_std_fit.sem.mlr_simulation_summary}()}, \code{\link{beta_std_fit.sem.mlr_simulation}()}, \code{\link{beta_std_fit.sem.mlr_task_summary}()}, \code{\link{beta_std_fit.sem.mlr_task}()}, \code{\link{beta_std_fit.sem.mlr}()}, \code{\link{exp_fit.ols_simulation_summary}()}, \code{\link{exp_fit.ols_simulation}()}, \code{\link{exp_fit.ols_task_summary}()}, \code{\link{exp_fit.ols_task}()}, \code{\link{exp_fit.ols}()}, \code{\link{exp_fit.sem.mlr_simulation_summary}()}, \code{\link{exp_fit.sem.mlr_simulation}()}, \code{\link{exp_fit.sem.mlr_task_summary}()}, \code{\link{exp_fit.sem.mlr_task}()}, \code{\link{exp_fit.sem.mlr}()}, \code{\link{exp_std_fit.sem.mlr_simulation_summary}()}, \code{\link{exp_std_fit.sem.mlr_simulation}()}, \code{\link{exp_std_fit.sem.mlr_task_summary}()}, 
\code{\link{exp_std_fit.sem.mlr_task}()}, \code{\link{exp_std_fit.sem.mlr}()}, \code{\link{fit.cov}()}, \code{\link{fit.ols}()}, \code{\link{fit.sem.mlr}()}, \code{\link{fit.sem}()}, \code{\link{mvn_fit.ols_simulation_summary}()}, \code{\link{mvn_fit.ols_simulation}()}, \code{\link{mvn_fit.ols_task_summary}()}, \code{\link{mvn_fit.ols_task}()}, \code{\link{mvn_fit.ols}()}, \code{\link{mvn_fit.sem_simulation_summary}()}, \code{\link{mvn_fit.sem_simulation}()}, \code{\link{mvn_fit.sem_task_summary}()}, \code{\link{mvn_fit.sem_task}()}, \code{\link{mvn_fit.sem}()}, \code{\link{mvn_mar_10_fit.sem_simulation_summary}()}, \code{\link{mvn_mar_10_fit.sem_simulation}()}, \code{\link{mvn_mar_10_fit.sem_task_summary}()}, \code{\link{mvn_mar_10_fit.sem_task}()}, \code{\link{mvn_mar_10_fit.sem}()}, \code{\link{mvn_mar_20_fit.sem_simulation_summary}()}, \code{\link{mvn_mar_20_fit.sem_simulation}()}, \code{\link{mvn_mar_20_fit.sem_task_summary}()}, \code{\link{mvn_mar_20_fit.sem_task}()}, \code{\link{mvn_mar_20_fit.sem}()}, \code{\link{mvn_mar_30_fit.sem_simulation_summary}()}, \code{\link{mvn_mar_30_fit.sem_simulation}()}, \code{\link{mvn_mar_30_fit.sem_task_summary}()}, \code{\link{mvn_mar_30_fit.sem_task}()}, \code{\link{mvn_mar_30_fit.sem}()}, \code{\link{mvn_mcar_10_fit.sem_simulation_summary}()}, \code{\link{mvn_mcar_10_fit.sem_simulation}()}, \code{\link{mvn_mcar_10_fit.sem_task_summary}()}, \code{\link{mvn_mcar_10_fit.sem_task}()}, \code{\link{mvn_mcar_10_fit.sem}()}, \code{\link{mvn_mcar_20_fit.sem_simulation_summary}()}, \code{\link{mvn_mcar_20_fit.sem_simulation}()}, \code{\link{mvn_mcar_20_fit.sem_task_summary}()}, \code{\link{mvn_mcar_20_fit.sem_task}()}, \code{\link{mvn_mcar_20_fit.sem}()}, \code{\link{mvn_mcar_30_fit.sem_simulation_summary}()}, \code{\link{mvn_mcar_30_fit.sem_simulation}()}, \code{\link{mvn_mcar_30_fit.sem_task_summary}()}, \code{\link{mvn_mcar_30_fit.sem_task}()}, \code{\link{mvn_mcar_30_fit.sem}()}, 
\code{\link{mvn_mnar_10_fit.sem_simulation_summary}()}, \code{\link{mvn_mnar_10_fit.sem_simulation}()}, \code{\link{mvn_mnar_10_fit.sem_task_summary}()}, \code{\link{mvn_mnar_10_fit.sem_task}()}, \code{\link{mvn_mnar_10_fit.sem}()}, \code{\link{mvn_mnar_20_fit.sem_simulation_summary}()}, \code{\link{mvn_mnar_20_fit.sem_simulation}()}, \code{\link{mvn_mnar_20_fit.sem_task_summary}()}, \code{\link{mvn_mnar_20_fit.sem_task}()}, \code{\link{mvn_mnar_20_fit.sem}()}, \code{\link{mvn_mnar_30_fit.sem_simulation_summary}()}, \code{\link{mvn_mnar_30_fit.sem_simulation}()}, \code{\link{mvn_mnar_30_fit.sem_task_summary}()}, \code{\link{mvn_mnar_30_fit.sem_task}()}, \code{\link{mvn_mnar_30_fit.sem}()}, \code{\link{mvn_std_fit.sem_simulation_summary}()}, \code{\link{mvn_std_fit.sem_simulation}()}, \code{\link{mvn_std_fit.sem_task_summary}()}, \code{\link{mvn_std_fit.sem_task}()}, \code{\link{mvn_std_fit.sem}()}, \code{\link{vm_mod_fit.ols_simulation_summary}()}, \code{\link{vm_mod_fit.ols_simulation}()}, \code{\link{vm_mod_fit.ols_task_summary}()}, \code{\link{vm_mod_fit.ols_task}()}, \code{\link{vm_mod_fit.ols}()}, \code{\link{vm_mod_fit.sem.mlr_simulation_summary}()}, \code{\link{vm_mod_fit.sem.mlr_simulation}()}, \code{\link{vm_mod_fit.sem.mlr_task_summary}()}, \code{\link{vm_mod_fit.sem.mlr_task}()}, \code{\link{vm_mod_fit.sem.mlr}()}, \code{\link{vm_mod_std_fit.sem.mlr_simulation_summary}()}, \code{\link{vm_mod_std_fit.sem.mlr_simulation}()}, \code{\link{vm_mod_std_fit.sem.mlr_task_summary}()}, \code{\link{vm_mod_std_fit.sem.mlr_task}()}, \code{\link{vm_mod_std_fit.sem.mlr}()}, \code{\link{vm_sev_fit.ols_simulation}()}, \code{\link{vm_sev_fit.ols_task_summary}()}, \code{\link{vm_sev_fit.ols_task}()}, \code{\link{vm_sev_fit.ols}()}, \code{\link{vm_sev_fit.sem.mlr_simulation_summary}()}, \code{\link{vm_sev_fit.sem.mlr_simulation}()}, \code{\link{vm_sev_fit.sem.mlr_task_summary}()}, \code{\link{vm_sev_fit.sem.mlr_task}()}, \code{\link{vm_sev_fit.sem.mlr}()}, 
\code{\link{vm_sev_std_fit.sem.mlr_simulation_summary}()}, \code{\link{vm_sev_std_fit.sem.mlr_simulation}()}, \code{\link{vm_sev_std_fit.sem.mlr_task_summary}()}, \code{\link{vm_sev_std_fit.sem.mlr_task}()}, \code{\link{vm_sev_std_fit.sem.mlr}()} } \author{ Ivan Jacob Agaloos Pesigan } \concept{model fit functions} \keyword{fit}
/man/vm_sev_fit.ols_simulation_summary.Rd
permissive
jeksterslabds/jeksterslabRmedsimple
R
false
true
8,291
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vm_sev_complete_unstd_fit.ols.R \name{vm_sev_fit.ols_simulation_summary} \alias{vm_sev_fit.ols_simulation_summary} \title{Fit Simple Mediation Model for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 3, Kurtosis = 21) - Ordinary Least Squares (Simulation Summary)} \usage{ vm_sev_fit.ols_simulation_summary( dir = getwd(), all = TRUE, taskid = NULL, par = TRUE, ncores = NULL, blas_threads = TRUE, mc = TRUE, lb = FALSE, cl_eval = FALSE, cl_export = FALSE, cl_expr, cl_vars ) } \arguments{ \item{dir}{Character string. Directory where results of \verb{vm_sev_dat_*} are stored.} \item{all}{Logical. Process all results.} \item{taskid}{Numeric. Task ID.} \item{par}{Logical. If \code{TRUE}, use multiple cores. If \code{FALSE}, use \code{\link[=lapply]{lapply()}}.} \item{ncores}{Integer. Number of cores to use if \code{par = TRUE}. If unspecified, defaults to \code{detectCores() - 1}.} \item{blas_threads}{Logical. If \code{TRUE}, set BLAS threads using \code{blas_set_num_threads(threads = blas_get_num_procs())}. If \code{FALSE}, set BLAS threads using \code{blas_set_num_threads(threads = 1)}. If \code{par = TRUE}, \code{blas_threads} is automatically set to \code{FALSE} to prevent conflicts in parallel processing. This argument is useful when \code{FUN} can handle implicit parallelism when \code{par = FALSE}, for example linear algebra operations.} \item{mc}{Logical. If \code{TRUE}, use \code{\link[parallel:mclapply]{parallel::mclapply()}}. If \code{FALSE}, use \code{\link[parallel:clusterApply]{parallel::parLapply()}} or \code{\link[parallel:clusterApply]{parallel::parLapplyLB()}}. Ignored if \code{par = FALSE}.} \item{lb}{Logical. If \code{TRUE} use \code{\link[parallel:clusterApply]{parallel::parLapplyLB()}}. If \code{FALSE}, use \code{\link[parallel:clusterApply]{parallel::parLapply()}}. Ignored if \code{par = FALSE} and \code{mc = TRUE}.} \item{cl_eval}{Logical. 
Execute \code{\link[parallel:clusterApply]{parallel::clusterEvalQ()}} using \code{cl_expr}. Ignored if \code{mc = TRUE}.} \item{cl_export}{Logical. Execute \code{\link[parallel:clusterApply]{parallel::clusterExport()}} using \code{cl_vars}. Ignored if \code{mc = TRUE}.} \item{cl_expr}{Expression. Expression passed to \code{\link[parallel:clusterApply]{parallel::clusterEvalQ()}} Ignored if \code{mc = TRUE}.} \item{cl_vars}{Character vector. Names of objects to pass to \code{\link[parallel:clusterApply]{parallel::clusterExport()}} Ignored if \code{mc = TRUE}.} } \description{ Fit Simple Mediation Model for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 3, Kurtosis = 21) - Ordinary Least Squares (Simulation Summary) } \seealso{ Other model fit functions: \code{\link{beta_fit.ols_simulation_summary}()}, \code{\link{beta_fit.ols_simulation}()}, \code{\link{beta_fit.ols_task_summary}()}, \code{\link{beta_fit.ols_task}()}, \code{\link{beta_fit.ols}()}, \code{\link{beta_fit.sem.mlr_simulation_summary}()}, \code{\link{beta_fit.sem.mlr_simulation}()}, \code{\link{beta_fit.sem.mlr_task_summary}()}, \code{\link{beta_fit.sem.mlr_task}()}, \code{\link{beta_fit.sem.mlr}()}, \code{\link{beta_std_fit.sem.mlr_simulation_summary}()}, \code{\link{beta_std_fit.sem.mlr_simulation}()}, \code{\link{beta_std_fit.sem.mlr_task_summary}()}, \code{\link{beta_std_fit.sem.mlr_task}()}, \code{\link{beta_std_fit.sem.mlr}()}, \code{\link{exp_fit.ols_simulation_summary}()}, \code{\link{exp_fit.ols_simulation}()}, \code{\link{exp_fit.ols_task_summary}()}, \code{\link{exp_fit.ols_task}()}, \code{\link{exp_fit.ols}()}, \code{\link{exp_fit.sem.mlr_simulation_summary}()}, \code{\link{exp_fit.sem.mlr_simulation}()}, \code{\link{exp_fit.sem.mlr_task_summary}()}, \code{\link{exp_fit.sem.mlr_task}()}, \code{\link{exp_fit.sem.mlr}()}, \code{\link{exp_std_fit.sem.mlr_simulation_summary}()}, \code{\link{exp_std_fit.sem.mlr_simulation}()}, \code{\link{exp_std_fit.sem.mlr_task_summary}()}, 
\code{\link{exp_std_fit.sem.mlr_task}()}, \code{\link{exp_std_fit.sem.mlr}()}, \code{\link{fit.cov}()}, \code{\link{fit.ols}()}, \code{\link{fit.sem.mlr}()}, \code{\link{fit.sem}()}, \code{\link{mvn_fit.ols_simulation_summary}()}, \code{\link{mvn_fit.ols_simulation}()}, \code{\link{mvn_fit.ols_task_summary}()}, \code{\link{mvn_fit.ols_task}()}, \code{\link{mvn_fit.ols}()}, \code{\link{mvn_fit.sem_simulation_summary}()}, \code{\link{mvn_fit.sem_simulation}()}, \code{\link{mvn_fit.sem_task_summary}()}, \code{\link{mvn_fit.sem_task}()}, \code{\link{mvn_fit.sem}()}, \code{\link{mvn_mar_10_fit.sem_simulation_summary}()}, \code{\link{mvn_mar_10_fit.sem_simulation}()}, \code{\link{mvn_mar_10_fit.sem_task_summary}()}, \code{\link{mvn_mar_10_fit.sem_task}()}, \code{\link{mvn_mar_10_fit.sem}()}, \code{\link{mvn_mar_20_fit.sem_simulation_summary}()}, \code{\link{mvn_mar_20_fit.sem_simulation}()}, \code{\link{mvn_mar_20_fit.sem_task_summary}()}, \code{\link{mvn_mar_20_fit.sem_task}()}, \code{\link{mvn_mar_20_fit.sem}()}, \code{\link{mvn_mar_30_fit.sem_simulation_summary}()}, \code{\link{mvn_mar_30_fit.sem_simulation}()}, \code{\link{mvn_mar_30_fit.sem_task_summary}()}, \code{\link{mvn_mar_30_fit.sem_task}()}, \code{\link{mvn_mar_30_fit.sem}()}, \code{\link{mvn_mcar_10_fit.sem_simulation_summary}()}, \code{\link{mvn_mcar_10_fit.sem_simulation}()}, \code{\link{mvn_mcar_10_fit.sem_task_summary}()}, \code{\link{mvn_mcar_10_fit.sem_task}()}, \code{\link{mvn_mcar_10_fit.sem}()}, \code{\link{mvn_mcar_20_fit.sem_simulation_summary}()}, \code{\link{mvn_mcar_20_fit.sem_simulation}()}, \code{\link{mvn_mcar_20_fit.sem_task_summary}()}, \code{\link{mvn_mcar_20_fit.sem_task}()}, \code{\link{mvn_mcar_20_fit.sem}()}, \code{\link{mvn_mcar_30_fit.sem_simulation_summary}()}, \code{\link{mvn_mcar_30_fit.sem_simulation}()}, \code{\link{mvn_mcar_30_fit.sem_task_summary}()}, \code{\link{mvn_mcar_30_fit.sem_task}()}, \code{\link{mvn_mcar_30_fit.sem}()}, 
\code{\link{mvn_mnar_10_fit.sem_simulation_summary}()}, \code{\link{mvn_mnar_10_fit.sem_simulation}()}, \code{\link{mvn_mnar_10_fit.sem_task_summary}()}, \code{\link{mvn_mnar_10_fit.sem_task}()}, \code{\link{mvn_mnar_10_fit.sem}()}, \code{\link{mvn_mnar_20_fit.sem_simulation_summary}()}, \code{\link{mvn_mnar_20_fit.sem_simulation}()}, \code{\link{mvn_mnar_20_fit.sem_task_summary}()}, \code{\link{mvn_mnar_20_fit.sem_task}()}, \code{\link{mvn_mnar_20_fit.sem}()}, \code{\link{mvn_mnar_30_fit.sem_simulation_summary}()}, \code{\link{mvn_mnar_30_fit.sem_simulation}()}, \code{\link{mvn_mnar_30_fit.sem_task_summary}()}, \code{\link{mvn_mnar_30_fit.sem_task}()}, \code{\link{mvn_mnar_30_fit.sem}()}, \code{\link{mvn_std_fit.sem_simulation_summary}()}, \code{\link{mvn_std_fit.sem_simulation}()}, \code{\link{mvn_std_fit.sem_task_summary}()}, \code{\link{mvn_std_fit.sem_task}()}, \code{\link{mvn_std_fit.sem}()}, \code{\link{vm_mod_fit.ols_simulation_summary}()}, \code{\link{vm_mod_fit.ols_simulation}()}, \code{\link{vm_mod_fit.ols_task_summary}()}, \code{\link{vm_mod_fit.ols_task}()}, \code{\link{vm_mod_fit.ols}()}, \code{\link{vm_mod_fit.sem.mlr_simulation_summary}()}, \code{\link{vm_mod_fit.sem.mlr_simulation}()}, \code{\link{vm_mod_fit.sem.mlr_task_summary}()}, \code{\link{vm_mod_fit.sem.mlr_task}()}, \code{\link{vm_mod_fit.sem.mlr}()}, \code{\link{vm_mod_std_fit.sem.mlr_simulation_summary}()}, \code{\link{vm_mod_std_fit.sem.mlr_simulation}()}, \code{\link{vm_mod_std_fit.sem.mlr_task_summary}()}, \code{\link{vm_mod_std_fit.sem.mlr_task}()}, \code{\link{vm_mod_std_fit.sem.mlr}()}, \code{\link{vm_sev_fit.ols_simulation}()}, \code{\link{vm_sev_fit.ols_task_summary}()}, \code{\link{vm_sev_fit.ols_task}()}, \code{\link{vm_sev_fit.ols}()}, \code{\link{vm_sev_fit.sem.mlr_simulation_summary}()}, \code{\link{vm_sev_fit.sem.mlr_simulation}()}, \code{\link{vm_sev_fit.sem.mlr_task_summary}()}, \code{\link{vm_sev_fit.sem.mlr_task}()}, \code{\link{vm_sev_fit.sem.mlr}()}, 
\code{\link{vm_sev_std_fit.sem.mlr_simulation_summary}()}, \code{\link{vm_sev_std_fit.sem.mlr_simulation}()}, \code{\link{vm_sev_std_fit.sem.mlr_task_summary}()}, \code{\link{vm_sev_std_fit.sem.mlr_task}()}, \code{\link{vm_sev_std_fit.sem.mlr}()} } \author{ Ivan Jacob Agaloos Pesigan } \concept{model fit functions} \keyword{fit}
#Loading activities from activity_labels.txt activities = read.table("activity_labels.txt",sep = " ",header = F)[,2] #Loading features from features.txt features = read.table("features.txt",sep = " ",header = F)[,2] #relevant features requiredFeatures = grepl("mean\\(\\)|std\\(\\)", features) #Loading test data X_test = read.table("./test/X_test.txt",header = F) Y_test = read.table("./test/Y_test.txt",header = F) subject_test = read.table("./test/subject_test.txt",header = F) #name columns of X_test from features object names(X_test) = features #remove unrequired portions from X_test X_test = X_test[,requiredFeatures] #Mapping activity data Y_test = as.data.frame(activities[Y_test[,1]]) names(Y_test) = c("Activity") names(subject_test) = "Subject" #Combined test object TestObj = cbind(subject_test,Y_test,X_test) #Similarly for training data X_train = read.table("./train/X_train.txt",header = F) Y_train = read.table("./train/Y_train.txt",header = F) subject_train = read.table("./train/subject_train.txt",header = F) names(X_train) = features X_train = X_train[,requiredFeatures] Y_train = as.data.frame(activities[Y_train[,1]]) names(Y_train) = c("Activity") names(subject_train) = "Subject" TrainObj = cbind(subject_train,Y_train,X_train) # Merging test and train data data = rbind(TestObj, TrainObj) #removing non alphabets from column names names(data) = tolower(gsub("[^[:alpha:]]", "", names(data))) dataFinal <- aggregate(data[, 3:ncol(data)], by=list(subject = data$subject, activity = data$activity), mean) write.table(format(dataFinal, scientific=T), "tidy.txt",quote=2,row.names=F)
/run_analysis.R
no_license
rsharma155/Getting-and-Cleaning-Data-Course-Assignment
R
false
false
1,687
r
#Loading activities from activity_labels.txt activities = read.table("activity_labels.txt",sep = " ",header = F)[,2] #Loading features from features.txt features = read.table("features.txt",sep = " ",header = F)[,2] #relevant features requiredFeatures = grepl("mean\\(\\)|std\\(\\)", features) #Loading test data X_test = read.table("./test/X_test.txt",header = F) Y_test = read.table("./test/Y_test.txt",header = F) subject_test = read.table("./test/subject_test.txt",header = F) #name columns of X_test from features object names(X_test) = features #remove unrequired portions from X_test X_test = X_test[,requiredFeatures] #Mapping activity data Y_test = as.data.frame(activities[Y_test[,1]]) names(Y_test) = c("Activity") names(subject_test) = "Subject" #Combined test object TestObj = cbind(subject_test,Y_test,X_test) #Similarly for training data X_train = read.table("./train/X_train.txt",header = F) Y_train = read.table("./train/Y_train.txt",header = F) subject_train = read.table("./train/subject_train.txt",header = F) names(X_train) = features X_train = X_train[,requiredFeatures] Y_train = as.data.frame(activities[Y_train[,1]]) names(Y_train) = c("Activity") names(subject_train) = "Subject" TrainObj = cbind(subject_train,Y_train,X_train) # Merging test and train data data = rbind(TestObj, TrainObj) #removing non alphabets from column names names(data) = tolower(gsub("[^[:alpha:]]", "", names(data))) dataFinal <- aggregate(data[, 3:ncol(data)], by=list(subject = data$subject, activity = data$activity), mean) write.table(format(dataFinal, scientific=T), "tidy.txt",quote=2,row.names=F)
library(DESeq2) library(ggplot2) #Preparing data---------------------------------- finalcount = read.table("lab/rawdata/finalcountGithubGenesALL.txt", header=TRUE, sep='\t') col = colnames(finalcount) ZT2 <- rep("ZT2", 14) #Create conditions for each timepoint ZT4 <- rep("ZT4", 14) ZT6 <- rep("ZT6", 14) ZT8 <- rep("ZT8", 14) ZT10 <- rep("ZT10", 14) ZT12 <- rep("ZT12", 14) ZT14 <- rep("ZT14", 14) ZT16 <- rep("ZT16", 14) ZT18 <- rep("ZT18", 14) ZT20 <- rep("ZT20", 14) ZT22 <- rep("ZT22", 14) ZT24 <- rep("ZT24", 14) ZTs <- c(ZT2,ZT4,ZT6,ZT8,ZT10,ZT12,ZT14,ZT16,ZT18,ZT20,ZT22,ZT24) colData = data.frame( row.names = colnames(counts), condition = ZTs ) cds = DESeqDataSetFromMatrix(countData=finalcount, #Creates DESeq2 object colData=colData, design= ~ condition) #DESeq2------------------------------------------ cds = estimateSizeFactors(cds) cds = estimateDispersions(cds) plotDispEsts(cds) cds <- DESeq(cds) res <- results(cds) head(res) sum(res$padj < 0.05, na.rm=T) plotMA(res, ylim=c(-5,5))
/Created_code/DESeq2/DESeq2Code.R
no_license
Kfalash/CVP-Testing
R
false
false
1,099
r
library(DESeq2) library(ggplot2) #Preparing data---------------------------------- finalcount = read.table("lab/rawdata/finalcountGithubGenesALL.txt", header=TRUE, sep='\t') col = colnames(finalcount) ZT2 <- rep("ZT2", 14) #Create conditions for each timepoint ZT4 <- rep("ZT4", 14) ZT6 <- rep("ZT6", 14) ZT8 <- rep("ZT8", 14) ZT10 <- rep("ZT10", 14) ZT12 <- rep("ZT12", 14) ZT14 <- rep("ZT14", 14) ZT16 <- rep("ZT16", 14) ZT18 <- rep("ZT18", 14) ZT20 <- rep("ZT20", 14) ZT22 <- rep("ZT22", 14) ZT24 <- rep("ZT24", 14) ZTs <- c(ZT2,ZT4,ZT6,ZT8,ZT10,ZT12,ZT14,ZT16,ZT18,ZT20,ZT22,ZT24) colData = data.frame( row.names = colnames(counts), condition = ZTs ) cds = DESeqDataSetFromMatrix(countData=finalcount, #Creates DESeq2 object colData=colData, design= ~ condition) #DESeq2------------------------------------------ cds = estimateSizeFactors(cds) cds = estimateDispersions(cds) plotDispEsts(cds) cds <- DESeq(cds) res <- results(cds) head(res) sum(res$padj < 0.05, na.rm=T) plotMA(res, ylim=c(-5,5))
testlist <- list(x = c(-8.34183228774499e-183, 1.69469457180006e-319, NaN, 3.23785921002061e-319, 0, NA, 1.390671161567e-308, 6.7198402955055e-304, -Inf, 0, 3.95252516672997e-323, 0, 1.42740904212728e+181, -1.01810330038712e+80, -6.67115915940654e+306, -5.82900682309329e+303, -1.08193841662861e+307, 6.40610294902753e-145, 5.48674796870944e-310, -2.6355489919319e-82, 1.00496080260074e+180, 2, 0, -5.61779104166239e+306, -1.29984381150753e+28, -1.34765550943381e+28, 2.2815699287538e-310, 1.00496080260074e+180, 7.29111855643999e-304, -1.30736177482179e+28, 1.27745613388713e-319, Inf), y = c(2.45008560103066e+35, 8.77707619836974e-320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(blorr:::blr_pairs_cpp,testlist) str(result)
/blorr/inst/testfiles/blr_pairs_cpp/libFuzzer_blr_pairs_cpp/blr_pairs_cpp_valgrind_files/1609955543-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
1,005
r
testlist <- list(x = c(-8.34183228774499e-183, 1.69469457180006e-319, NaN, 3.23785921002061e-319, 0, NA, 1.390671161567e-308, 6.7198402955055e-304, -Inf, 0, 3.95252516672997e-323, 0, 1.42740904212728e+181, -1.01810330038712e+80, -6.67115915940654e+306, -5.82900682309329e+303, -1.08193841662861e+307, 6.40610294902753e-145, 5.48674796870944e-310, -2.6355489919319e-82, 1.00496080260074e+180, 2, 0, -5.61779104166239e+306, -1.29984381150753e+28, -1.34765550943381e+28, 2.2815699287538e-310, 1.00496080260074e+180, 7.29111855643999e-304, -1.30736177482179e+28, 1.27745613388713e-319, Inf), y = c(2.45008560103066e+35, 8.77707619836974e-320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(blorr:::blr_pairs_cpp,testlist) str(result)
#' Keep only the rows of `df` whose row name, split on `sep`, has exactly
#' `rank` components (e.g. rank 6 for a genus-level lineage string).
#'
#' @param df   data frame whose row names are `sep`-separated lineage strings.
#' @param rank number of components a row name must have to be kept.
#' @param sep  literal separator between components (default ".").
#' @return the matching rows of `df`, always as a data frame.
extract.rank <- function(df, rank = 6, sep = ".") {
  # fixed = TRUE treats `sep` literally -- important because the default
  # "." is a regex metacharacter.
  parts <- strsplit(row.names(df), split = sep, fixed = TRUE)
  # lengths() replaces the original sapply(1:length(...), ...) loop, which
  # broke on zero-row input (1:0 yields c(1, 0)) and was O(n) R-level calls.
  keep <- lengths(parts) == rank
  df[keep, , drop = FALSE]
}

#' Replace each row name of `df` with its last `sep`-separated component
#' (e.g. "k.p.c" -> "c").
#'
#' @param df  data frame with `sep`-separated row names.
#' @param sep literal separator between components (default ".").
#' @return `df` with shortened row names.
extract.rank.name <- function(df, sep = ".") {
  parts <- strsplit(row.names(df), split = sep, fixed = TRUE)
  # vapply guarantees a character(1) result per row name, unlike sapply.
  row.names(df) <- vapply(parts, function(p) p[[length(p)]], character(1))
  df
}

#' Keep only the rows whose first column value is at least `cutoff`.
#'
#' @param df     data frame whose first column holds the values to filter on.
#' @param cutoff minimum value a row must reach to be kept (default 0.01).
#' @return the matching rows of `df`, always as a data frame.
extract.mle.cutoff <- function(df, cutoff = 0.01) {
  # drop = FALSE keeps the one-column result a data frame, matching the
  # positional `FALSE` third argument used by the original code.
  df[df[, 1] >= cutoff, , drop = FALSE]
}
/R/extract.rank.R
no_license
axrt/HMPTreesExtend
R
false
false
697
r
#' Keep only the rows of `df` whose row name, split on `sep`, has exactly
#' `rank` components (e.g. rank 6 for a genus-level lineage string).
#'
#' @param df   data frame whose row names are `sep`-separated lineage strings.
#' @param rank number of components a row name must have to be kept.
#' @param sep  literal separator between components (default ".").
#' @return the matching rows of `df`, always as a data frame.
extract.rank <- function(df, rank = 6, sep = ".") {
  # fixed = TRUE treats `sep` literally -- important because the default
  # "." is a regex metacharacter.
  parts <- strsplit(row.names(df), split = sep, fixed = TRUE)
  # lengths() replaces the original sapply(1:length(...), ...) loop, which
  # broke on zero-row input (1:0 yields c(1, 0)) and was O(n) R-level calls.
  keep <- lengths(parts) == rank
  df[keep, , drop = FALSE]
}

#' Replace each row name of `df` with its last `sep`-separated component
#' (e.g. "k.p.c" -> "c").
#'
#' @param df  data frame with `sep`-separated row names.
#' @param sep literal separator between components (default ".").
#' @return `df` with shortened row names.
extract.rank.name <- function(df, sep = ".") {
  parts <- strsplit(row.names(df), split = sep, fixed = TRUE)
  # vapply guarantees a character(1) result per row name, unlike sapply.
  row.names(df) <- vapply(parts, function(p) p[[length(p)]], character(1))
  df
}

#' Keep only the rows whose first column value is at least `cutoff`.
#'
#' @param df     data frame whose first column holds the values to filter on.
#' @param cutoff minimum value a row must reach to be kept (default 0.01).
#' @return the matching rows of `df`, always as a data frame.
extract.mle.cutoff <- function(df, cutoff = 0.01) {
  # drop = FALSE keeps the one-column result a data frame, matching the
  # positional `FALSE` third argument used by the original code.
  df[df[, 1] >= cutoff, , drop = FALSE]
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{get_swath} \alias{get_swath} \title{parse the swath} \usage{ get_swath(paths) } \description{ parse the swath }
/man/get_swath.Rd
no_license
MaThRk/soilmoistr
R
false
true
204
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{get_swath} \alias{get_swath} \title{parse the swath} \usage{ get_swath(paths) } \description{ parse the swath }
# IST 687
# Latika Mahesh Wadhwa
# Homework 6
# Assignment Due Date - October 11, 2018
# Submission Date - October 09, 2018

#__________________________________________________________________________#
# Step A: Load and Merge datasets                                          #
#__________________________________________________________________________#

# Download the 2017 census population estimates and reduce them to one row
# per state with columns: stateName, population, popOver18, percentOver18.
readStatesInfo <- function() {
  csv_file <- "https://www2.census.gov/programs-surveys/popest/datasets/2010-2017/state/asrh/scprc-est2017-18+pop-res.csv"
  states <- read.csv(url(csv_file))
  # Row 1 is the national total and the last row is Puerto Rico; neither
  # is a state (USArrests has no matching entry), so drop both.
  states <- states[-1, ]
  states <- states[-nrow(states), ]
  # The first four columns are census bookkeeping codes; keep the rest.
  states <- states[, -1:-4]
  # BUGFIX: the key column must be spelled "stateName" (capital N) so that
  # merge(..., by = "stateName") below finds it in BOTH data frames.  The
  # original script named it "statename", which made merge() fail.
  colnames(states) <- c("stateName", "population", "popOver18", "percentOver18")
  states
}

# 1) Read in the census dataset
states <- readStatesInfo()

# 2) Copy the USArrests dataset and keep the state name as a real column
#    (USArrests stores it in the row names).
arrests <- USArrests
arrests$stateName <- rownames(arrests)

# 3) Merge the two datasets on the common stateName column
mergeDF <- merge(states, arrests, by = "stateName")

# Describe mergeDF
str(mergeDF)
summary(mergeDF)

#__________________________________________________________________________#
# Step B: Explore the Data - Understanding distributions                   #
#__________________________________________________________________________#

# Install ggplot2 only if it is missing, then attach it
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
library(ggplot2)

# Helper: histogram of one mergeDF column with a title (replaces five
# near-identical copies of the same ggplot boilerplate).
plotHistogram <- function(df, column, binwidth, title) {
  ggplot(df, aes(x = .data[[column]])) +
    geom_histogram(binwidth = binwidth) +
    ggtitle(title)
}

# 4) Histograms of the numeric variables
plotHistogram(mergeDF, "population", 50000, "Histogram of Population")
plotHistogram(mergeDF, "Murder", 1, "Histogram of Murder")
plotHistogram(mergeDF, "Assault", 5, "Histogram of Assault")
plotHistogram(mergeDF, "Rape", 1, "Histogram of Rape")
plotHistogram(mergeDF, "UrbanPop", 1, "Histogram of Urban Population")

# 5) Boxplots for the population and the murder rate
ggplot(mergeDF, aes(x = factor(0), y = population)) + geom_boxplot()
ggplot(mergeDF, aes(x = factor(0), y = Murder)) + geom_boxplot()

# 6) I would choose the histogram over the box plot because a histogram
#    shows how many values fall within each bin, so a large dataset can be
#    visualized easily, while a boxplot only summarizes the distribution
#    by its minimum, quartiles and maximum, which makes it harder to see
#    the shape of the data.

#__________________________________________________________________________#
# Step C: Which State had the Most Murders - bar charts                    #
#__________________________________________________________________________#

# 7) Absolute number of murders per state.
#    BUGFIX: USArrests reports Murder as arrests per 100,000 inhabitants
#    (see ?USArrests), so the count is population * rate / 100000.  The
#    original divided by 10000, overstating murders tenfold.
mergeDF$numMurders <- mergeDF$population * mergeDF$Murder / 100000

# 8-9) Bar chart of murders per state, x labels rotated, with a title
g <- ggplot(mergeDF, aes(x = stateName, y = numMurders)) +
  geom_col() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Murders by State")
g

# 10) Same chart, with the x axis sorted by the number of murders
g <- ggplot(mergeDF, aes(x = reorder(stateName, numMurders), y = numMurders)) +
  geom_col() +
  theme(axis.text.x = element_text(angle = 90)) +
  ggtitle("Total Murders")
g

# 11) Same sorted chart, with percentOver18 mapped to the bar colour
g <- ggplot(mergeDF, aes(x = reorder(stateName, numMurders), y = numMurders,
                         fill = percentOver18)) +
  geom_col() +
  theme(axis.text.x = element_text(angle = 90)) +
  ggtitle("Total Murders")
g

#__________________________________________________________________________#
# Step D: Explore Murders - scatter chart                                  #
#__________________________________________________________________________#

# Population on the X axis, percent over 18 on the Y axis; point size and
# colour both encode the murder rate.
ggplot(mergeDF, aes(x = population, y = percentOver18)) +
  geom_point(aes(size = Murder, color = Murder)) +
  scale_color_gradient(low = "white", high = "red")
/IST 687 Homework 6 - Latika Mahesh Wadhwa.R
no_license
latika-wadhwa/IST-687-Introduction-to-Data-Science-
R
false
false
7,023
r
# IST 687
# Latika Mahesh Wadhwa
# Homework 6
# Assignment Due Date - October 11, 2018
# Submission Date - October 09, 2018

#__________________________________________________________________________#
# Step A: Load and Merge datasets                                          #
#__________________________________________________________________________#

# Download the 2017 census population estimates and reduce them to one row
# per state with columns: stateName, population, popOver18, percentOver18.
readStatesInfo <- function() {
  csv_file <- "https://www2.census.gov/programs-surveys/popest/datasets/2010-2017/state/asrh/scprc-est2017-18+pop-res.csv"
  states <- read.csv(url(csv_file))
  # Row 1 is the national total and the last row is Puerto Rico; neither
  # is a state (USArrests has no matching entry), so drop both.
  states <- states[-1, ]
  states <- states[-nrow(states), ]
  # The first four columns are census bookkeeping codes; keep the rest.
  states <- states[, -1:-4]
  # BUGFIX: the key column must be spelled "stateName" (capital N) so that
  # merge(..., by = "stateName") below finds it in BOTH data frames.  The
  # original script named it "statename", which made merge() fail.
  colnames(states) <- c("stateName", "population", "popOver18", "percentOver18")
  states
}

# 1) Read in the census dataset
states <- readStatesInfo()

# 2) Copy the USArrests dataset and keep the state name as a real column
#    (USArrests stores it in the row names).
arrests <- USArrests
arrests$stateName <- rownames(arrests)

# 3) Merge the two datasets on the common stateName column
mergeDF <- merge(states, arrests, by = "stateName")

# Describe mergeDF
str(mergeDF)
summary(mergeDF)

#__________________________________________________________________________#
# Step B: Explore the Data - Understanding distributions                   #
#__________________________________________________________________________#

# Install ggplot2 only if it is missing, then attach it
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
library(ggplot2)

# Helper: histogram of one mergeDF column with a title (replaces five
# near-identical copies of the same ggplot boilerplate).
plotHistogram <- function(df, column, binwidth, title) {
  ggplot(df, aes(x = .data[[column]])) +
    geom_histogram(binwidth = binwidth) +
    ggtitle(title)
}

# 4) Histograms of the numeric variables
plotHistogram(mergeDF, "population", 50000, "Histogram of Population")
plotHistogram(mergeDF, "Murder", 1, "Histogram of Murder")
plotHistogram(mergeDF, "Assault", 5, "Histogram of Assault")
plotHistogram(mergeDF, "Rape", 1, "Histogram of Rape")
plotHistogram(mergeDF, "UrbanPop", 1, "Histogram of Urban Population")

# 5) Boxplots for the population and the murder rate
ggplot(mergeDF, aes(x = factor(0), y = population)) + geom_boxplot()
ggplot(mergeDF, aes(x = factor(0), y = Murder)) + geom_boxplot()

# 6) I would choose the histogram over the box plot because a histogram
#    shows how many values fall within each bin, so a large dataset can be
#    visualized easily, while a boxplot only summarizes the distribution
#    by its minimum, quartiles and maximum, which makes it harder to see
#    the shape of the data.

#__________________________________________________________________________#
# Step C: Which State had the Most Murders - bar charts                    #
#__________________________________________________________________________#

# 7) Absolute number of murders per state.
#    BUGFIX: USArrests reports Murder as arrests per 100,000 inhabitants
#    (see ?USArrests), so the count is population * rate / 100000.  The
#    original divided by 10000, overstating murders tenfold.
mergeDF$numMurders <- mergeDF$population * mergeDF$Murder / 100000

# 8-9) Bar chart of murders per state, x labels rotated, with a title
g <- ggplot(mergeDF, aes(x = stateName, y = numMurders)) +
  geom_col() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Murders by State")
g

# 10) Same chart, with the x axis sorted by the number of murders
g <- ggplot(mergeDF, aes(x = reorder(stateName, numMurders), y = numMurders)) +
  geom_col() +
  theme(axis.text.x = element_text(angle = 90)) +
  ggtitle("Total Murders")
g

# 11) Same sorted chart, with percentOver18 mapped to the bar colour
g <- ggplot(mergeDF, aes(x = reorder(stateName, numMurders), y = numMurders,
                         fill = percentOver18)) +
  geom_col() +
  theme(axis.text.x = element_text(angle = 90)) +
  ggtitle("Total Murders")
g

#__________________________________________________________________________#
# Step D: Explore Murders - scatter chart                                  #
#__________________________________________________________________________#

# Population on the X axis, percent over 18 on the Y axis; point size and
# colour both encode the murder rate.
ggplot(mergeDF, aes(x = population, y = percentOver18)) +
  geom_point(aes(size = Murder, color = Murder)) +
  scale_color_gradient(low = "white", high = "red")
# Fit a cross-validated elastic net on the pancreas training set and log
# the fitted coefficient path.
library(glmnet)
# Training table: column 1 is the response, columns 4 onward are the
# predictors (columns 2-3 are skipped -- presumably identifiers; TODO
# confirm against the TrainingSet layout).
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/pancreas.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the cross-validation fold assignment is reproducible.
set.seed(123)
# 10-fold CV, mean-absolute-error loss, elastic-net mixing alpha = 0.85
# (mostly lasso, a little ridge); features are used as-is since
# standardize = FALSE.  NOTE: `glm` shadows stats::glm for the rest of
# this script.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.85,family="gaussian",standardize=FALSE)
# Append the summary of the fitted glmnet path to the per-run log file.
sink('./pancreas_088.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
/Model/EN/Lasso/pancreas/pancreas_088.R
no_license
esbgkannan/QSMART
R
false
false
349
r
# Fit a cross-validated elastic net on the pancreas training set and log
# the fitted coefficient path.
library(glmnet)
# Training table: column 1 is the response, columns 4 onward are the
# predictors (columns 2-3 are skipped -- presumably identifiers; TODO
# confirm against the TrainingSet layout).
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/pancreas.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the cross-validation fold assignment is reproducible.
set.seed(123)
# 10-fold CV, mean-absolute-error loss, elastic-net mixing alpha = 0.85
# (mostly lasso, a little ridge); features are used as-is since
# standardize = FALSE.  NOTE: `glm` shadows stats::glm for the rest of
# this script.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.85,family="gaussian",standardize=FALSE)
# Append the summary of the fitted glmnet path to the per-run log file.
sink('./pancreas_088.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkPredict.R
\name{checkPredict}
\alias{checkPredict}
\title{Prevention of numerical instability for a new observation}
\usage{
checkPredict(x, model, threshold = 1e-04, distance = "covdist", type = "UK")
}
\arguments{
\item{x}{a vector representing the input to check,}

\item{model}{list of objects of class \code{\link[DiceKriging]{km}}, one for each objective function,}

\item{threshold}{optional value for the minimal distance to an existing observation, default to \code{1e-4},}

\item{distance}{selection of the distance between new observations, between "\code{euclidean}", "\code{covdist}" (default) and "\code{covratio}", see details,}

\item{type}{"\code{SK}" or "\code{UK}" (default), depending on whether uncertainty related to trend estimation has to be taken into account.}
}
\value{
\code{TRUE} if the point should not be tested.
}
\description{
Check that the new point is not too close to already known observations to avoid numerical issues.
Closeness can be estimated with several distances.
}
\details{
If the distance between \code{x} and the closest observations in \code{model} is below \code{threshold},
\code{x} should not be evaluated to avoid numerical instabilities.
The distance can simply be the Euclidean distance or the canonical distance associated with the kriging covariance k:
\deqn{d(x,y) = \sqrt{k(x,x) - 2k(x,y) + k(y,y)}.}{d(x,y) = \sqrt(k(x,x) - 2k(x,y) + k(y,y)).}
The last solution is the ratio between the prediction variance at \code{x} and the variance of the process.
}
\author{
Mickael Binois
}
/man/checkPredict.Rd
no_license
ProgramMonkey-soso/DiceOptim
R
false
true
1,667
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkPredict.R
\name{checkPredict}
\alias{checkPredict}
\title{Prevention of numerical instability for a new observation}
\usage{
checkPredict(x, model, threshold = 1e-04, distance = "covdist", type = "UK")
}
\arguments{
\item{x}{a vector representing the input to check,}

\item{model}{list of objects of class \code{\link[DiceKriging]{km}}, one for each objective function,}

\item{threshold}{optional value for the minimal distance to an existing observation, default to \code{1e-4},}

\item{distance}{selection of the distance between new observations, between "\code{euclidean}", "\code{covdist}" (default) and "\code{covratio}", see details,}

\item{type}{"\code{SK}" or "\code{UK}" (default), depending on whether uncertainty related to trend estimation has to be taken into account.}
}
\value{
\code{TRUE} if the point should not be tested.
}
\description{
Check that the new point is not too close to already known observations to avoid numerical issues.
Closeness can be estimated with several distances.
}
\details{
If the distance between \code{x} and the closest observations in \code{model} is below \code{threshold},
\code{x} should not be evaluated to avoid numerical instabilities.
The distance can simply be the Euclidean distance or the canonical distance associated with the kriging covariance k:
\deqn{d(x,y) = \sqrt{k(x,x) - 2k(x,y) + k(y,y)}.}{d(x,y) = \sqrt(k(x,x) - 2k(x,y) + k(y,y)).}
The last solution is the ratio between the prediction variance at \code{x} and the variance of the process.
}
\author{
Mickael Binois
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wd.R \name{cd} \alias{cd} \title{Change (Working) Directory} \usage{ cd(loc) } \arguments{ \item{loc}{where you want to set the working directory} } \description{ This function allows setwd() and getwd() in one step. } \examples{ cd("~/Files") } \keyword{directory} \keyword{wd,} \keyword{working}
/man/cd.Rd
no_license
chrisvacc/R.Toolshed
R
false
true
376
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wd.R \name{cd} \alias{cd} \title{Change (Working) Directory} \usage{ cd(loc) } \arguments{ \item{loc}{where you want to set the working directory} } \description{ This function allows setwd() and getwd() in one step. } \examples{ cd("~/Files") } \keyword{directory} \keyword{wd,} \keyword{working}
# Load the arules library, which provides the apriori algorithm
library("arules")

# Set the working directory to the folder containing this script.
# NOTE(review): parent.frame(2)$ofile is only defined when the file is run
# through source(); this line fails under Rscript -- TODO confirm intended use.
directorio <- dirname(parent.frame(2)$ofile)
setwd(directorio)

# Load the 6 x 5 item matrix: one row per basket ("suceso" = event), one
# column per product; a 1 means the product was in that basket.
# (The T abbreviations were replaced by TRUE: `T` is an ordinary variable
# that can be reassigned, while `TRUE` cannot.)
matriz_datos <- Matrix(c(1,1,0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,1,0,1,1,0,0,0,0,0,0,1,0),
                       6, 5, byrow = TRUE,
                       dimnames = list(c("suceso1","suceso2","suceso3","suceso4","suceso5","suceso6"),
                                       c("Pan","Agua","Cafe","Leche","Naranjas")),
                       sparse = TRUE)

# Convert the matrix so apriori can be applied: first to a logical sparse
# matrix, then transposed, because the "transactions" coercion expects
# transactions column-wise.
muestra <- as(matriz_datos, "nsparseMatrix")
transpuestaMatriz <- t(muestra)
transacciones <- as(transpuestaMatriz, "transactions")

# Run the apriori algorithm with support >= 50% and confidence >= 80%
asociaciones <- apriori(transacciones, parameter = list(support = 0.5, confidence = 0.8))

# Print the resulting rules to the console
inspect(asociaciones)
/Laboratorio/PECL2 FCD/Apartado 1.r
no_license
Marcos-Barranquero/FCD-UAH
R
false
false
804
r
# Load the arules library, which provides the apriori algorithm
library("arules")

# Set the working directory to the folder containing this script.
# NOTE(review): parent.frame(2)$ofile is only defined when the file is run
# through source(); this line fails under Rscript -- TODO confirm intended use.
directorio <- dirname(parent.frame(2)$ofile)
setwd(directorio)

# Load the 6 x 5 item matrix: one row per basket ("suceso" = event), one
# column per product; a 1 means the product was in that basket.
# (The T abbreviations were replaced by TRUE: `T` is an ordinary variable
# that can be reassigned, while `TRUE` cannot.)
matriz_datos <- Matrix(c(1,1,0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,1,0,1,1,0,0,0,0,0,0,1,0),
                       6, 5, byrow = TRUE,
                       dimnames = list(c("suceso1","suceso2","suceso3","suceso4","suceso5","suceso6"),
                                       c("Pan","Agua","Cafe","Leche","Naranjas")),
                       sparse = TRUE)

# Convert the matrix so apriori can be applied: first to a logical sparse
# matrix, then transposed, because the "transactions" coercion expects
# transactions column-wise.
muestra <- as(matriz_datos, "nsparseMatrix")
transpuestaMatriz <- t(muestra)
transacciones <- as(transpuestaMatriz, "transactions")

# Run the apriori algorithm with support >= 50% and confidence >= 80%
asociaciones <- apriori(transacciones, parameter = list(support = 0.5, confidence = 0.8))

# Print the resulting rules to the console
inspect(asociaciones)
## Line-format ("lf") parsers.  Each takes a single line from a fingerprint
## file and returns a 3-element list:
##   [[1]] molecule id (or NA/"" when the format carries none)
##   [[2]] the set-bit positions (1-based) or raw feature strings
##   [[3]] a list of extra ("misc") fields, usually empty
## or NULL when the line cannot be parsed.

## JChem binary format: tab separated, first token is the molecule id;
## bit decoding is delegated to C ("parse_jchem_binary").
jchem.binary.lf <- function(line) {
  molid <- strsplit(line, "\t")[[1]][1]
  bitpos <- .Call("parse_jchem_binary", as.character(line), as.integer(nchar(line)) )
  if (is.null(bitpos)) return(NULL)
  list(molid, bitpos+1, list()) ## we add 1, since C does bit positions from 0
}

## FPS (chemfp) record: "<hex fingerprint> <id> [misc fields...]"; the hex
## block is decoded in C ("parse_hex").
fps.lf <- function(line) {
  toks <- strsplit(line, "\\s")[[1]];
  bitpos <- .Call("parse_hex", as.character(toks[1]), as.integer(nchar(toks[1])))
  if (is.null(bitpos)) return(NULL)
  if (length(toks) > 2) {
    misc <- list(toks[-c(1,2)])
  } else {
    misc <- list()
  }
  list(toks[2], bitpos+1, misc) ## we add 1, since C does bit positions from 0
}

## CDK format: "<id> {b1, b2, ...}" -- bit positions listed inside braces.
cdk.lf <- function(line) {
  p <- regexpr("{([0-9,\\s]*)}",line,perl=T)
  ## text between the braces, with the commas stripped out
  s <- gsub(',','',substr(line, p+1, p+attr(p,"match.length")-2))
  s <- lapply( strsplit(s,' '), as.numeric )
  ## everything before the "{" (whitespace removed) is the molecule id
  molid <- gsub("\\s+","", strsplit(line, "\\{")[[1]][1])
  list(molid, s[[1]], list())
}

## MOE format: bit positions inside double quotes; carries no molecule id.
moe.lf <- function(line) {
  p <- regexpr("\"([0-9\\s]*)\"",line, perl=T)
  s <- substr(line, p+1, p+attr(p,"match.length")-2)
  s <- lapply( strsplit(s,' '), as.numeric )
  list(NA, s[[1]], list())
}

## BCI format: first token is the name; the last two tokens are trailing
## bookkeeping fields and are dropped along with the name.
bci.lf <- function(line) {
  tokens <- strsplit(line, '\\s')[[1]]
  name <- tokens[1]
  tokens <- tokens[-c(1, length(tokens), length(tokens)-1)]
  list(name, as.numeric(tokens), list())
}

## ECFP format: first token is the name, the remaining tokens are kept as
## (string) features rather than numeric bit positions.
ecfp.lf <- function(line) {
  tokens <- strsplit(line, '\\s')[[1]]
  name <- tokens[1]
  tokens <- tokens[-1]
  list(name, tokens, list())
}

## Read a whole fingerprint file into a list of "fingerprint" (binary) or
## "featvec" (feature) S4 objects, using the supplied line parser `lf`.
## For the FPS format the bit length and provider are taken from the file
## header; for all other formats `size` must be supplied by the caller.
## TODO we should be iterating over lines and not reading
## them all in
fp.read <- function(f='fingerprint.txt', size=1024, lf=cdk.lf, header=FALSE, binary=TRUE) {
  ## the name of the parser doubles as the default "provider" tag
  lf.name <- deparse(substitute(lf))
  provider <- lf.name
  fplist <- list()
  fcon <- file(description=f,open='r')
  lines = readLines(fcon,n=-1)
  ## FPS keeps its own header block, handled below; other formats may
  ## carry a single header line that is simply skipped
  if (header && lf.name != 'fps.lf') lines = lines[-1]
  if (lf.name == 'fps.lf') {
    binary <- TRUE
    size <- NULL
    ## process the header block
    nheaderline = 0
    for (line in lines) {
      if (substr(line,1,1) != '#') break
      nheaderline <- nheaderline + 1
      if (nheaderline == 1 && length(grep("#FPS1", line)) != 1) stop("Invalid FPS format")
      if (length(grep("#num_bits", line)) == 1) size <- as.numeric(strsplit(line, '=')[[1]][2])
      if (length(grep("#software", line)) == 1) provider <- as.character(strsplit(line, '=')[[1]][2])
    }
    lines <- lines[ (nheaderline+1):length(lines) ]
    ## no #num_bits header: infer the bit count from the hex field width
    ## of the first data line (4 bits per hex digit)
    if (is.null(size)) { # num_bit
      size <- nchar(strsplit(line, '\\s')[[1]][1]) * 4
    }
  }
  c = 1
  for (line in lines) {
    dat <- lf(line)
    ## unparseable lines are reported and skipped, not fatal
    if (is.null(dat)) {
      warning(sprintf("Couldn't parse: %s", line))
      next
    }
    if (is.na(dat[[1]])) name <- ""
    else name <- dat[[1]]
    misc <- dat[[3]] ## usually empty
    if (binary) {
      fplist[[c]] <- new("fingerprint", nbit=size, bits=as.numeric(dat[[2]]), folded=FALSE, provider=provider, name=name, misc=misc)
    } else {
      ## convert the features to 'feature' objects
      feats <- lapply(dat[[2]], function(x) new("feature", feature=x))
      fplist[[c]] <- new("featvec", features=feats, provider=provider, name=name, misc=misc)
    }
    c <- c+1
  }
  close(fcon)
  fplist
}

## Convenience wrapper: read the file and convert the fingerprint list to
## a 0/1 matrix (one row per molecule, one column per bit).
## Need to supply the length of the bit string since fp.read does
## not provide that information
fp.read.to.matrix <- function(f='fingerprint.txt', size=1024, lf=cdk.lf, header=FALSE) {
  fplist <- fp.read(f, size, lf, header)
  fpmat <- fp.to.matrix(fplist)
  fpmat
}
/fingerprint/R/read.R
no_license
allaway/cdkr
R
false
false
3,686
r
## Line-format ("lf") parsers.  Each takes a single line from a fingerprint
## file and returns a 3-element list:
##   [[1]] molecule id (or NA/"" when the format carries none)
##   [[2]] the set-bit positions (1-based) or raw feature strings
##   [[3]] a list of extra ("misc") fields, usually empty
## or NULL when the line cannot be parsed.

## JChem binary format: tab separated, first token is the molecule id;
## bit decoding is delegated to C ("parse_jchem_binary").
jchem.binary.lf <- function(line) {
  molid <- strsplit(line, "\t")[[1]][1]
  bitpos <- .Call("parse_jchem_binary", as.character(line), as.integer(nchar(line)) )
  if (is.null(bitpos)) return(NULL)
  list(molid, bitpos+1, list()) ## we add 1, since C does bit positions from 0
}

## FPS (chemfp) record: "<hex fingerprint> <id> [misc fields...]"; the hex
## block is decoded in C ("parse_hex").
fps.lf <- function(line) {
  toks <- strsplit(line, "\\s")[[1]];
  bitpos <- .Call("parse_hex", as.character(toks[1]), as.integer(nchar(toks[1])))
  if (is.null(bitpos)) return(NULL)
  if (length(toks) > 2) {
    misc <- list(toks[-c(1,2)])
  } else {
    misc <- list()
  }
  list(toks[2], bitpos+1, misc) ## we add 1, since C does bit positions from 0
}

## CDK format: "<id> {b1, b2, ...}" -- bit positions listed inside braces.
cdk.lf <- function(line) {
  p <- regexpr("{([0-9,\\s]*)}",line,perl=T)
  ## text between the braces, with the commas stripped out
  s <- gsub(',','',substr(line, p+1, p+attr(p,"match.length")-2))
  s <- lapply( strsplit(s,' '), as.numeric )
  ## everything before the "{" (whitespace removed) is the molecule id
  molid <- gsub("\\s+","", strsplit(line, "\\{")[[1]][1])
  list(molid, s[[1]], list())
}

## MOE format: bit positions inside double quotes; carries no molecule id.
moe.lf <- function(line) {
  p <- regexpr("\"([0-9\\s]*)\"",line, perl=T)
  s <- substr(line, p+1, p+attr(p,"match.length")-2)
  s <- lapply( strsplit(s,' '), as.numeric )
  list(NA, s[[1]], list())
}

## BCI format: first token is the name; the last two tokens are trailing
## bookkeeping fields and are dropped along with the name.
bci.lf <- function(line) {
  tokens <- strsplit(line, '\\s')[[1]]
  name <- tokens[1]
  tokens <- tokens[-c(1, length(tokens), length(tokens)-1)]
  list(name, as.numeric(tokens), list())
}

## ECFP format: first token is the name, the remaining tokens are kept as
## (string) features rather than numeric bit positions.
ecfp.lf <- function(line) {
  tokens <- strsplit(line, '\\s')[[1]]
  name <- tokens[1]
  tokens <- tokens[-1]
  list(name, tokens, list())
}

## Read a whole fingerprint file into a list of "fingerprint" (binary) or
## "featvec" (feature) S4 objects, using the supplied line parser `lf`.
## For the FPS format the bit length and provider are taken from the file
## header; for all other formats `size` must be supplied by the caller.
## TODO we should be iterating over lines and not reading
## them all in
fp.read <- function(f='fingerprint.txt', size=1024, lf=cdk.lf, header=FALSE, binary=TRUE) {
  ## the name of the parser doubles as the default "provider" tag
  lf.name <- deparse(substitute(lf))
  provider <- lf.name
  fplist <- list()
  fcon <- file(description=f,open='r')
  lines = readLines(fcon,n=-1)
  ## FPS keeps its own header block, handled below; other formats may
  ## carry a single header line that is simply skipped
  if (header && lf.name != 'fps.lf') lines = lines[-1]
  if (lf.name == 'fps.lf') {
    binary <- TRUE
    size <- NULL
    ## process the header block
    nheaderline = 0
    for (line in lines) {
      if (substr(line,1,1) != '#') break
      nheaderline <- nheaderline + 1
      if (nheaderline == 1 && length(grep("#FPS1", line)) != 1) stop("Invalid FPS format")
      if (length(grep("#num_bits", line)) == 1) size <- as.numeric(strsplit(line, '=')[[1]][2])
      if (length(grep("#software", line)) == 1) provider <- as.character(strsplit(line, '=')[[1]][2])
    }
    lines <- lines[ (nheaderline+1):length(lines) ]
    ## no #num_bits header: infer the bit count from the hex field width
    ## of the first data line (4 bits per hex digit)
    if (is.null(size)) { # num_bit
      size <- nchar(strsplit(line, '\\s')[[1]][1]) * 4
    }
  }
  c = 1
  for (line in lines) {
    dat <- lf(line)
    ## unparseable lines are reported and skipped, not fatal
    if (is.null(dat)) {
      warning(sprintf("Couldn't parse: %s", line))
      next
    }
    if (is.na(dat[[1]])) name <- ""
    else name <- dat[[1]]
    misc <- dat[[3]] ## usually empty
    if (binary) {
      fplist[[c]] <- new("fingerprint", nbit=size, bits=as.numeric(dat[[2]]), folded=FALSE, provider=provider, name=name, misc=misc)
    } else {
      ## convert the features to 'feature' objects
      feats <- lapply(dat[[2]], function(x) new("feature", feature=x))
      fplist[[c]] <- new("featvec", features=feats, provider=provider, name=name, misc=misc)
    }
    c <- c+1
  }
  close(fcon)
  fplist
}

## Convenience wrapper: read the file and convert the fingerprint list to
## a 0/1 matrix (one row per molecule, one column per bit).
## Need to supply the length of the bit string since fp.read does
## not provide that information
fp.read.to.matrix <- function(f='fingerprint.txt', size=1024, lf=cdk.lf, header=FALSE) {
  fplist <- fp.read(f, size, lf, header)
  fpmat <- fp.to.matrix(fplist)
  fpmat
}
# 1) Have total emissions from PM2.5 decreased in the United States from
#    1999 to 2008?  Using the base plotting system, make a plot showing the
#    total PM2.5 emission from all sources for each of the years 1999,
#    2002, 2005, and 2008.

# Script-specific data location; adjust for your machine.
# NOTE(review): setwd() in a script hurts portability -- kept for parity
# with the original workflow.
setwd('~/Dropbox/Personal/Coursera/e_exploratory_data/On_git/Project2')

# Unpack the NEI archive and list what is in the data directory
unzip("./data/exdata_data_NEI_data.zip", exdir="./data/")
list.files("./data/")

# Load the emissions data (one row per source/pollutant/year measurement)
NEI <- readRDS("./data/summarySCC_PM25.rds")

# Total PM2.5 emissions per measurement year.  tapply() returns a named
# numeric vector whose names are the years as character strings.
totals <- tapply(NEI$Emissions, NEI$year, sum)
# BUGFIX: plot() needs numeric x values, so convert the year names
# explicitly.  The original did `names(data) <- as.integer(names(data))`,
# which is a no-op (names are always coerced back to character) and then
# passed the character names straight to plot().
years <- as.numeric(names(totals))

# Create the output file
png(filename="Plot1.png", width=480, height=480, units ="px")
# Plot the yearly totals
plot(years, totals, xlab = "Year", ylab="PM2.5 Total Emissions", main="PM2.5 Emission in U.S. by Year")
# Close the file
dev.off()

# Conclusion
# The PM2.5 total emission have decreased in the U.S. from 1999 to 2008
/Project2/Plot1.R
no_license
martingascon/Exploratory_Data_Analysis
R
false
false
953
r
# 1) Have total emissions from PM2.5 decreased in the United States from
#    1999 to 2008?  Using the base plotting system, make a plot showing the
#    total PM2.5 emission from all sources for each of the years 1999,
#    2002, 2005, and 2008.

# Script-specific data location; adjust for your machine.
# NOTE(review): setwd() in a script hurts portability -- kept for parity
# with the original workflow.
setwd('~/Dropbox/Personal/Coursera/e_exploratory_data/On_git/Project2')

# Unpack the NEI archive and list what is in the data directory
unzip("./data/exdata_data_NEI_data.zip", exdir="./data/")
list.files("./data/")

# Load the emissions data (one row per source/pollutant/year measurement)
NEI <- readRDS("./data/summarySCC_PM25.rds")

# Total PM2.5 emissions per measurement year.  tapply() returns a named
# numeric vector whose names are the years as character strings.
totals <- tapply(NEI$Emissions, NEI$year, sum)
# BUGFIX: plot() needs numeric x values, so convert the year names
# explicitly.  The original did `names(data) <- as.integer(names(data))`,
# which is a no-op (names are always coerced back to character) and then
# passed the character names straight to plot().
years <- as.numeric(names(totals))

# Create the output file
png(filename="Plot1.png", width=480, height=480, units ="px")
# Plot the yearly totals
plot(years, totals, xlab = "Year", ylab="PM2.5 Total Emissions", main="PM2.5 Emission in U.S. by Year")
# Close the file
dev.off()

# Conclusion
# The PM2.5 total emission have decreased in the U.S. from 1999 to 2008
# Per-item analysis of reweighted surprisal under the GPT2-M noisy-memory
# model: merge Wikipedia noun-frequency covariates into the model's
# per-item output, build centered condition codes, and fit a series of
# lme4 mixed-effects models at the embedded-verb region (V1_0).
#
# Alternative per-trial input, kept here for reference:
#   /juice/scr/mhahn/reinforce-logs-both-short/full-logs-tsv-perItem/collect12_NormJudg_Short_Cond_W_GPT2_ByTrial.py.tsv
data <- read.csv("/juice/scr/mhahn/reinforce-logs-both-short/full-logs-tsv-perItem/char-lm-ud-stationary_12_SuperLong_WithAutoencoder_WithEx_Samples_Short_Combination_Subseq_VeryLong_WithSurp12_NormJudg_Short_Cond_Shift_NoComma_Bugfix_Q_3_W_GPT2M.py_620912032_Model", sep="\t")

library(tidyr)
library(dplyr)
library(lme4)

# ---- Noun frequency covariates ------------------------------------------
# Wikipedia counts per noun, spread into one column per
# HasThat_Capital_False condition as log(1 + count).
nounFreqs <- read.csv("../../../../../forgetting/corpus_counts/wikipedia/results/results_counts4.py.tsv", sep="\t")
nounFreqs$LCount <- log(1 + nounFreqs$Count)
nounFreqs$Condition <- paste(nounFreqs$HasThat, nounFreqs$Capital, "False", sep="_")
nounFreqs <- as.data.frame(unique(nounFreqs) %>% select(Noun, Condition, LCount) %>% group_by(Noun) %>% spread(Condition, LCount)) %>% rename(noun = Noun)

# Older archived per-noun counts (unused columns dropped) as a fallback.
nounFreqs2 <- read.csv("../../../../../forgetting/corpus_counts/wikipedia/results/archive/perNounCounts.csv") %>% mutate(X=NULL, ForgettingVerbLogOdds=NULL, ForgettingMiddleVerbLogOdds=NULL) %>% rename(noun = Noun) %>% mutate(True_True_True=NULL, True_False_True=NULL)
nounFreqs <- unique(rbind(nounFreqs, nounFreqs2))
nounFreqs <- nounFreqs[!duplicated(nounFreqs$noun), ]

data <- merge(data, nounFreqs %>% rename(Noun = noun), by=c("Noun"), all.x=TRUE)

# Centered "that"-bias of the noun: log-frequency with vs. without "that".
data <- data %>% mutate(True_Minus_False.C = True_False_False - False_False_False - mean(True_False_False - False_False_False, na.rm=TRUE))

# Nouns without counts (previously: conjecture, guess, insinuation,
# intuition, observation).
unique((data %>% filter(is.na(True_Minus_False.C)))$Noun)

# Centered condition codes derived from the condition label.
data$compatible.C <- (grepl("_co", data$Condition) - 0.5)
data$HasRC.C <- (grepl("SCRC", data$Condition) - 0.5)
data$HasSC.C <- (0.5 - grepl("NoSC", data$Condition))

# Deliberate stop: everything below is meant to be run interactively.
crash()

# ---- Mixed-effects analyses at the embedded verb (Region V1_0) ----------

summary(lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C + (1|ID) + (1+compatible.C|Item) + (1|Noun),
             data=data %>% filter(Region == "V1_0", deletion_rate==0.55)))

# Non-GPT2M runs, deletion rate 0.35, SC items only.
model <- (lmer(SurprisalReweighted ~ HasRC.C + compatible.C + True_Minus_False.C + (1+compatible.C|Item) + (1|Noun),
               data=data %>% filter(Region == "V1_0", deletion_rate==0.35, HasSC.C>0, !grepl("GPT2M", Script))))
# Notes from inspecting coef(model)$Item (full tables in the analysis log):
# the ordering of the by-item compatibility slopes closely replicates
# across runs, so it is a property of GPT2 and stable across multiple runs
# of the memory model. Splitting the by-item slopes by item type, a
# one-sample t-test found a reliably nonzero mean for the "v_" items
# (t = 3.16, df = 31, p ~ .0035, mean ~ 0.73) but not for the others
# (t = 1.22, df = 34, p ~ .23, mean ~ 0.19) -- an interesting difference
# between the VN items and the rest that seems to replicate across
# configurations.

# Same model at deletion rate 0.55, SC items only.
model <- (lmer(SurprisalReweighted ~ HasRC.C + compatible.C + True_Minus_False.C + (1+compatible.C|Item) + (1|Noun),
               data=data %>% filter(Region == "V1_0", deletion_rate==0.55, HasSC.C>0)))

summary(lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C + (1|ID) + (1+compatible.C|Item) + (1|Noun),
             data=data %>% filter(Region == "V1_0", deletion_rate==0.3, predictability_weight==0)))

model <- (lmer(SurprisalReweighted ~ deletion_rate + compatible.C + True_Minus_False.C + (1+compatible.C|ID) + (1+compatible.C|Item) + (1|Noun),
               data=data %>% filter(Region == "V1_0", predictability_weight==1)))

model <- (lmer(SurprisalReweighted ~ predictability_weight + deletion_rate + compatible.C + True_Minus_False.C + (1+compatible.C|ID) + (1+compatible.C|Item) + (1|Noun),
               data=data %>% filter(Region == "V1_0")))

# Item-level means version (collapsing over runs before fitting).
summary(lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C + (1+compatible.C|Item) + (1|Noun),
             data=data %>% filter(Region == "V1_0", predictability_weight==1) %>% group_by(Noun, compatible.C, True_Minus_False.C, Item) %>% summarise(SurprisalReweighted=mean(SurprisalReweighted))))

model <- (lmer(SurprisalReweighted ~ predictability_weight + deletion_rate + compatible.C + True_Minus_False.C + (1+compatible.C|ID) + (1+compatible.C|Item),
               data=data %>% filter(Region == "V1_0")))

# By-item compatibility slopes, most negative (strongest facilitation
# from compatibility) first. Full slope listing recorded in the log.
byItemSlopes <- coef(model)$Item
byItemSlopes$Item <- rownames(byItemSlopes)
byItemSlopes[order(byItemSlopes$compatible.C), ]

# By-run (model ID) compatibility slopes; these span a fairly narrow
# range compared to the by-item slopes (full listing in the log).
byIDSlopes <- coef(model)$ID
byIDSlopes$ID <- rownames(byIDSlopes)
byIDSlopes[order(byIDSlopes$compatible.C), ]

crash()

summary(lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C + (1|ID) + (1|Item) + (1|Noun),
             data=data %>% filter(Region == "V1_0", deletion_rate==0.3, predictability_weight==0)))

model <- (lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C + (1+compatible.C|ID) + (1+compatible.C|Item),
               data=data %>% filter(Region == "V1_0", deletion_rate==0.4, predictability_weight==0)))
byItemSlopes <- coef(model)$Item
byItemSlopes$Item <- rownames(byItemSlopes)
# Interesting (but the evidence isn't strong, might be accidental):
cor.test(byItemSlopes[["(Intercept)"]], byItemSlopes$compatible.C)
# These slopes are very similar with predictability_weight == 0.5; per-item
# annotations (e.g. which continuation each item used) are kept in the log.

# Same configuration, but predicting the reweighted that-fraction.
model2 <- (lmer(ThatFractionReweighted ~ compatible.C + True_Minus_False.C + (1+compatible.C|ID) + (1+compatible.C|Item),
                data=data %>% filter(Region == "V1_0", deletion_rate==0.4, predictability_weight==0)))
byItemSlopes <- coef(model2)$Item
byItemSlopes$Item <- rownames(byItemSlopes)

# Deletion rate 0.3, predictability weight 0 (slope listing in the log).
model <- (lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C + (1+compatible.C|ID) + (1+compatible.C|Item),
               data=data %>% filter(Region == "V1_0", deletion_rate==0.3, predictability_weight==0)))

# ---- Script contrast ----------------------------------------------------
# Contrast-code the generating script and center the deletion rate, then
# fit their interactions with compatibility.
data$Script.C <- ifelse(data$Script == "script__J_3_W_GPT2M", -0.5, 0.5)
data$deletion_rate.C <- data$deletion_rate - mean(data$deletion_rate, na.rm=TRUE)
model <- (lmer(SurprisalReweighted ~ deletion_rate.C*compatible.C + Script.C*compatible.C + True_Minus_False.C + (1|ID) + (1+compatible.C|Item),
               data=data %>% filter(Region == "V1_0", predictability_weight==0.5)))
byItemSlopes <- coef(model)$Item
byItemSlopes$Item <- rownames(byItemSlopes)
/initial/lm/trainAttention/analyze_byItem/analyze_M.R
no_license
m-hahn/forgetting-model
R
false
false
44,209
r
#data = read.csv("/juice/scr/mhahn/reinforce-logs-both-short/full-logs-tsv-perItem/collect12_NormJudg_Short_Cond_W_GPT2_ByTrial.py.tsv", sep="\t") data = read.csv("/juice/scr/mhahn/reinforce-logs-both-short/full-logs-tsv-perItem/char-lm-ud-stationary_12_SuperLong_WithAutoencoder_WithEx_Samples_Short_Combination_Subseq_VeryLong_WithSurp12_NormJudg_Short_Cond_Shift_NoComma_Bugfix_Q_3_W_GPT2M.py_620912032_Model", sep="\t") library(tidyr) library(dplyr) library(lme4) nounFreqs = read.csv("../../../../../forgetting/corpus_counts/wikipedia/results/results_counts4.py.tsv", sep="\t") nounFreqs$LCount = log(1+nounFreqs$Count) nounFreqs$Condition = paste(nounFreqs$HasThat, nounFreqs$Capital, "False", sep="_") nounFreqs = as.data.frame(unique(nounFreqs) %>% select(Noun, Condition, LCount) %>% group_by(Noun) %>% spread(Condition, LCount)) %>% rename(noun = Noun) nounFreqs2 = read.csv("../../../../../forgetting/corpus_counts/wikipedia/results/archive/perNounCounts.csv") %>% mutate(X=NULL, ForgettingVerbLogOdds=NULL, ForgettingMiddleVerbLogOdds=NULL) %>% rename(noun = Noun) %>% mutate(True_True_True=NULL,True_False_True=NULL) nounFreqs = unique(rbind(nounFreqs, nounFreqs2)) nounFreqs = nounFreqs[!duplicated(nounFreqs$noun),] data = merge(data, nounFreqs %>% rename(Noun = noun), by=c("Noun"), all.x=TRUE) data = data %>% mutate(True_Minus_False.C = True_False_False-False_False_False-mean(True_False_False-False_False_False, na.rm=TRUE)) unique((data %>% filter(is.na(True_Minus_False.C)))$Noun) # [1] conjecture guess insinuation intuition observation data$compatible.C = (grepl("_co", data$Condition)-0.5) data$HasRC.C = (grepl("SCRC", data$Condition)-0.5) data$HasSC.C = (0.5-grepl("NoSC", data$Condition)) crash() summary(lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C + (1|ID) + (1+compatible.C|Item) + (1|Noun), data=data %>% filter(Region == "V1_0", deletion_rate==0.55))) model = (lmer(SurprisalReweighted ~ HasRC.C + compatible.C + True_Minus_False.C + 
(1+compatible.C|Item) + (1|Noun), data=data %>% filter(Region == "V1_0", deletion_rate==0.35, HasSC.C>0, !grepl("GPT2M", Script)))) # The ordering of slopes closely replicates -- so it is a property of GPT2 and stable across multiple runs of the memory model # (Intercept) HasRC.C compatible.C True_Minus_False.C #v_guest_thug 12.205749 -0.02194668 -2.545766952 -0.2042718 #v_psychiatrist_nurse 12.665236 -0.02194668 -2.208907242 -0.2042718 #o_lifeguard_swimmer 12.590915 -0.02194668 -1.732583283 -0.2042718 #o_bureaucrat_guard 13.837014 -0.02194668 -1.475658775 -0.2042718 #o_student_bully 15.162266 -0.02194668 -1.080967839 -0.2042718 #o_senator_diplomat 8.612687 -0.02194668 -1.002161147 -0.2042718 #v_victim_swimmer 12.308828 -0.02194668 -0.981938076 -0.2042718 #v_thief_detective 10.328938 -0.02194668 -0.805953009 -0.2042718 #o_child_medic 13.670466 -0.02194668 -0.723813968 -0.2042718 #o_bookseller_thief 11.756582 -0.02194668 -0.650526839 -0.2042718 #o_ceo_employee 4.019287 -0.02194668 -0.546799699 -0.2042718 #o_commander_president 14.216134 -0.02194668 -0.507111841 -0.2042718 #v_doctor_colleague 8.566765 -0.02194668 -0.448448220 -0.2042718 #o_trickster_woman 15.516596 -0.02194668 -0.266296919 -0.2042718 # Interesting difference between the VN and the other items (seems to replicate across configurations) # u = coef(model)$Item #> u$v = grepl("v_", u$item) #> u1 = u[u$v,] #> u2 = u[!u$v,] #> t.test(u1$compatible.C) # # One Sample t-test # #data: u1$compatible.C #t = 3.1617, df = 31, p-value = 0.003495 #alternative hypothesis: true mean is not equal to 0 #95 percent confidence interval: # 0.2577982 1.1948335 #sample estimates: #mean of x #0.7263159 # #> t.test(u2$compatible.C) # # One Sample t-test # #data: u2$compatible.C #t = 1.2206, df = 34, p-value = 0.2306 #alternative hypothesis: true mean is not equal to 0 #95 percent confidence interval: # -0.1278902 0.5125606 #sample estimates: #mean of x #0.1923352 model = (lmer(SurprisalReweighted ~ HasRC.C + compatible.C + 
True_Minus_False.C + (1+compatible.C|Item) + (1|Noun), data=data %>% filter(Region == "V1_0", deletion_rate==0.55, HasSC.C>0))) # (Intercept) HasRC.C compatible.C True_Minus_False.C #o_lifeguard_swimmer 12.502503 0.1911667 -2.79518307 -0.2625345 #v_psychiatrist_nurse 12.555227 0.1911667 -2.45026724 -0.2625345 #v_guest_thug 13.251113 0.1911667 -2.36664022 -0.2625345 #o_daughter_sister 9.373278 0.1911667 -1.56298862 -0.2625345 #o_bureaucrat_guard 13.962854 0.1911667 -1.17707479 -0.2625345 #o_student_bully 15.263687 0.1911667 -1.14062156 -0.2625345 #v_victim_swimmer 12.425828 0.1911667 -1.05488009 -0.2625345 #o_child_medic 13.984062 0.1911667 -0.96919797 -0.2625345 #v_thief_detective 10.376001 0.1911667 -0.96217550 -0.2625345 #v_guest_cousin 13.405787 0.1911667 -0.91731791 -0.2625345 summary(lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C + (1|ID) + (1+compatible.C|Item) + (1|Noun), data=data %>% filter(Region == "V1_0", deletion_rate==0.3, predictability_weight==0))) model = (lmer(SurprisalReweighted ~ deletion_rate + compatible.C + True_Minus_False.C + (1+compatible.C|ID) + (1+compatible.C|Item) + (1|Noun), data=data %>% filter(Region == "V1_0", predictability_weight==1))) model = (lmer(SurprisalReweighted ~ predictability_weight + deletion_rate + compatible.C + True_Minus_False.C + (1+compatible.C|ID) + (1+compatible.C|Item) + (1|Noun), data=data %>% filter(Region == "V1_0"))) summary(lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C + (1+compatible.C|Item) + (1|Noun), data=data %>% filter(Region == "V1_0", predictability_weight==1) %>% group_by(Noun, compatible.C, True_Minus_False.C, Item) %>% summarise(SurprisalReweighted=mean(SurprisalReweighted)))) model = (lmer(SurprisalReweighted ~ predictability_weight + deletion_rate + compatible.C + True_Minus_False.C + (1+compatible.C|ID) + (1+compatible.C|Item), data=data %>% filter(Region == "V1_0"))) byItemSlopes = coef(model)$Item byItemSlopes$Item = rownames(byItemSlopes) 
byItemSlopes[order(byItemSlopes$compatible.C),] #> byItemSlopes[order(byItemSlopes$compatible.C),] # (Intercept) predictability_weight deletion_rate compatible.C True_Minus_False.C Item #o_child_medic 7.4318434 -1.069783 8.131016 -3.20739170 -0.1758554 o_child_medic #o_senator_diplomat 4.8994153 -1.069783 8.131016 -3.18698116 -0.1758554 o_senator_diplomat #o_mobster_media 4.1932516 -1.069783 8.131016 -2.42209852 -0.1758554 o_mobster_media #o_victim_criminal 8.6898748 -1.069783 8.131016 -2.17947177 -0.1758554 o_victim_criminal #o_student_bully 7.9275158 -1.069783 8.131016 -1.89616001 -0.1758554 o_student_bully #v_psychiatrist_nurse 5.6874614 -1.069783 8.131016 -1.56520274 -0.1758554 v_psychiatrist_nurse #o_lifesaver_swimmer 7.9101302 -1.069783 8.131016 -1.49021589 -0.1758554 o_lifesaver_swimmer #v_guest_thug 8.2118196 -1.069783 8.131016 -1.48537021 -0.1758554 v_guest_thug #o_CEO_employee 1.9447643 -1.069783 8.131016 -1.30279823 -0.1758554 o_CEO_employee #v_victim_swimmer 6.7159427 -1.069783 8.131016 -1.26434984 -0.1758554 v_victim_swimmer #v_teacher_principal 0.7832282 -1.069783 8.131016 -0.96076600 -0.1758554 v_teacher_principal #v_sponsor_musician 7.9813119 -1.069783 8.131016 -0.95908404 -0.1758554 v_sponsor_musician #o_bureaucrat_guard 10.1710971 -1.069783 8.131016 -0.85949145 -0.1758554 o_bureaucrat_guard #v_medic_survivor 4.9993047 -1.069783 8.131016 -0.85518928 -0.1758554 v_medic_survivor #o_surgeon_patient 2.4059452 -1.069783 8.131016 -0.78521757 -0.1758554 o_surgeon_patient #v_driver_guide 3.4735911 -1.069783 8.131016 -0.68676289 -0.1758554 v_driver_guide #v_janitor_organizer 2.3548375 -1.069783 8.131016 -0.63490585 -0.1758554 v_janitor_organizer #o_sculptor_painter 3.2599189 -1.069783 8.131016 -0.62205515 -0.1758554 o_sculptor_painter #v_investor_scientist 7.4722328 -1.069783 8.131016 -0.60643089 -0.1758554 v_investor_scientist #v_doctor_colleague 4.4566625 -1.069783 8.131016 -0.54478152 -0.1758554 v_doctor_colleague #v_thief_detective 4.9405932 -1.069783 
8.131016 -0.48976081 -0.1758554 v_thief_detective #o_pharmacist_stranger 6.6452869 -1.069783 8.131016 -0.46649846 -0.1758554 o_pharmacist_stranger #o_cousin_bror 7.5825574 -1.069783 8.131016 -0.43043542 -0.1758554 o_cousin_bror #o_daughter_sister 5.6936303 -1.069783 8.131016 -0.30288336 -0.1758554 o_daughter_sister #o_commander_president 8.3226388 -1.069783 8.131016 -0.29558488 -0.1758554 o_commander_president #v_pediatrician_receptionist 6.8678478 -1.069783 8.131016 -0.27311863 -0.1758554 v_pediatrician_receptionist #o_musician_far 6.2599674 -1.069783 8.131016 -0.21466778 -0.1758554 o_musician_far #o_actor_starlet 4.4613360 -1.069783 8.131016 -0.20599745 -0.1758554 o_actor_starlet #v_actor_fans 5.7156175 -1.069783 8.131016 -0.11907038 -0.1758554 v_actor_fans #v_president_farmer 4.3350909 -1.069783 8.131016 -0.11284931 -0.1758554 v_president_farmer #v_customer_vendor 2.0140520 -1.069783 8.131016 -0.08439834 -0.1758554 v_customer_vendor #o_tenant_foreman 7.6368918 -1.069783 8.131016 0.07788745 -0.1758554 o_tenant_foreman #v_plaintiff_jury 4.1507159 -1.069783 8.131016 0.17515407 -0.1758554 v_plaintiff_jury #v_criminal_stranger 4.6489168 -1.069783 8.131016 0.25419893 -0.1758554 v_criminal_stranger #v_firefighter_neighbor 6.0258317 -1.069783 8.131016 0.30957481 -0.1758554 v_firefighter_neighbor #o_extremist_agent 2.1334148 -1.069783 8.131016 0.34295117 -0.1758554 o_extremist_agent #v_fisherman_gardener 2.4621326 -1.069783 8.131016 0.37617751 -0.1758554 v_fisherman_gardener #v_plumber_apprentice 1.6586674 -1.069783 8.131016 0.49112117 -0.1758554 v_plumber_apprentice #v_bully_children 1.1935835 -1.069783 8.131016 0.60191783 -0.1758554 v_bully_children #o_student_professor 3.4573072 -1.069783 8.131016 0.60531945 -0.1758554 o_student_professor #v_guest_cousin 3.6737238 -1.069783 8.131016 0.63851850 -0.1758554 v_guest_cousin #v_judge_attorney 0.5360451 -1.069783 8.131016 0.70388971 -0.1758554 v_judge_attorney #v_vendor_salesman 6.5744689 -1.069783 8.131016 0.72010601 
-0.1758554 v_vendor_salesman #o_driver_tourist 6.1229162 -1.069783 8.131016 0.76131710 -0.1758554 o_driver_tourist #v_manager_boss 6.7336547 -1.069783 8.131016 0.76873938 -0.1758554 v_manager_boss #v_lifeguard_soldier 9.3321709 -1.069783 8.131016 0.78086825 -0.1758554 v_lifeguard_soldier #o_consultant_artist 7.3432777 -1.069783 8.131016 0.79827348 -0.1758554 o_consultant_artist #v_captain_crew 3.1820146 -1.069783 8.131016 0.84955351 -0.1758554 v_captain_crew #v_fiancé_author 3.5447566 -1.069783 8.131016 0.87871249 -0.1758554 v_fiancé_author #v_banker_analyst 5.8870418 -1.069783 8.131016 0.92507940 -0.1758554 v_banker_analyst #o_principal_teacher 2.5421321 -1.069783 8.131016 1.00835969 -0.1758554 o_principal_teacher #o_scientist_mayor 1.9104177 -1.069783 8.131016 1.08710326 -0.1758554 o_scientist_mayor #o_neighbor_woman 0.7105074 -1.069783 8.131016 1.10618938 -0.1758554 o_neighbor_woman #o_bookseller_thief 6.1366239 -1.069783 8.131016 1.22605821 -0.1758554 o_bookseller_thief #o_entrepreneur_philanthropist 4.3852640 -1.069783 8.131016 1.23242511 -0.1758554 o_entrepreneur_philanthropist #o_runner_psychiatrist 3.2139144 -1.069783 8.131016 1.55997332 -0.1758554 o_runner_psychiatrist #o_trickster_woman 9.1696962 -1.069783 8.131016 1.72648661 -0.1758554 o_trickster_woman #o_clerk_customer 8.0349529 -1.069783 8.131016 1.74485865 -0.1758554 o_clerk_customer #v_businessman_sponsor 4.1200810 -1.069783 8.131016 1.76230711 -0.1758554 v_businessman_sponsor #o_carpenter_craftsman 2.3730616 -1.069783 8.131016 1.76627631 -0.1758554 o_carpenter_craftsman #v_senator_diplomat 6.8134852 -1.069783 8.131016 1.79939330 -0.1758554 v_senator_diplomat #o_violinist_sponsors 2.3170906 -1.069783 8.131016 1.84205586 -0.1758554 o_violinist_sponsors #o_politician_banker 5.1382865 -1.069783 8.131016 1.85389681 -0.1758554 o_politician_banker #o_preacher_parishioners 6.4162186 -1.069783 8.131016 2.25625818 -0.1758554 o_preacher_parishioners #v_agent_fbi 2.8422874 -1.069783 8.131016 2.52469306 
-0.1758554 v_agent_fbi #o_trader_businessman 1.6695241 -1.069783 8.131016 3.05425309 -0.1758554 o_trader_businessman #o_criminal_officer 3.4731695 -1.069783 8.131016 3.26119981 -0.1758554 o_criminal_officer byIDSlopes = coef(model)$ID byIDSlopes$ID = rownames(byIDSlopes) byIDSlopes[order(byIDSlopes$compatible.C),] #> byIDSlopes[order(byIDSlopes$compatible.C),] # (Intercept) predictability_weight deletion_rate compatible.C True_Minus_False.C ID #493283383 3.422375 -1.069783 8.131016 -0.038928946 -0.1758554 493283383 #584015835 4.182614 -1.069783 8.131016 -0.037595456 -0.1758554 584015835 #922774826 5.041638 -1.069783 8.131016 -0.012694818 -0.1758554 922774826 #99767452 4.895182 -1.069783 8.131016 -0.005788567 -0.1758554 99767452 #961536309 5.059172 -1.069783 8.131016 0.008153394 -0.1758554 961536309 #553302187 4.616161 -1.069783 8.131016 0.012184127 -0.1758554 553302187 #193988359 4.630897 -1.069783 8.131016 0.059016278 -0.1758554 193988359 #345336356 4.707851 -1.069783 8.131016 0.069778601 -0.1758554 345336356 #191511088 4.877481 -1.069783 8.131016 0.114371368 -0.1758554 191511088 #992213137 4.823875 -1.069783 8.131016 0.123880359 -0.1758554 992213137 #675784233 4.414690 -1.069783 8.131016 0.124437922 -0.1758554 675784233 #464657019 4.991215 -1.069783 8.131016 0.133107835 -0.1758554 464657019 #94907627 4.970169 -1.069783 8.131016 0.145892245 -0.1758554 94907627 #444273729 4.083726 -1.069783 8.131016 0.149230489 -0.1758554 444273729 #79010925 5.793023 -1.069783 8.131016 0.156401155 -0.1758554 79010925 #767406753 4.745672 -1.069783 8.131016 0.172478313 -0.1758554 767406753 #278167740 5.511846 -1.069783 8.131016 0.182801120 -0.1758554 278167740 #116146778 4.570194 -1.069783 8.131016 0.195945208 -0.1758554 116146778 #282352930 4.850553 -1.069783 8.131016 0.205365500 -0.1758554 282352930 #637269688 4.741264 -1.069783 8.131016 0.210219717 -0.1758554 637269688 #591357781 4.594979 -1.069783 8.131016 0.210298996 -0.1758554 591357781 #954662806 5.228781 -1.069783 8.131016 
0.211052906 -0.1758554 954662806 #708115795 5.516360 -1.069783 8.131016 0.217600266 -0.1758554 708115795 #936548541 4.858380 -1.069783 8.131016 0.224033626 -0.1758554 936548541 #465577363 5.359163 -1.069783 8.131016 0.229594459 -0.1758554 465577363 #95795388 5.790406 -1.069783 8.131016 0.231922422 -0.1758554 95795388 #179088476 5.236838 -1.069783 8.131016 0.235244000 -0.1758554 179088476 #681474707 5.969039 -1.069783 8.131016 0.249414018 -0.1758554 681474707 #908049000 5.110056 -1.069783 8.131016 0.265561308 -0.1758554 908049000 #498236788 4.879595 -1.069783 8.131016 0.266751106 -0.1758554 498236788 #991579562 5.325402 -1.069783 8.131016 0.275085402 -0.1758554 991579562 #250967824 5.098741 -1.069783 8.131016 0.298764890 -0.1758554 250967824 #73230605 5.517735 -1.069783 8.131016 0.309486367 -0.1758554 73230605 #788091576 5.486403 -1.069783 8.131016 0.313975632 -0.1758554 788091576 #174187200 4.203831 -1.069783 8.131016 0.427892275 -0.1758554 174187200 crash() summary(lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C + (1|ID) + (1|Item) + (1|Noun), data=data %>% filter(Region == "V1_0", deletion_rate==0.3, predictability_weight==0))) model = (lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C +(1+compatible.C|ID) + (1+compatible.C|Item), data=data %>% filter(Region == "V1_0", deletion_rate==0.4, predictability_weight==0))) byItemSlopes = coef(model)$Item byItemSlopes$Item = rownames(byItemSlopes) # Interesting (but the evidence isn't strong, might be accidental): cor.test(byItemSlopes[["(Intercept)"]], byItemSlopes$compatible.C) # These slopes are very similar with predictability_weight==0.5 # (Intercept) compatible.C True_Minus_False.C Item #o_child_medic 10.768493 -3.89992285 -0.1648946 o_child_medic was unharmed #o_senator_diplomat 8.241476 -3.16607292 -0.1648946 o_senator_diplomat was winning #o_mobster_media 7.138935 -2.93748887 -0.1648946 o_mobster_media had disappeared #o_victim_criminal 11.847292 -2.25583172 -0.1648946 o_victim_criminal 
were surviving #o_student_bully 10.997521 -1.87912327 -0.1648946 o_student_bully plagiarized his homework/drove everyone crazy #v_teacher_principal 4.395694 -1.84736038 -0.1648946 v_teacher_principal failed the student/annoyed the student #v_guest_thug 11.881792 -1.71798860 -0.1648946 v_guest_thug tricked the bartender/stunned the bartender #o_lifesaver_swimmer 11.766760 -1.66632545 -0.1648946 o_lifesaver_swimmer saved the children/pleased the children #v_victim_swimmer 10.106983 -1.55286668 -0.1648946 v_victim_swimmer #v_psychiatrist_nurse 9.310187 -1.40517317 -0.1648946 v_psychiatrist_nurse #o_surgeon_patient 5.661679 -1.32479741 -0.1648946 o_surgeon_patient #v_driver_guide 6.715940 -1.20077072 -0.1648946 v_driver_guide #v_janitor_organizer 5.808140 -0.91743157 -0.1648946 v_janitor_organizer #o_cousin_bror 11.838276 -0.86757017 -0.1648946 o_cousin_bror #o_pharmacist_stranger 9.513158 -0.84421798 -0.1648946 o_pharmacist_stranger #v_sponsor_musician 11.517677 -0.79750164 -0.1648946 v_sponsor_musician #o_sculptor_painter 6.743033 -0.79153043 -0.1648946 o_sculptor_painter #v_medic_survivor 8.429735 -0.72173719 -0.1648946 v_medic_survivor #o_CEO_employee 4.625234 -0.66604694 -0.1648946 o_CEO_employee #o_bureaucrat_guard 13.422069 -0.62727965 -0.1648946 o_bureaucrat_guard #v_investor_scientist 10.957524 -0.52012056 -0.1648946 v_investor_scientist #v_thief_detective 7.967355 -0.30616220 -0.1648946 v_thief_detective #v_president_farmer 7.736207 -0.28583124 -0.1648946 v_president_farmer #v_actor_fans 9.068854 -0.25547948 -0.1648946 v_actor_fans #v_pediatrician_receptionist 10.050521 -0.21028115 -0.1648946 v_pediatrician_receptionist #v_criminal_stranger 8.134394 -0.17040682 -0.1648946 v_criminal_stranger #o_musician_far 9.926035 -0.16965755 -0.1648946 o_musician_far #o_actor_starlet 7.833042 -0.13629062 -0.1648946 o_actor_starlet #o_commander_president 11.969829 -0.13440355 -0.1648946 o_commander_president #o_daughter_sister 9.036913 -0.08991114 -0.1648946 
o_daughter_sister #v_doctor_colleague 8.161714 -0.04984468 -0.1648946 v_doctor_colleague #v_customer_vendor 5.281899 0.09477463 -0.1648946 v_customer_vendor #o_tenant_foreman 10.788711 0.14762019 -0.1648946 o_tenant_foreman #o_consultant_artist 11.425500 0.20614368 -0.1648946 o_consultant_artist #o_extremist_agent 5.144713 0.31047188 -0.1648946 o_extremist_agent #v_plaintiff_jury 7.536574 0.35119581 -0.1648946 v_plaintiff_jury #v_fisherman_gardener 5.598490 0.36320928 -0.1648946 v_fisherman_gardener #v_plumber_apprentice 4.920909 0.44212153 -0.1648946 v_plumber_apprentice #v_judge_attorney 3.931734 0.50723899 -0.1648946 v_judge_attorney #v_firefighter_neighbor 9.436054 0.52512666 -0.1648946 v_firefighter_neighbor #v_bully_children 4.456294 0.53172195 -0.1648946 v_bully_children #v_guest_cousin 6.623165 0.60596852 -0.1648946 v_guest_cousin #v_fiancé_author 6.795485 0.63975496 -0.1648946 v_fiancé_author #v_captain_crew 6.619463 0.65467070 -0.1648946 v_captain_crew #o_student_professor 6.678425 0.77189760 -0.1648946 o_student_professor #o_driver_tourist 9.482586 0.78555334 -0.1648946 o_driver_tourist #o_neighbor_woman 3.982770 0.81545676 -0.1648946 o_neighbor_woman #v_manager_boss 10.154127 0.83507588 -0.1648946 v_manager_boss #o_clerk_customer 10.963351 0.88229165 -0.1648946 o_clerk_customer #v_banker_analyst 9.185154 0.91583154 -0.1648946 v_banker_analyst #o_entrepreneur_philanthropist 7.932043 0.97989890 -0.1648946 o_entrepreneur_philanthropist #o_principal_teacher 5.808110 1.27180075 -0.1648946 o_principal_teacher #v_lifeguard_soldier 12.752973 1.35196404 -0.1648946 v_lifeguard_soldier #o_scientist_mayor 5.418019 1.43988180 -0.1648946 o_scientist_mayor #v_vendor_salesman 10.181121 1.45451247 -0.1648946 v_vendor_salesman #v_senator_diplomat 10.054649 1.54162695 -0.1648946 v_senator_diplomat #o_violinist_sponsors 5.692785 1.64969114 -0.1648946 o_violinist_sponsors #v_businessman_sponsor 7.489190 1.70923178 -0.1648946 v_businessman_sponsor #o_criminal_officer 
6.020972 1.84912295 -0.1648946 o_criminal_officer #o_politician_banker 8.400021 2.08268570 -0.1648946 o_politician_banker #o_trickster_woman 12.605169 2.16073843 -0.1648946 o_trickster_woman #o_runner_psychiatrist 5.962136 2.21337998 -0.1648946 o_runner_psychiatrist #o_carpenter_craftsman 5.512902 2.26077326 -0.1648946 o_carpenter_craftsman #o_preacher_parishioners 9.665387 2.35810326 -0.1648946 o_preacher_parishioners #v_agent_fbi 6.293955 2.43050818 -0.1648946 v_agent_fbi arrested the criminal/confused the criminal #o_bookseller_thief 9.516952 2.86883164 -0.1648946 o_bookseller_thief got a heart attack #o_trader_businessman 4.887210 3.18120764 -0.1648946 o_trader_businessman had insider information # model2 = (lmer(ThatFractionReweighted ~ compatible.C + True_Minus_False.C +(1+compatible.C|ID) + (1+compatible.C|Item), data=data %>% filter(Region == "V1_0", deletion_rate==0.4, predictability_weight==0))) byItemSlopes = coef(model2)$Item byItemSlopes$Item = rownames(byItemSlopes) #o_scientist_mayor 87.46241 -13.11925475 2.807508 o_scientist_mayor had faked data/couldn't be trusted #o_entrepreneur_philanthropist 83.79822 -10.55660954 2.807508 o_entrepreneur_philanthropist wasted the money/exasperated the nurse #o_criminal_officer 80.39496 -10.29351953 2.807508 o_criminal_officer was guilty/was refuted #o_surgeon_patient 85.04842 -10.15585118 2.807508 o_surgeon_patient had no degree/was widely known #o_preacher_parishioners 86.71091 -9.47752685 2.807508 o_preacher_parishioners #o_trader_businessman 88.52969 -9.37560438 2.807508 o_trader_businessman #o_extremist_agent 83.56492 -8.08450427 2.807508 o_extremist_agent #v_plumber_apprentice 84.26964 -7.92866378 2.807508 v_plumber_apprentice #o_violinist_sponsors 85.94343 -7.12730111 2.807508 o_violinist_sponsors #v_banker_analyst 85.28712 -7.05238195 2.807508 v_banker_analyst #o_trickster_woman 83.10796 -7.03971028 2.807508 o_trickster_woman #v_thief_detective 85.41960 -6.97343971 2.807508 v_thief_detective 
#v_driver_guide 84.91185 -6.97265389 2.807508 v_driver_guide #o_sculptor_painter 83.74767 -6.87932938 2.807508 o_sculptor_painter #o_child_medic 84.87304 -6.81020907 2.807508 o_child_medic #o_cousin_bror 82.94171 -6.55995474 2.807508 o_cousin_bror #v_fisherman_gardener 82.68731 -6.47733735 2.807508 v_fisherman_gardener #v_senator_diplomat 84.04256 -6.43737907 2.807508 v_senator_diplomat #v_firefighter_neighbor 82.77441 -6.39140299 2.807508 v_firefighter_neighbor #v_guest_cousin 80.44554 -6.36035586 2.807508 v_guest_cousin #o_principal_teacher 85.09546 -6.14038844 2.807508 o_principal_teacher #o_consultant_artist 79.50840 -6.08765829 2.807508 o_consultant_artist #o_student_professor 84.82091 -5.74017191 2.807508 o_student_professor #o_neighbor_woman 87.04194 -5.44558344 2.807508 o_neighbor_woman #o_bureaucrat_guard 82.47143 -5.08322115 2.807508 o_bureaucrat_guard #v_janitor_organizer 84.11530 -5.04157705 2.807508 v_janitor_organizer #o_bookseller_thief 82.66239 -4.93231882 2.807508 o_bookseller_thief #o_lifesaver_swimmer 83.42137 -4.75432836 2.807508 o_lifesaver_swimmer #v_customer_vendor 81.99634 -4.51363232 2.807508 v_customer_vendor #o_mobster_media 85.31739 -4.44291102 2.807508 o_mobster_media #v_investor_scientist 82.73042 -4.22345472 2.807508 v_investor_scientist #v_manager_boss 83.33789 -4.22082904 2.807508 v_manager_boss #v_guest_thug 83.86669 -4.20451750 2.807508 v_guest_thug #v_bully_children 84.65522 -4.15938218 2.807508 v_bully_children #v_psychiatrist_nurse 86.60480 -3.95205088 2.807508 v_psychiatrist_nurse #o_driver_tourist 87.95997 -3.90034624 2.807508 o_driver_tourist #v_pediatrician_receptionist 84.99795 -3.87422731 2.807508 v_pediatrician_receptionist #o_commander_president 83.88214 -3.78209634 2.807508 o_commander_president #o_runner_psychiatrist 83.35741 -3.48727869 2.807508 o_runner_psychiatrist #o_actor_starlet 84.07877 -3.33478506 2.807508 o_actor_starlet #v_fiancé_author 84.16542 -3.17837258 2.807508 v_fiancé_author #v_actor_fans 82.81152 
-2.95411249 2.807508 v_actor_fans #o_senator_diplomat 82.65859 -2.90629655 2.807508 o_senator_diplomat #v_plaintiff_jury 86.25058 -2.90391998 2.807508 v_plaintiff_jury #v_businessman_sponsor 89.41016 -2.62955707 2.807508 v_businessman_sponsor #v_vendor_salesman 87.88541 -2.39227253 2.807508 v_vendor_salesman #v_teacher_principal 85.68748 -2.27930926 2.807508 v_teacher_principal #v_criminal_stranger 82.34367 -2.09696178 2.807508 v_criminal_stranger #o_clerk_customer 81.56801 -2.08883340 2.807508 o_clerk_customer #v_victim_swimmer 84.62578 -1.90370924 2.807508 v_victim_swimmer #v_judge_attorney 86.81673 -1.86969388 2.807508 v_judge_attorney #o_carpenter_craftsman 84.02451 -1.69215045 2.807508 o_carpenter_craftsman #o_pharmacist_stranger 87.53160 -1.40874657 2.807508 o_pharmacist_stranger #o_daughter_sister 82.05274 -1.02103151 2.807508 o_daughter_sister #v_medic_survivor 81.97385 -0.97210659 2.807508 v_medic_survivor #v_lifeguard_soldier 83.42388 -0.86695656 2.807508 v_lifeguard_soldier #v_doctor_colleague 85.69208 -0.86127271 2.807508 v_doctor_colleague #v_sponsor_musician 79.49338 -0.62902515 2.807508 v_sponsor_musician #v_captain_crew 83.89051 -0.10333422 2.807508 v_captain_crew #o_student_bully 78.81972 0.03648398 2.807508 o_student_bully #o_victim_criminal 83.39799 0.11728546 2.807508 o_victim_criminal #o_tenant_foreman 81.54701 0.56521371 2.807508 o_tenant_foreman #o_CEO_employee 81.45328 0.74142469 2.807508 o_CEO_employee #o_musician_far 85.02372 1.34850899 2.807508 o_musician_far #v_president_farmer 83.00137 1.89168617 2.807508 v_president_farmer #v_agent_fbi 84.32057 2.37295329 2.807508 v_agent_fbi arrested the criminal/confused the criminal #o_politician_banker 78.68942 3.42211939 2.807508 o_politician_banker laundered money/was popular # # deletion_rate predictability_weight #1 0.30 0.00 #7 0.50 0.00 #8 0.55 0.00 #9 0.50 0.25 #18 0.40 0.00 #34 0.45 0.00 model = (lmer(SurprisalReweighted ~ compatible.C + True_Minus_False.C +(1+compatible.C|ID) + 
(1+compatible.C|Item), data=data %>% filter(Region == "V1_0", deletion_rate==0.3, predictability_weight==0))) # (Intercept) compatible.C True_Minus_False.C Item [43/1866] #o_mobster_media 4.496106 -6.40501484 -0.05044264 o_mobster_media #o_senator_diplomat 6.803331 -4.60917349 -0.05044264 o_senator_diplomat #o_surgeon_patient 5.170188 -4.55348269 -0.05044264 o_surgeon_patient #o_child_medic 8.255213 -3.90376150 -0.05044264 o_child_medic #o_victim_criminal 8.915540 -3.39266871 -0.05044264 o_victim_criminal #v_sponsor_musician 11.001467 -3.08297030 -0.05044264 v_sponsor_musician #v_janitor_organizer 6.945259 -2.86266370 -0.05044264 v_janitor_organizer #v_teacher_principal 3.672387 -2.46164142 -0.05044264 v_teacher_principal #o_student_bully 7.059583 -2.28519458 -0.05044264 o_student_bully #v_driver_guide 8.513113 -2.05352268 -0.05044264 v_driver_guide #o_actor_starlet 7.089057 -1.47102317 -0.05044264 o_actor_starlet #o_CEO_employee 5.201617 -1.36781963 -0.05044264 o_CEO_employee #o_pharmacist_stranger 5.445108 -1.21810250 -0.05044264 o_pharmacist_stranger #v_doctor_colleague 6.008020 -1.10516716 -0.05044264 v_doctor_colleague #v_investor_scientist 8.868556 -0.99664887 -0.05044264 v_investor_scientist #v_captain_crew 7.767709 -0.95443300 -0.05044264 v_captain_crew #o_lifesaver_swimmer 9.295139 -0.87443643 -0.05044264 o_lifesaver_swimmer #o_cousin_bror 8.042698 -0.85053028 -0.05044264 o_cousin_bror #v_president_farmer 6.877058 -0.85024550 -0.05044264 v_president_farmer #o_musician_far 6.591580 -0.84729952 -0.05044264 o_musician_far #v_medic_survivor 7.011600 -0.78314343 -0.05044264 v_medic_survivor #v_psychiatrist_nurse 4.338750 -0.77113555 -0.05044264 v_psychiatrist_nurse #v_plaintiff_jury 6.952710 -0.74104999 -0.05044264 v_plaintiff_jury #o_politician_banker 8.648524 -0.54187818 -0.05044264 o_politician_banker #v_fiancé_author 4.557252 -0.50849743 -0.05044264 v_fiancé_author #o_bureaucrat_guard 14.123809 -0.49331308 -0.05044264 o_bureaucrat_guard #o_daughter_sister 
7.591207 -0.48653831 -0.05044264 o_daughter_sister #v_victim_swimmer 7.420335 -0.38961305 -0.05044264 v_victim_swimmer #o_tenant_foreman 11.573853 -0.19512662 -0.05044264 o_tenant_foreman #o_sculptor_painter 7.464312 -0.16894915 -0.05044264 o_sculptor_painter #v_fisherman_gardener 7.562756 -0.10264386 -0.05044264 v_fisherman_gardener #v_pediatrician_receptionist 7.905793 -0.03172616 -0.05044264 v_pediatrician_receptionist #v_firefighter_neighbor 7.783144 0.17568348 -0.05044264 v_firefighter_neighbor #v_actor_fans 6.665987 0.22495957 -0.05044264 v_actor_fans #o_commander_president 8.774396 0.24140623 -0.05044264 o_commander_president #v_thief_detective 6.095472 0.26015677 -0.05044264 v_thief_detective #v_plumber_apprentice 4.651002 0.28086979 -0.05044264 v_plumber_apprentice #v_customer_vendor 5.653264 0.39329909 -0.05044264 v_customer_vendor #o_extremist_agent 4.553955 0.41307063 -0.05044264 o_extremist_agent #v_banker_analyst 6.972246 0.48150220 -0.05044264 v_banker_analyst #v_vendor_salesman 7.713977 0.54299233 -0.05044264 v_vendor_salesman #v_criminal_stranger 6.107583 0.55601081 -0.05044264 v_criminal_stranger #v_lifeguard_soldier 8.272183 0.62926844 -0.05044264 v_lifeguard_soldier #o_neighbor_woman 3.474469 0.76624450 -0.05044264 o_neighbor_woman #v_bully_children 4.190889 0.81043300 -0.05044264 v_bully_children #v_guest_thug 9.103566 0.97714896 -0.05044264 v_guest_thug #v_judge_attorney 3.392447 1.12898983 -0.05044264 v_judge_attorney #v_manager_boss 9.351564 1.68272760 -0.05044264 v_manager_boss #o_carpenter_craftsman 6.782810 1.74324717 -0.05044264 o_carpenter_craftsman #o_scientist_mayor 5.842819 1.79555457 -0.05044264 o_scientist_mayor #v_guest_cousin 9.372787 1.85162335 -0.05044264 v_guest_cousin #o_principal_teacher 6.446017 1.87171343 -0.05044264 o_principal_teacher #o_runner_psychiatrist 8.347469 2.00833295 -0.05044264 o_runner_psychiatrist #o_driver_tourist 7.971082 2.23188110 -0.05044264 o_driver_tourist #v_agent_fbi 6.750762 2.28444486 -0.05044264 
v_agent_fbi #v_businessman_sponsor 6.073498 2.67452419 -0.05044264 v_businessman_sponsor #o_student_professor 4.150456 2.72633119 -0.05044264 o_student_professor #v_senator_diplomat 8.010948 2.81262533 -0.05044264 v_senator_diplomat #o_violinist_sponsors 6.755304 3.07899238 -0.05044264 o_violinist_sponsors #o_bookseller_thief 3.514508 3.23024444 -0.05044264 o_bookseller_thief #o_clerk_customer 12.057934 3.62944288 -0.05044264 o_clerk_customer #o_consultant_artist 7.842055 3.72479296 -0.05044264 o_consultant_artist #o_trickster_woman 8.248278 4.07853995 -0.05044264 o_trickster_woman #o_preacher_parishioners 7.460335 4.76061998 -0.05044264 o_preacher_parishioners #o_criminal_officer 6.968645 4.93237544 -0.05044264 o_criminal_officer #o_trader_businessman 4.644144 5.95040413 -0.05044264 o_trader_businessman #o_entrepreneur_philanthropist 7.154386 7.32652036 -0.05044264 o_entrepreneur_philanthropist data$Script.C = ifelse(data$Script == "script__J_3_W_GPT2M", -0.5, 0.5) data$deletion_rate.C = data$deletion_rate-mean(data$deletion_rate, na.rm=TRUE) model = (lmer(SurprisalReweighted ~ deletion_rate.C*compatible.C + Script.C*compatible.C + True_Minus_False.C+ (1|ID) + (1+compatible.C|Item), data=data %>% filter(Region == "V1_0", predictability_weight==0.5))) byItemSlopes = coef(model)$Item byItemSlopes$Item = rownames(byItemSlopes)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/FC_corr_graphs.function.R \name{strain.spec.gene.func} \alias{strain.spec.gene.func} \title{Function to compute a strain specific gene expression Fold-Change barplot} \usage{ strain.spec.gene.func(dPhno.interest = NULL, gene.probe = "", plot.title = "") } \arguments{ \item{dPhno.interest}{specific phenotype FC values for every acceptable (no NA's) strains. Use the output of the plot.hist.phenoFC.func} \item{gene.probe}{probe number of the gene of interest. Use gene with nax correlation. Refer to maxcor.gene.name.func} \item{plot.title}{character of desired plot title} } \value{ b , a barplot with the FC for every strain for a specific phenotype } \description{ The function plots a color annoted barplot representing the FC for every strain for a specific gene. Red represents a positive FC meaning the Female gene expression value is greater, Blue is the opposite The order of the strain is the same as for the strain.spec.pheno.dec.func for the same dPhno.interest and gene }
/fcCor1/man/strain.spec.gene.func.Rd
no_license
alexisdimanche/AD.REU2019
R
false
true
1,068
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/FC_corr_graphs.function.R \name{strain.spec.gene.func} \alias{strain.spec.gene.func} \title{Function to compute a strain specific gene expression Fold-Change barplot} \usage{ strain.spec.gene.func(dPhno.interest = NULL, gene.probe = "", plot.title = "") } \arguments{ \item{dPhno.interest}{specific phenotype FC values for every acceptable (no NA's) strains. Use the output of the plot.hist.phenoFC.func} \item{gene.probe}{probe number of the gene of interest. Use gene with nax correlation. Refer to maxcor.gene.name.func} \item{plot.title}{character of desired plot title} } \value{ b , a barplot with the FC for every strain for a specific phenotype } \description{ The function plots a color annoted barplot representing the FC for every strain for a specific gene. Red represents a positive FC meaning the Female gene expression value is greater, Blue is the opposite The order of the strain is the same as for the strain.spec.pheno.dec.func for the same dPhno.interest and gene }
library(Biocomb) ### Name: leukemia_miss ### Title: desease data ### Aliases: leukemia_miss ### Keywords: datasets ### ** Examples # load the dataset data(leukemia_miss) # X95735_at with(leukemia_miss, by(X95735_at,Class,mean,na.rm=TRUE)) # M27891_at with(leukemia_miss,tapply(M27891_at, Class, FUN = mean,na.rm=TRUE))
/data/genthat_extracted_code/Biocomb/examples/leukemia_miss.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
327
r
library(Biocomb) ### Name: leukemia_miss ### Title: desease data ### Aliases: leukemia_miss ### Keywords: datasets ### ** Examples # load the dataset data(leukemia_miss) # X95735_at with(leukemia_miss, by(X95735_at,Class,mean,na.rm=TRUE)) # M27891_at with(leukemia_miss,tapply(M27891_at, Class, FUN = mean,na.rm=TRUE))
###################################################### ### ### Project: Getting and Cleaning Data - Course Project ### Date: 5/12/2019 ### Goal: Show ability to read data, manipulate it, ### and export it with R. ### ####################################################### # Since the Test and Training data are in the same format, # I'm going to create a function that takes either test # or train as an input and prepares the data. library(dplyr) prepare_data <- function(input){ #Input for this function should be either "test" or "train" if(input != "test" & input != "train"){ stop("Input should be 'test' or 'train'.") } #This will load in the appropriate test or train files. subject.data <- read.table(file = paste0("./UCI HAR Dataset/",input,"/subject_",input,".txt"), col.names = "Subject.ID") features <- read.table(file = paste0("./UCI HAR Dataset/features.txt"), col.names = c("Feature.ID","Feature.Name")) x.data <- read.table(file = paste0("./UCI HAR Dataset/",input,"/X_",input,".txt")) names(x.data) <- features$Feature.Name #This renames the features using the names given to us. y.data <- read.table(file = paste0("./UCI HAR Dataset/",input,"/Y_",input,".txt"), col.names = "Activity.Code") activity.names <- read.table(file = paste0("./UCI HAR Dataset/activity_labels.txt"), col.names = c("Activity.Code","Activity.Name")) y.data <- left_join(x = y.data, y = activity.names) #This renames the activities using the names given to us. output <- cbind(subject.data,y.data,x.data) #Here we combine the subject, activity, and feature data all together. #This is a bit of an abuse of regex and R stuff. I'll try my best to explain what's going on. # * which(grepl(x = colnames(output),pattern = "(std()|-mean()|Subject|Activity)")) # This line is creating a vector of all the indices of the columns in our dataset that have any of the following in their name: # 1. std() # 2. -mean() # 3. Subject # 4. 
Activity # * [!(which(grepl(x = colnames(output),pattern = "(std()|-mean()|Subject|Activity)")) %in% which(grepl(x = colnames(output), pattern = "Freq()")))]] # This line is creating a logical vector which will be used to subset the above vector. # The entire purpose of this part is to remove anything that has meanFreq() in the name. # * Finally we use the subsetted first vector to pull out what columns we want from the dataset. output <- output[,which(grepl(x = colnames(output),pattern = "(std()|-mean()|Subject|Activity)"))[!(which(grepl(x = colnames(output),pattern = "(std()|-mean()|Subject|Activity)")) %in% which(grepl(x = colnames(output), pattern = "Freq()")))]] new.names <- as.character() #This is another regex moment. Instead of manually going through and renaming each column, I created this loop which # builds up an understandable feature name based on the slightly more cryptic name given to us. for(name in names(output)){ temp.name <- as.character() if(grepl(x = name, pattern = "Subject|Activity")){ temp.name <- paste0(name) } if(grepl(x = name, pattern = "-mean()")){ temp.name <- paste0("Mean",temp.name) } if(grepl(x = name, pattern = "-std()")){ temp.name <- paste0("Standard.Dev",temp.name) } if(grepl(x = name, pattern = "Gyro")){ temp.name <- paste0(temp.name,".Gryoscope") } if(grepl(x = name, pattern = "Acc")){ temp.name <- paste0(temp.name,".Accelerometer") } if(grepl(x = name, pattern = "^t")){ temp.name <- paste0(temp.name,".Time") } if(grepl(x = name, pattern = "^f")){ temp.name <- paste0(temp.name,".Frequency") } if(grepl(x = name, pattern = "Body")){ temp.name <- paste0(temp.name,".Body") } if(grepl(x = name, pattern = "Gravity")){ temp.name <- paste0(temp.name,".Gravity") } if(grepl(x = name, pattern = "Jerk")){ temp.name <- paste0(temp.name,".Jerk") } if(grepl(x = name, pattern = "Mag")){ temp.name <- paste0(temp.name,".Magnitude") } if(grepl(x = name, pattern = "(X|Y|Z)$")){ temp.name <- 
paste0(temp.name,".",substr(name,nchar(name),nchar(name))) } new.names <- append(new.names,temp.name) } #Take the new names and assign them to the columns of our dataset and return it. colnames(output) <- new.names return(output) } #Prep the test and train data. test_prep <- prepare_data("test") train_prep <- prepare_data("train") #Combine the test and train data. combined_data <- rbind(test_prep,train_prep) #Create the summary dataset showing the mean for each variable grouped by activity and subject. summary_data <- combined_data %>% group_by(Subject.ID, Activity.Name) %>% summarise_at(vars(-Activity.Code),funs(mean(.)))
/run_analysis.R
no_license
fryejw/Getting-and-Cleaning-Data-Course-Project
R
false
false
5,071
r
###################################################### ### ### Project: Getting and Cleaning Data - Course Project ### Date: 5/12/2019 ### Goal: Show ability to read data, manipulate it, ### and export it with R. ### ####################################################### # Since the Test and Training data are in the same format, # I'm going to create a function that takes either test # or train as an input and prepares the data. library(dplyr) prepare_data <- function(input){ #Input for this function should be either "test" or "train" if(input != "test" & input != "train"){ stop("Input should be 'test' or 'train'.") } #This will load in the appropriate test or train files. subject.data <- read.table(file = paste0("./UCI HAR Dataset/",input,"/subject_",input,".txt"), col.names = "Subject.ID") features <- read.table(file = paste0("./UCI HAR Dataset/features.txt"), col.names = c("Feature.ID","Feature.Name")) x.data <- read.table(file = paste0("./UCI HAR Dataset/",input,"/X_",input,".txt")) names(x.data) <- features$Feature.Name #This renames the features using the names given to us. y.data <- read.table(file = paste0("./UCI HAR Dataset/",input,"/Y_",input,".txt"), col.names = "Activity.Code") activity.names <- read.table(file = paste0("./UCI HAR Dataset/activity_labels.txt"), col.names = c("Activity.Code","Activity.Name")) y.data <- left_join(x = y.data, y = activity.names) #This renames the activities using the names given to us. output <- cbind(subject.data,y.data,x.data) #Here we combine the subject, activity, and feature data all together. #This is a bit of an abuse of regex and R stuff. I'll try my best to explain what's going on. # * which(grepl(x = colnames(output),pattern = "(std()|-mean()|Subject|Activity)")) # This line is creating a vector of all the indices of the columns in our dataset that have any of the following in their name: # 1. std() # 2. -mean() # 3. Subject # 4. 
Activity # * [!(which(grepl(x = colnames(output),pattern = "(std()|-mean()|Subject|Activity)")) %in% which(grepl(x = colnames(output), pattern = "Freq()")))]] # This line is creating a logical vector which will be used to subset the above vector. # The entire purpose of this part is to remove anything that has meanFreq() in the name. # * Finally we use the subsetted first vector to pull out what columns we want from the dataset. output <- output[,which(grepl(x = colnames(output),pattern = "(std()|-mean()|Subject|Activity)"))[!(which(grepl(x = colnames(output),pattern = "(std()|-mean()|Subject|Activity)")) %in% which(grepl(x = colnames(output), pattern = "Freq()")))]] new.names <- as.character() #This is another regex moment. Instead of manually going through and renaming each column, I created this loop which # builds up an understandable feature name based on the slightly more cryptic name given to us. for(name in names(output)){ temp.name <- as.character() if(grepl(x = name, pattern = "Subject|Activity")){ temp.name <- paste0(name) } if(grepl(x = name, pattern = "-mean()")){ temp.name <- paste0("Mean",temp.name) } if(grepl(x = name, pattern = "-std()")){ temp.name <- paste0("Standard.Dev",temp.name) } if(grepl(x = name, pattern = "Gyro")){ temp.name <- paste0(temp.name,".Gryoscope") } if(grepl(x = name, pattern = "Acc")){ temp.name <- paste0(temp.name,".Accelerometer") } if(grepl(x = name, pattern = "^t")){ temp.name <- paste0(temp.name,".Time") } if(grepl(x = name, pattern = "^f")){ temp.name <- paste0(temp.name,".Frequency") } if(grepl(x = name, pattern = "Body")){ temp.name <- paste0(temp.name,".Body") } if(grepl(x = name, pattern = "Gravity")){ temp.name <- paste0(temp.name,".Gravity") } if(grepl(x = name, pattern = "Jerk")){ temp.name <- paste0(temp.name,".Jerk") } if(grepl(x = name, pattern = "Mag")){ temp.name <- paste0(temp.name,".Magnitude") } if(grepl(x = name, pattern = "(X|Y|Z)$")){ temp.name <- 
paste0(temp.name,".",substr(name,nchar(name),nchar(name))) } new.names <- append(new.names,temp.name) } #Take the new names and assign them to the columns of our dataset and return it. colnames(output) <- new.names return(output) } #Prep the test and train data. test_prep <- prepare_data("test") train_prep <- prepare_data("train") #Combine the test and train data. combined_data <- rbind(test_prep,train_prep) #Create the summary dataset showing the mean for each variable grouped by activity and subject. summary_data <- combined_data %>% group_by(Subject.ID, Activity.Name) %>% summarise_at(vars(-Activity.Code),funs(mean(.)))
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/permute.R \name{permu_CI_pearson} \alias{permu_CI_pearson} \title{Confidence interval for the correlation between residuals and treatment} \usage{ permu_CI_pearson(prediction, response, treatment, iters, alpha = 0.05, side = "both", verbosity = FALSE) } \arguments{ \item{prediction}{Vector of predicted outcomes} \item{response}{Vector of responses} \item{treatment}{Vector of treatments} \item{iters}{The number of Monte Carlo iterations (default 1000)} \item{alpha}{Significance level} \item{side}{Type of interval, either "both", "upper", or "lower". Default is "both".} \item{verbose}{Verbosity switch - print the p-value and confidence interval endpoint at each step? (Default FALSE)} } \value{ a confidence interval (vector) } \description{ Invert the permutation test to get a \eqn{1-\alpha} confidence interval for the Pearson correlation between residuals and treatment }
/ModelMatch/man/permu_CI_pearson.Rd
no_license
Sandy4321/ModelMatch
R
false
false
977
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/permute.R \name{permu_CI_pearson} \alias{permu_CI_pearson} \title{Confidence interval for the correlation between residuals and treatment} \usage{ permu_CI_pearson(prediction, response, treatment, iters, alpha = 0.05, side = "both", verbosity = FALSE) } \arguments{ \item{prediction}{Vector of predicted outcomes} \item{response}{Vector of responses} \item{treatment}{Vector of treatments} \item{iters}{The number of Monte Carlo iterations (default 1000)} \item{alpha}{Significance level} \item{side}{Type of interval, either "both", "upper", or "lower". Default is "both".} \item{verbose}{Verbosity switch - print the p-value and confidence interval endpoint at each step? (Default FALSE)} } \value{ a confidence interval (vector) } \description{ Invert the permutation test to get a \eqn{1-\alpha} confidence interval for the Pearson correlation between residuals and treatment }
# Wisconsin 2016 Fraud Analysis # George Elliott Mmrris | TheCrosstab.com # Please give credit where credit is due. # *NOTE: You should run the "County Maps.R" file in the root directory to gather all the data and dependencies. ############### # Regressions # ############### sample_data <- Difference %>% filter(abbr_state %in% c("wi")) sample_data$paperballot <- 0 paper_ballot_counties <- c("ashland", "bayfield","brown" ,"columbia" ,"dane" ,"douglas","door" ,"fond du lac","green" ,"kenosha", "la crosse","lincoln","milwaukee","ozaukee" ,"portage","rock" ,"sauk","st croix " ,"washington" ,"waukasha" ,"winnebago" ,"wood") sample_data[sample_data$county.name %in% paper_ballot_counties,]$paperballot <- 1 attach(sample_data) model <- lm(value ~ percent_white + BachelorsPlus + paperballot) summary(model) detach(sample_data) ############# # Table ############# library(knitr) # format outputTable <- data.frame("County" =sample_data$county.name, "Clinton" =round(sample_data$Clinton2Party, 2), "Obama" =round(sample_data$Obama2Party,2), "Change" = round(sample_data$value, 2), "Paper Ballot" =sample_data$paperballot, "Percent White" = sample_data$percent_white, "Percent College+" = sample_data$BachelorsPlus) mean(outputTable[outputTable$Paper.Ballot == 1,]$Change) mean(outputTable[outputTable$Paper.Ballot == 0,]$Change) mean(outputTable[outputTable$Paper.Ballot == 1,]$Percent.White) mean(outputTable[outputTable$Paper.Ballot == 0,]$Percent.White) mean(outputTable[outputTable$Paper.Ballot == 1,]$Percent.College.) mean(outputTable[outputTable$Paper.Ballot == 0,]$Percent.College.) 
kable(outputTable, row.names = FALSE) ############# #Graphs ############# #paper sample_data1 <- sample_data %>% filter(paperballot == 1) gg1<- county_choropleth(sample_data1,num_colors = 1, title = "Clinton's Vote Share vs Obama's - \nPaper Bllot Counties", state_zoom = "wisconsin")+ scale_fill_gradient2(high = "blue", low = "red", na.value = "#EAECEE", breaks = pretty(sample_data1$value, n = 10),name = "Clinton - Obama %") + theme(plot.title = element_text(face = "bold",hjust = .5, size = 20), legend.position = "bottom") grid.newpage() footnote <- "By @gelliottmorris | thecrosstab.com | elliott@thecrosstab.com" g <- arrangeGrob(gg1, right = textGrob(footnote, x = 0, rot = 90, hjust = .8, vjust=1.3, gp = gpar(fontface = "italic", fontsize = 12))) grid.draw(g) print(g) dev.copy(png,"ClintonWIPaper.png",width = 10, height = 8, unit = "in", res = 200) dev.off() #machine sample_data1 <- sample_data %>% filter(paperballot == 0) gg2<- county_choropleth(sample_data1,num_colors = 1, title = "Clinton's Vote Share vs Obama's - \nMachine Ballot Counties", state_zoom = "wisconsin")+ scale_fill_gradient2(high = "blue", low = "red", na.value = "#EAECEE", breaks = pretty(sample_data1$value, n = 10),name = "Clinton - Obama %") + theme(plot.title = element_text(face = "bold",hjust = .5, size = 20), legend.position = "bottom") grid.newpage() footnote <- "By @gelliottmorris | thecrosstab.com | elliott@thecrosstab.com" g <- arrangeGrob(gg2, right = textGrob(footnote, x = 0, rot = 90, hjust = .8, vjust=1.3, gp = gpar(fontface = "italic", fontsize = 12))) grid.draw(g) print(g) dev.copy(png,"ClintonWIMachine.png",width = 10, height = 8, unit = "in", res = 200) dev.off() #uneducated sample_data2 <- sample_data %>% filter(BachelorsPlus > mean(BachelorsPlus)) gg3<- county_choropleth(sample_data2,num_colors = 1, title = "Clinton's Vote Share vs Obama's - \nCounties With Above\nAverage Educational Attainment", state_zoom = "wisconsin")+ scale_fill_gradient2(high = "blue", low = "red", 
na.value = "#EAECEE", breaks = pretty(sample_data2$value, n = 10),name = "Clinton - Obama %") + theme(plot.title = element_text(face = "bold",hjust = .5, size = 20), legend.position = "bottom") grid.newpage() footnote <- "By @gelliottmorris | thecrosstab.com | elliott@thecrosstab.com" g <- arrangeGrob(gg3, right = textGrob(footnote, x = 0, rot = 90, hjust = .8, vjust=1.3, gp = gpar(fontface = "italic", fontsize = 12))) grid.draw(g) print(g) dev.copy(png,"ClintonWIUrban.png",width = 10, height = 8, unit = "in", res = 200) dev.off() ## printy grid.newpage() g <- arrangeGrob(gg1, gg3, ncol = 2) grid.draw(g) dev.copy(png,"ClintonWinCorr.png",width = 14, height = 8, unit = "in", res = 200) dev.off()
/WI_Fraud_Analysis.R
no_license
markpanny/Election-Results-Analysis-2016
R
false
false
4,793
r
# Wisconsin 2016 Fraud Analysis # George Elliott Mmrris | TheCrosstab.com # Please give credit where credit is due. # *NOTE: You should run the "County Maps.R" file in the root directory to gather all the data and dependencies. ############### # Regressions # ############### sample_data <- Difference %>% filter(abbr_state %in% c("wi")) sample_data$paperballot <- 0 paper_ballot_counties <- c("ashland", "bayfield","brown" ,"columbia" ,"dane" ,"douglas","door" ,"fond du lac","green" ,"kenosha", "la crosse","lincoln","milwaukee","ozaukee" ,"portage","rock" ,"sauk","st croix " ,"washington" ,"waukasha" ,"winnebago" ,"wood") sample_data[sample_data$county.name %in% paper_ballot_counties,]$paperballot <- 1 attach(sample_data) model <- lm(value ~ percent_white + BachelorsPlus + paperballot) summary(model) detach(sample_data) ############# # Table ############# library(knitr) # format outputTable <- data.frame("County" =sample_data$county.name, "Clinton" =round(sample_data$Clinton2Party, 2), "Obama" =round(sample_data$Obama2Party,2), "Change" = round(sample_data$value, 2), "Paper Ballot" =sample_data$paperballot, "Percent White" = sample_data$percent_white, "Percent College+" = sample_data$BachelorsPlus) mean(outputTable[outputTable$Paper.Ballot == 1,]$Change) mean(outputTable[outputTable$Paper.Ballot == 0,]$Change) mean(outputTable[outputTable$Paper.Ballot == 1,]$Percent.White) mean(outputTable[outputTable$Paper.Ballot == 0,]$Percent.White) mean(outputTable[outputTable$Paper.Ballot == 1,]$Percent.College.) mean(outputTable[outputTable$Paper.Ballot == 0,]$Percent.College.) 
kable(outputTable, row.names = FALSE) ############# #Graphs ############# #paper sample_data1 <- sample_data %>% filter(paperballot == 1) gg1<- county_choropleth(sample_data1,num_colors = 1, title = "Clinton's Vote Share vs Obama's - \nPaper Bllot Counties", state_zoom = "wisconsin")+ scale_fill_gradient2(high = "blue", low = "red", na.value = "#EAECEE", breaks = pretty(sample_data1$value, n = 10),name = "Clinton - Obama %") + theme(plot.title = element_text(face = "bold",hjust = .5, size = 20), legend.position = "bottom") grid.newpage() footnote <- "By @gelliottmorris | thecrosstab.com | elliott@thecrosstab.com" g <- arrangeGrob(gg1, right = textGrob(footnote, x = 0, rot = 90, hjust = .8, vjust=1.3, gp = gpar(fontface = "italic", fontsize = 12))) grid.draw(g) print(g) dev.copy(png,"ClintonWIPaper.png",width = 10, height = 8, unit = "in", res = 200) dev.off() #machine sample_data1 <- sample_data %>% filter(paperballot == 0) gg2<- county_choropleth(sample_data1,num_colors = 1, title = "Clinton's Vote Share vs Obama's - \nMachine Ballot Counties", state_zoom = "wisconsin")+ scale_fill_gradient2(high = "blue", low = "red", na.value = "#EAECEE", breaks = pretty(sample_data1$value, n = 10),name = "Clinton - Obama %") + theme(plot.title = element_text(face = "bold",hjust = .5, size = 20), legend.position = "bottom") grid.newpage() footnote <- "By @gelliottmorris | thecrosstab.com | elliott@thecrosstab.com" g <- arrangeGrob(gg2, right = textGrob(footnote, x = 0, rot = 90, hjust = .8, vjust=1.3, gp = gpar(fontface = "italic", fontsize = 12))) grid.draw(g) print(g) dev.copy(png,"ClintonWIMachine.png",width = 10, height = 8, unit = "in", res = 200) dev.off() #uneducated sample_data2 <- sample_data %>% filter(BachelorsPlus > mean(BachelorsPlus)) gg3<- county_choropleth(sample_data2,num_colors = 1, title = "Clinton's Vote Share vs Obama's - \nCounties With Above\nAverage Educational Attainment", state_zoom = "wisconsin")+ scale_fill_gradient2(high = "blue", low = "red", 
na.value = "#EAECEE", breaks = pretty(sample_data2$value, n = 10),name = "Clinton - Obama %") + theme(plot.title = element_text(face = "bold",hjust = .5, size = 20), legend.position = "bottom") grid.newpage() footnote <- "By @gelliottmorris | thecrosstab.com | elliott@thecrosstab.com" g <- arrangeGrob(gg3, right = textGrob(footnote, x = 0, rot = 90, hjust = .8, vjust=1.3, gp = gpar(fontface = "italic", fontsize = 12))) grid.draw(g) print(g) dev.copy(png,"ClintonWIUrban.png",width = 10, height = 8, unit = "in", res = 200) dev.off() ## printy grid.newpage() g <- arrangeGrob(gg1, gg3, ncol = 2) grid.draw(g) dev.copy(png,"ClintonWinCorr.png",width = 14, height = 8, unit = "in", res = 200) dev.off()
library(shiny) install.packages("shinythemes") library(shinythemes) ui <- navbarPage("App Title", theme = shinytheme("united"), tabPanel("Plot"), navbarMenu("More", tabPanel("summary"), "----------", "Section header", tabPanel("Table")) ) server <- function(input, output, session) { } shinyApp(ui, server)
/s21~30/s_test26_menu.R
no_license
bjh1646/R_data
R
false
false
479
r
library(shiny) install.packages("shinythemes") library(shinythemes) ui <- navbarPage("App Title", theme = shinytheme("united"), tabPanel("Plot"), navbarMenu("More", tabPanel("summary"), "----------", "Section header", tabPanel("Table")) ) server <- function(input, output, session) { } shinyApp(ui, server)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scenario_QALYloss.R \name{scenario_QALYloss} \alias{scenario_QALYloss} \title{scenario_QALYloss} \usage{ scenario_QALYloss(prop_avoided, endpoint, cohort) } \arguments{ \item{prop_avoided}{proportion LTBI cured via screening} \item{endpoint}{exit uk or death time horizon} \item{cohort}{individual level data} } \value{ list \itemize{ \item statusquo_mortality \item statusquo_morbidity \item screened_mortality \item screened_morbidity \item statusquo_mort_pp \item statusquo_morb_pp \item screened_mort_pp \item screened_morb_pp } } \description{ Splits output also into due to morbidity and mortality. } \examples{ }
/man/scenario_QALYloss.Rd
no_license
n8thangreen/ltbiScreenLite
R
false
true
711
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scenario_QALYloss.R \name{scenario_QALYloss} \alias{scenario_QALYloss} \title{scenario_QALYloss} \usage{ scenario_QALYloss(prop_avoided, endpoint, cohort) } \arguments{ \item{prop_avoided}{proportion LTBI cured via screening} \item{endpoint}{exit uk or death time horizon} \item{cohort}{individual level data} } \value{ list \itemize{ \item statusquo_mortality \item statusquo_morbidity \item screened_mortality \item screened_morbidity \item statusquo_mort_pp \item statusquo_morb_pp \item screened_mort_pp \item screened_morb_pp } } \description{ Splits output also into due to morbidity and mortality. } \examples{ }
library(boot) source('Estimators.R') source('utils.R') source('partial_LTMLE.R') source('Magic_estimator.R') MAGIC_new_bootstrap_LTMLE <- function(D, Q_hat, V_hat, gamma, evaluation_action_matrix, force_PD=T){ n <- dim(D)[1] horizon <- dim(D)[2] # # Get g^(j)s n_ids <- 10 alphas <- c(rep(0, n_ids/2), seq(0, 1, length.out = n_ids/2)) lambdas <- c(rev(seq(0, 5e-4, length.out = n_ids/2)), rep(0, n_ids/2)) js <- c(seq(1, horizon, length.out = n_ids/2), rep(horizon, n_ids/2)) R1 <- min(horizon * 8, 40) R2 <- min(horizon * 4, 20) g_js <- sapply(1:n_ids, function(j) partial_LTMLE_estimator(D, Q_hat=Q_hat, V_hat=V_hat, gamma=gamma, evaluation_action_matrix = evaluation_action_matrix, alpha=alphas[j], j=js[j], lambda=lambdas[j])$estimate) # # Get bias by bootstrapping g^(horizon) bootstrap_CI <- quantile(boot(data=D, statistic=function(data, indices) partial_LTMLE_estimator(D[indices, , ], Q_hat=Q_hat, V_hat=V_hat, gamma=gamma, evaluation_action_matrix = evaluation_action_matrix, alpha=1, j=horizon)$estimate, R = R1)$t, probs = c(0.1 / 2, 1 - 0.1 / 2)) b_n <- sapply(g_js, Vectorize(function(g_j) distance_to_interval(bootstrap_CI, g_j)) ) # Get covariance matrix by bootstrapping all g_js X <- boot(data=D, statistic=function(data, indices) sapply(1:n_ids, function(j) partial_LTMLE_estimator(D[indices, , ], Q_hat=Q_hat, V_hat=V_hat, gamma=gamma, evaluation_action_matrix = evaluation_action_matrix, alpha=alphas[j], j=js[j], lambda=lambdas[j])$estimate), R = R2)$t Omega_n <- t((X - rep(1, R2) %*% t(apply(X, 2, mean)))) %*% (X - rep(1, R2) %*% t(apply(X, 2, mean))) / R2 # Define and solve QP Dmat <- Omega_n + b_n %*% t(b_n) if(force_PD) Dmat <- Matrix::nearPD(Dmat, eig.tol=1e-10)$mat Amat <- t(rbind(rep(1, n_ids), diag(n_ids)) ) dvec <- rep(0, n_ids) b0 <- c(1, rep(0, n_ids)) x_star <- solve.QP(Dmat=Dmat, dvec=dvec, Amat=Amat, bvec=b0, meq=1)$solution # Compute the MAGIC estimate as the weighted sum of the g^(j)'s, that is x_star^\top b_n[2:horizon] estimate <- t(x_star) %*% g_js 
# Output list(estimate=estimate, x_star=x_star, g_js=g_js, b_n=b_n, Omega_n=Omega_n, bootstrap_CI=bootstrap_CI) } # # MAGIC full library debugging experiments ------------------------------------- # source('MDP_modelWin.R') # horizon <- 5; gamma <- 1; n_states <- 3; n_actions <- 2 # V0_and_Q0 <- compute_true_V_and_Q(state_transition_matrix, # transition_based_rewards, # evaluation_action_matrix, horizon, gamma = gamma) # V0 <- V0_and_Q0$V0; Q0 <- V0_and_Q0$Q0 # Q_hat <- array(dim=dim(Q0)); V_hat <- array(dim=dim(V0)) # Q_hat <- Q0; V_hat <- V0 # b <- 5e-2 * rnorm(1) # Delta_t <- 0 # n <- 200; gamma <- 1 # # cat(detectCores(), 'cores detected\n') # cl <- makeCluster(getOption("cl.cores", detectCores()-1), outfile = '') # registerDoParallel(cl) # x_stars <- foreach(i=1:63, .combine = rbind, # .packages = c('boot', 'quadprog'), # .verbose = T, .inorder = T) %dopar% { # D <- generate_discrete_MDP_dataset(n, 1, state_transition_matrix, # behavior_action_matrix, # transition_based_rewards, # horizon) # MAGIC_new_bootstrap_LTMLE(D, Q_hat, V_hat, gamma, evaluation_action_matrix)$x_star # } # stopCluster(cl) # # mean_x_star <- apply(x_stars, 2, mean) # x_star_plot <-ggplot(data=data.frame(id=1:10, mean_x_star=mean_x_star), aes(x=id, y=mean_x_star)) + # geom_bar(stat="identity") # # library(gridExtra) # grid.arrange(MSE_plot, x_star_plot, nrow=2)
/R/Magic_full_bootstrap.R
no_license
aurelienbibaut2/LTMLE_OPE
R
false
false
4,480
r
library(boot) source('Estimators.R') source('utils.R') source('partial_LTMLE.R') source('Magic_estimator.R') MAGIC_new_bootstrap_LTMLE <- function(D, Q_hat, V_hat, gamma, evaluation_action_matrix, force_PD=T){ n <- dim(D)[1] horizon <- dim(D)[2] # # Get g^(j)s n_ids <- 10 alphas <- c(rep(0, n_ids/2), seq(0, 1, length.out = n_ids/2)) lambdas <- c(rev(seq(0, 5e-4, length.out = n_ids/2)), rep(0, n_ids/2)) js <- c(seq(1, horizon, length.out = n_ids/2), rep(horizon, n_ids/2)) R1 <- min(horizon * 8, 40) R2 <- min(horizon * 4, 20) g_js <- sapply(1:n_ids, function(j) partial_LTMLE_estimator(D, Q_hat=Q_hat, V_hat=V_hat, gamma=gamma, evaluation_action_matrix = evaluation_action_matrix, alpha=alphas[j], j=js[j], lambda=lambdas[j])$estimate) # # Get bias by bootstrapping g^(horizon) bootstrap_CI <- quantile(boot(data=D, statistic=function(data, indices) partial_LTMLE_estimator(D[indices, , ], Q_hat=Q_hat, V_hat=V_hat, gamma=gamma, evaluation_action_matrix = evaluation_action_matrix, alpha=1, j=horizon)$estimate, R = R1)$t, probs = c(0.1 / 2, 1 - 0.1 / 2)) b_n <- sapply(g_js, Vectorize(function(g_j) distance_to_interval(bootstrap_CI, g_j)) ) # Get covariance matrix by bootstrapping all g_js X <- boot(data=D, statistic=function(data, indices) sapply(1:n_ids, function(j) partial_LTMLE_estimator(D[indices, , ], Q_hat=Q_hat, V_hat=V_hat, gamma=gamma, evaluation_action_matrix = evaluation_action_matrix, alpha=alphas[j], j=js[j], lambda=lambdas[j])$estimate), R = R2)$t Omega_n <- t((X - rep(1, R2) %*% t(apply(X, 2, mean)))) %*% (X - rep(1, R2) %*% t(apply(X, 2, mean))) / R2 # Define and solve QP Dmat <- Omega_n + b_n %*% t(b_n) if(force_PD) Dmat <- Matrix::nearPD(Dmat, eig.tol=1e-10)$mat Amat <- t(rbind(rep(1, n_ids), diag(n_ids)) ) dvec <- rep(0, n_ids) b0 <- c(1, rep(0, n_ids)) x_star <- solve.QP(Dmat=Dmat, dvec=dvec, Amat=Amat, bvec=b0, meq=1)$solution # Compute the MAGIC estimate as the weighted sum of the g^(j)'s, that is x_star^\top b_n[2:horizon] estimate <- t(x_star) %*% g_js 
# Output list(estimate=estimate, x_star=x_star, g_js=g_js, b_n=b_n, Omega_n=Omega_n, bootstrap_CI=bootstrap_CI) } # # MAGIC full library debugging experiments ------------------------------------- # source('MDP_modelWin.R') # horizon <- 5; gamma <- 1; n_states <- 3; n_actions <- 2 # V0_and_Q0 <- compute_true_V_and_Q(state_transition_matrix, # transition_based_rewards, # evaluation_action_matrix, horizon, gamma = gamma) # V0 <- V0_and_Q0$V0; Q0 <- V0_and_Q0$Q0 # Q_hat <- array(dim=dim(Q0)); V_hat <- array(dim=dim(V0)) # Q_hat <- Q0; V_hat <- V0 # b <- 5e-2 * rnorm(1) # Delta_t <- 0 # n <- 200; gamma <- 1 # # cat(detectCores(), 'cores detected\n') # cl <- makeCluster(getOption("cl.cores", detectCores()-1), outfile = '') # registerDoParallel(cl) # x_stars <- foreach(i=1:63, .combine = rbind, # .packages = c('boot', 'quadprog'), # .verbose = T, .inorder = T) %dopar% { # D <- generate_discrete_MDP_dataset(n, 1, state_transition_matrix, # behavior_action_matrix, # transition_based_rewards, # horizon) # MAGIC_new_bootstrap_LTMLE(D, Q_hat, V_hat, gamma, evaluation_action_matrix)$x_star # } # stopCluster(cl) # # mean_x_star <- apply(x_stars, 2, mean) # x_star_plot <-ggplot(data=data.frame(id=1:10, mean_x_star=mean_x_star), aes(x=id, y=mean_x_star)) + # geom_bar(stat="identity") # # library(gridExtra) # grid.arrange(MSE_plot, x_star_plot, nrow=2)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/load_genelist.r \name{load_genelist} \alias{load_genelist} \title{load_genelist} \usage{ load_genelist(list_path, lookuptable, speciesWanted = "human") } \arguments{ \item{list_path}{Folder where it is stored} \item{lookuptable}{Dataframe containing HGNC.symbol and MGI.symbol as columns} \item{speciesWanted}{Either 'human' or 'mouse'} } \value{ res Results } \description{ \code{load_genelist} Loads a gene list from a file } \details{ Expects the first line to be descriptive } \examples{ # list_path = sprintf("\%s/\%s.txt",path,listN) # geneListHGNC = load.genelist(list_path,orthologs,speciesWanted="human") }
/man/load_genelist.Rd
no_license
NathanSkene/DendriticEnrichmentPackage
R
false
true
696
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/load_genelist.r \name{load_genelist} \alias{load_genelist} \title{load_genelist} \usage{ load_genelist(list_path, lookuptable, speciesWanted = "human") } \arguments{ \item{list_path}{Folder where it is stored} \item{lookuptable}{Dataframe containing HGNC.symbol and MGI.symbol as columns} \item{speciesWanted}{Either 'human' or 'mouse'} } \value{ res Results } \description{ \code{load_genelist} Loads a gene list from a file } \details{ Expects the first line to be descriptive } \examples{ # list_path = sprintf("\%s/\%s.txt",path,listN) # geneListHGNC = load.genelist(list_path,orthologs,speciesWanted="human") }
#install.packages("httr", dependencies = TRUE) #install.packages("xml2", dependencies = TRUE) #install.packages("urltools", dependencies = TRUE) #install.packages("jsonlite", dependencies = TRUE) #install.packages("anytime", dependencies = TRUE) library(httr) library(xml2) library(urltools) library(jsonlite) library(anytime) create_so_req_url <- function (filter_text, api_key, query, pagesize, page) { #Search for items matching the query string from StackOverflow api_url = 'https://api.stackexchange.com/2.2/search/advanced?order=desc&sort=activity&q=' api_url = paste(api_url, query, sep = '', collapse = '') api_url = paste(api_url, '&filter=', sep = '', collapse = '') api_url = paste(api_url, filter_text, sep = '', collapse = '') api_url = paste(api_url, '&site=stackoverflow',sep = '', collapse = '') api_url = paste(api_url, '&key=', sep = '', collapse = '') api_url = paste(api_url, api_key, sep = '', collapse = '') api_url = paste(api_url, '&pagesize=', sep = '', collapse = '') api_url = paste(api_url, pagesize, sep = '', collapse = '') api_url = paste(api_url, '&page=', sep = '', collapse = '') api_url = paste(api_url, page, sep = '', collapse = '') } get_stackoverflow_data = function (query_string, pagesize) { if (is.null(so_api_key) || so_api_key == "") { print("WARN! 
so_api_key variable not set") } #get total api_url_total <- create_so_req_url("total", so_api_key, query_string, pagesize, 1) sample_total <- GET(URLencode(api_url_total)) content_total <- content(sample_total) #get data filter_text = "withbody" api_url <- create_so_req_url(filter_text, so_api_key, query_string, pagesize, 1) #Prepare the url and fetch the data api_url = URLencode(api_url) sample2 = GET(api_url) my_data = content(sample2) return_data_frame = data.frame() has_more_pages = my_data$has_more page_number = 1 tag_number = 0 #while (length(my_data$items) > 0) { repeat { print(paste("page number: ", page_number, " items: ", page_number*pagesize, " out of ", content_total$total, sep = '', collapse = '')) for (outerloop in 1:(length(my_data$items))) { for (tagloop in 1: (length(my_data$items[[outerloop]]$tags))) { if (tag_number == 0) { tags = my_data$items[[outerloop]]$tags[[tagloop]] tag_number = 1 } else { tags = paste(tags, my_data$items[[outerloop]]$tags[[tagloop]], sep = ';', collapse = '') tag_number = tag_number + 1 } } date_cr = anydate(my_data$items[[outerloop]]$creation_date) date_la = anydate(my_data$items[[outerloop]]$last_activity_date) temp <- data.frame( AuthorId = ifelse(is.null(my_data$items[[outerloop]]$owner$user_id),0,my_data$items[[outerloop]]$owner$user_id), Q_id = ifelse(is.null(my_data$items[[outerloop]]$question_id), '', my_data$items[[outerloop]]$question_id), Title = ifelse(is.null(my_data$items[[outerloop]]$title),'',my_data$items[[outerloop]]$title), Abstract = ifelse(is.null(my_data$items[[outerloop]]$body),'',my_data$items[[outerloop]]$body), Views = ifelse(is.null(my_data$items[[outerloop]]$view_count),0,my_data$items[[outerloop]]$view_count), Answers = ifelse(is.null(my_data$items[[outerloop]]$answer_count),0,my_data$items[[outerloop]]$answer_count), Cites = ifelse(is.null(my_data$items[[outerloop]]$score),0,my_data$items[[outerloop]]$score), Tags_n = tag_number, Tags = ifelse(is.null(tags),'',tags), Date = 
ifelse(is.null(date_cr), 0, as.character(date_cr)), CR_Date = ifelse(is.null(date_cr), 0, as.character(date_cr)), LA_Date = ifelse(is.null(date_la), 0, as.character(date_la)), stringsAsFactors=F) tag_number = 0 tags = NULL return_data_frame <- rbind(return_data_frame, temp) } if (!has_more_pages) break else { page_number = page_number + 1 api_url <- create_so_req_url(filter_text, so_api_key, query_string, pagesize, page_number) api_url = URLencode(api_url) sample2 = GET(api_url) my_data = content(sample2) has_more_pages = my_data$has_more } } return_data_frame }
/FunctionsStackOverflowApi.R
no_license
ppeerttu/TrendMining
R
false
false
4,317
r
#install.packages("httr", dependencies = TRUE) #install.packages("xml2", dependencies = TRUE) #install.packages("urltools", dependencies = TRUE) #install.packages("jsonlite", dependencies = TRUE) #install.packages("anytime", dependencies = TRUE) library(httr) library(xml2) library(urltools) library(jsonlite) library(anytime) create_so_req_url <- function (filter_text, api_key, query, pagesize, page) { #Search for items matching the query string from StackOverflow api_url = 'https://api.stackexchange.com/2.2/search/advanced?order=desc&sort=activity&q=' api_url = paste(api_url, query, sep = '', collapse = '') api_url = paste(api_url, '&filter=', sep = '', collapse = '') api_url = paste(api_url, filter_text, sep = '', collapse = '') api_url = paste(api_url, '&site=stackoverflow',sep = '', collapse = '') api_url = paste(api_url, '&key=', sep = '', collapse = '') api_url = paste(api_url, api_key, sep = '', collapse = '') api_url = paste(api_url, '&pagesize=', sep = '', collapse = '') api_url = paste(api_url, pagesize, sep = '', collapse = '') api_url = paste(api_url, '&page=', sep = '', collapse = '') api_url = paste(api_url, page, sep = '', collapse = '') } get_stackoverflow_data = function (query_string, pagesize) { if (is.null(so_api_key) || so_api_key == "") { print("WARN! 
so_api_key variable not set") } #get total api_url_total <- create_so_req_url("total", so_api_key, query_string, pagesize, 1) sample_total <- GET(URLencode(api_url_total)) content_total <- content(sample_total) #get data filter_text = "withbody" api_url <- create_so_req_url(filter_text, so_api_key, query_string, pagesize, 1) #Prepare the url and fetch the data api_url = URLencode(api_url) sample2 = GET(api_url) my_data = content(sample2) return_data_frame = data.frame() has_more_pages = my_data$has_more page_number = 1 tag_number = 0 #while (length(my_data$items) > 0) { repeat { print(paste("page number: ", page_number, " items: ", page_number*pagesize, " out of ", content_total$total, sep = '', collapse = '')) for (outerloop in 1:(length(my_data$items))) { for (tagloop in 1: (length(my_data$items[[outerloop]]$tags))) { if (tag_number == 0) { tags = my_data$items[[outerloop]]$tags[[tagloop]] tag_number = 1 } else { tags = paste(tags, my_data$items[[outerloop]]$tags[[tagloop]], sep = ';', collapse = '') tag_number = tag_number + 1 } } date_cr = anydate(my_data$items[[outerloop]]$creation_date) date_la = anydate(my_data$items[[outerloop]]$last_activity_date) temp <- data.frame( AuthorId = ifelse(is.null(my_data$items[[outerloop]]$owner$user_id),0,my_data$items[[outerloop]]$owner$user_id), Q_id = ifelse(is.null(my_data$items[[outerloop]]$question_id), '', my_data$items[[outerloop]]$question_id), Title = ifelse(is.null(my_data$items[[outerloop]]$title),'',my_data$items[[outerloop]]$title), Abstract = ifelse(is.null(my_data$items[[outerloop]]$body),'',my_data$items[[outerloop]]$body), Views = ifelse(is.null(my_data$items[[outerloop]]$view_count),0,my_data$items[[outerloop]]$view_count), Answers = ifelse(is.null(my_data$items[[outerloop]]$answer_count),0,my_data$items[[outerloop]]$answer_count), Cites = ifelse(is.null(my_data$items[[outerloop]]$score),0,my_data$items[[outerloop]]$score), Tags_n = tag_number, Tags = ifelse(is.null(tags),'',tags), Date = 
ifelse(is.null(date_cr), 0, as.character(date_cr)), CR_Date = ifelse(is.null(date_cr), 0, as.character(date_cr)), LA_Date = ifelse(is.null(date_la), 0, as.character(date_la)), stringsAsFactors=F) tag_number = 0 tags = NULL return_data_frame <- rbind(return_data_frame, temp) } if (!has_more_pages) break else { page_number = page_number + 1 api_url <- create_so_req_url(filter_text, so_api_key, query_string, pagesize, page_number) api_url = URLencode(api_url) sample2 = GET(api_url) my_data = content(sample2) has_more_pages = my_data$has_more } } return_data_frame }
#### Gui Russo e Gabi Campos ### Invalid Votes for Senator- Error in the Ballot ## Matching neighboring cities by presidential votes # October 22, 2018 # Clearing rm(list=ls()) # Loading the neighbors data #------------------------------------------------------ #setwd("/Users/robertaazzi1/Desktop/Nulo_Senador/") # Change HERE #data<-read.table("Data/BR_mun_neighbors.txt", encoding="latin1") # Neighboring Municipality Data pres2010<-get_votes(2010, "President", "Municipality") %>% mutate(COD_MUN_TSE=as.integer(COD_MUN_TSE)) # Electoral Data pres2010<-pres2010[, -which(colnames(pres2010) %in% c("DESCRICAO_CARGO", "DESCRICAO_ELEICAO", "CODIGO_CARGO", "SIGLA_UE"))] # Subsetting pres2010_round1<-pres2010[pres2010$NUM_TURNO==1,] # Por turno pres2010_round2<-pres2010[pres2010$NUM_TURNO==2,] pres2010_municipality1<-pres2010_round1[, c("QTDE_VOTOS")] # Subsetting head(pres2010_municipality1); nrow(pres2010_municipality1) pres2010_mun<-aggregate(pres2010_municipality1, by=list(pres2010_round1$COD_MUN_IBGE), FUN=sum) head(pres2010_mun); nrow(pres2010_mun) colnames(pres2010_mun)<-c("COD_MUN_IBGE", "QTDE_VOTOS") # Total number of votes # Aécio pres2010_aecio<-as.data.frame( # Votes for Aecio pres2010_round1[pres2010_round1 $NUMERO_CANDIDATO==45,c("COD_MUN_TSE", "NOME_MUNICIPIO", "UF", "NOME_MACRO", "COD_MUN_IBGE", "QTDE_VOTOS")]) head(pres2010_aecio); nrow(pres2010_aecio); colnames(pres2010_aecio)[6]<-"votos_aecio" # Dilma pres2010_dilma<-as.data.frame( # Votes for Dilma pres2010_round1[pres2010_round1 $NUMERO_CANDIDATO==13,c("COD_MUN_IBGE", "QTDE_VOTOS")]) head(pres2010_dilma); nrow(pres2010_dilma); colnames(pres2010_dilma)[2]<-"votos_dilma" pres2010_aecio[!pres2010_aecio$COD_MUN_IBGE %in% pres2010_dilma$COD_MUN_IBGE,] # In what municipality did Dilma not have votes? 
pres2010_dilma[nrow(pres2010_dilma)+1,]<-c(29297, 0) # Creating the extra municipality # Marina pres2010_marina<-as.data.frame( # Votes for Marina pres2010_round1[pres2010_round1$NUMERO_CANDIDATO==40,c("COD_MUN_IBGE", "QTDE_VOTOS")]) head(pres2010_marina); nrow(pres2010_marina); colnames(pres2010_marina)[2]<-"votos_marina" # Invalid votes #------------------------------------------------------- pres2010_inv<-as.data.frame( # Votes - Invalid pres2010_round1[pres2010_round1$NUMERO_CANDIDATO==95,c("COD_MUN_IBGE", "QTDE_VOTOS")]) head(pres2010_inv); nrow(pres2010_inv); colnames(pres2010_inv)[2]<-"votos_inv" # In what municipality there was not invalid votes? mis<-pres2010_aecio[!pres2010_aecio$COD_MUN_IBGE %in% pres2010_inv$COD_MUN_IBGE,] pres2010_inv<-rbind(pres2010_inv, data.frame(COD_MUN_IBGE=mis[,1], votos_inv=rep(0, nrow(mis)))) pres2010_inv2<-as.data.frame( # Votes - Invalid pres2010_round1[pres2010_round1$NUMERO_CANDIDATO==96,c("COD_MUN_IBGE", "QTDE_VOTOS")]) head(pres2010_inv2); nrow(pres2010_inv2); colnames(pres2010_inv2)[2]<-"votos_inv2" pres2010_inv<-merge(pres2010_inv, pres2010_inv2, by="COD_MUN_IBGE", all=T) pres2010_inv$votos_inv<-pres2010_inv$votos_inv+pres2010_inv$votos_inv2 pres2010_inv$votos_inv2<-NULL pres2010_inv$votos_inv[is.na(pres2010_inv$votos_inv)]<-0 # Downloading senator votes --------------------------------------------------------- senate2010<- get_elections(2010, "Senator", regional_aggregation = "Municipality") %>% mutate(COD_MUN_TSE=as.integer(COD_MUN_TSE)) # Electoral Data senate2010_municipality<-senate2010[, c("QTDE_VOTOS")] # Subsetting head(senate2010_municipality); nrow(senate2010_municipality) senate2010_mun<-aggregate(senate2010_municipality, by=list(senate2010$COD_MUN_IBGE), FUN=sum) head(senate2010_mun); nrow(senate2010_mun) colnames(senate2010_mun)<-c("COD_MUN_IBGE", "QTDE_VOTOS") # Total number of votes # PSDB senate2010_PSDB<-as.data.frame( # Votes for PSDB Senators senate2010[senate2010 $NUMERO_PARTIDO==45,c("COD_MUN_TSE", 
"UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(senate2010_PSDB); nrow(senate2010_PSDB); colnames(senate2010_PSDB)[4]<-"votos_PSDB_sen" # PT senate2010_PT<-as.data.frame( # Votes for PT Senators senate2010[senate2010 $NUMERO_PARTIDO==13,c("COD_MUN_TSE", "UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(senate2010_PT); nrow(senate2010_PT); colnames(senate2010_PT)[4]<-"votos_PT_sen" # PSB senate2010_PSB<-as.data.frame( # Votes for PSB Senators senate2010[senate2010 $NUMERO_PARTIDO==40,c("COD_MUN_TSE", "UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(senate2010_PSB); nrow(senate2010_PSB); colnames(senate2010_PSB)[4]<-"votos_PSB_sen" # Invalid votes for Senator senate2010_invalid<-get_elections(2010, "Senator", regional_aggregation = "Municipality", political_aggregation = "Consolidated") senate2010_invalid<-senate2010_invalid %>% select(COD_MUN_TSE, UF, QT_VOTOS_BRANCOS, QT_VOTOS_NULOS, QT_VOTOS_NOMINAIS) %>% group_by(COD_MUN_TSE, UF) %>% summarise_at(c("QT_VOTOS_BRANCOS", "QT_VOTOS_NULOS", "QT_VOTOS_NOMINAIS"), funs(sum)) %>% mutate(votos_inv_sen=(QT_VOTOS_BRANCOS+QT_VOTOS_NULOS)) %>% select(-QT_VOTOS_NULOS, -QT_VOTOS_BRANCOS) %>% rename(QT_VOTOS_sen=QT_VOTOS_NOMINAIS) # Downloading legislative federal votes ------------------------------------------- DF2010<- get_votes(2010, "Deputado Federal", regional_aggregation = "Municipality") %>% mutate(COD_MUN_TSE=as.integer(COD_MUN_TSE)) # Electoral Data DF2010_municipality<-DF2010[, c("QTDE_VOTOS")] # Subsetting head(DF2010_municipality); nrow(DF2010_municipality) DF2010_mun<-aggregate(DF2010_municipality, by=list(DF2010$COD_MUN_IBGE), FUN=sum) head(DF2010_mun); nrow(DF2010_mun) colnames(DF2010_mun)<-c("COD_MUN_IBGE", "QTDE_VOTOS") # Total number of votes # PSDB DF2010_PSDB<-as.data.frame( # Votes for PSDB Senators DF2010[DF2010 $NUMERO_CANDIDATO==45,c("COD_MUN_TSE", "UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(DF2010_PSDB); nrow(DF2010_PSDB); colnames(DF2010_PSDB)[4]<-"votos_PSDB_PLV" # PT DF2010_PT<-as.data.frame( # Votes for PT Senators 
DF2010[DF2010 $NUMERO_CANDIDATO==13,c("COD_MUN_TSE", "UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(DF2010_PT); nrow(DF2010_PT); colnames(DF2010_PT)[4]<-"votos_PT_PLV" # PSB DF2010_PSB<-as.data.frame( # Votes for PSB Senators DF2010[DF2010 $NUMERO_CANDIDATO==40,c("COD_MUN_TSE", "UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(DF2010_PSB); nrow(DF2010_PSB); colnames(DF2010_PSB)[4]<-"votos_PSB_PLV" # Invalid votes for Senator DF2010_invalid<-get_elections(2010, "Deputado Federal", regional_aggregation = "Municipality", political_aggregation = "Consolidated") DF2010_invalid<-DF2010_invalid %>% select(COD_MUN_TSE, UF, QT_VOTOS_NOMINAIS, QT_VOTOS_BRANCOS, QT_VOTOS_NULOS) %>% group_by(COD_MUN_TSE, UF) %>% summarise_at(c("QT_VOTOS_BRANCOS", "QT_VOTOS_NULOS","QT_VOTOS_NOMINAIS"), funs(sum)) %>% mutate(votos_inv_DF=(QT_VOTOS_BRANCOS+QT_VOTOS_NULOS)) %>% select(-QT_VOTOS_NULOS, -QT_VOTOS_BRANCOS) %>% rename(QT_VOTOS_DF=QT_VOTOS_NOMINAIS) rm(DF2010) # Now, let's merge the presidential votes datafiles #----------------------------- data_politics<-merge(pres2010_aecio, pres2010_dilma, by="COD_MUN_IBGE", all=T) %>% filter(UF!="ZZ") data_politics<-left_join(data_politics, pres2010_marina, by="COD_MUN_IBGE") data_politics<-left_join(data_politics, pres2010_inv, by="COD_MUN_IBGE") data_politics<-left_join(data_politics, pres2010_mun, by="COD_MUN_IBGE"); head(data_politics) data_politics$votos_aecio_pc<-100*data_politics$votos_aecio/data_politics$QTDE_VOTOS data_politics$votos_dilma_pc<-100*data_politics$votos_dilma/data_politics$QTDE_VOTOS # Percentages data_politics$votos_marina_pc<-100*data_politics$votos_marina/data_politics$QTDE_VOTOS data_politics$votos_inv_pc<-100*data_politics$votos_inv/data_politics$QTDE_VOTOS; head(data_politics) head(data_politics) # Now, let's merge the senator votes datafiles #----------------------------- data_politics<-left_join(data_politics, senate2010_PSDB, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, 
senate2010_PT, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, senate2010_PSB, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, senate2010_invalid, by=c("COD_MUN_TSE", "UF") , all=T) #sub NA per 0 data_politics$votos_PSDB_sen<-ifelse(is.na(data_politics$votos_PSDB_sen),0,data_politics$votos_PSDB_sen) data_politics$votos_PT_sen<-ifelse(is.na(data_politics$votos_PT_sen),0,data_politics$votos_PT_sen) data_politics$votos_PSB_sen<-ifelse(is.na(data_politics$votos_PSB_sen),0,data_politics$votos_PSB_sen) data_politics$votos_PSDB_sen_pc<-100*data_politics$votos_PSDB_sen/data_politics$QT_VOTOS_sen data_politics$votos_PT_sen_pc<-100*data_politics$votos_PT_sen/data_politics$QT_VOTOS_sen # Percentages data_politics$votos_PSB_sen_pc<-100*data_politics$votos_PSB_sen/data_politics$QT_VOTOS_sen data_politics$votos_inv_sen_pc<-100*data_politics$votos_inv_sen/data_politics$QT_VOTOS_sen; head(data_politics) # Now, let's merge the Federal Deputy votes datafiles #----------------------------- data_politics<-left_join(data_politics, DF2010_PSDB, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, DF2010_PT, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, DF2010_PSB, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, DF2010_invalid, by=c("COD_MUN_TSE", "UF") , all=T) #sub NA per 0 data_politics$votos_PSDB_PLV<-ifelse(is.na(data_politics$votos_PSDB_PLV),0,data_politics$votos_PSDB_PLV) data_politics$votos_PT_PLV<-ifelse(is.na(data_politics$votos_PT_PLV),0,data_politics$votos_PT_PLV) data_politics$votos_PSB_PLV<-ifelse(is.na(data_politics$votos_PSB_PLV),0,data_politics$votos_PSB_PLV) data_politics$votos_PSDB_PLV_pc<-100*data_politics$votos_PSDB_PLV/data_politics$QT_VOTOS_DF data_politics$votos_PT_PLV_pc<-100*data_politics$votos_PT_PLV/data_politics$QT_VOTOS_DF # Percentages 
data_politics$votos_PSB_PLV_pc<-100*data_politics$votos_PSB_PLV/data_politics$QT_VOTOS_DF data_politics$votos_inv_PLV_pc<-100*data_politics$votos_inv_DF/data_politics$QT_VOTOS_DF; head(data_politics) library(ggplot2) data_politics %>% select(COD_MUN_IBGE, votos_PT_PLV_pc, votos_PSDB_PLV_pc, votos_PSB_PLV_pc) %>% gather(COD_MUN_IBGE) %>% ggplot(aes(x = factor(COD_MUN_IBGE), y=value)) + geom_boxplot() #voto na legenda do PSDB tem mediana bem próxima a do PT e maior variância PLV_2010<-data_politics %>% mutate(regiao = substr(COD_MUN_IBGE,1,1)) %>% select(regiao, COD_MUN_IBGE, votos_PT_PLV_pc, votos_PSDB_PLV_pc, votos_PSB_PLV_pc) %>% gather(variavel, valor, 3:5) %>% ggplot(aes(x = regiao, y=valor)) + geom_boxplot(aes(fill=variavel)) + scale_y_continuous(limits = c(0,10)) PLV_2010
/PLV_2010.R
no_license
Gabiscampos/nulo_senador
R
false
false
10,689
r
#### Gui Russo e Gabi Campos ### Invalid Votes for Senator- Error in the Ballot ## Matching neighboring cities by presidential votes # October 22, 2018 # Clearing rm(list=ls()) # Loading the neighbors data #------------------------------------------------------ #setwd("/Users/robertaazzi1/Desktop/Nulo_Senador/") # Change HERE #data<-read.table("Data/BR_mun_neighbors.txt", encoding="latin1") # Neighboring Municipality Data pres2010<-get_votes(2010, "President", "Municipality") %>% mutate(COD_MUN_TSE=as.integer(COD_MUN_TSE)) # Electoral Data pres2010<-pres2010[, -which(colnames(pres2010) %in% c("DESCRICAO_CARGO", "DESCRICAO_ELEICAO", "CODIGO_CARGO", "SIGLA_UE"))] # Subsetting pres2010_round1<-pres2010[pres2010$NUM_TURNO==1,] # Por turno pres2010_round2<-pres2010[pres2010$NUM_TURNO==2,] pres2010_municipality1<-pres2010_round1[, c("QTDE_VOTOS")] # Subsetting head(pres2010_municipality1); nrow(pres2010_municipality1) pres2010_mun<-aggregate(pres2010_municipality1, by=list(pres2010_round1$COD_MUN_IBGE), FUN=sum) head(pres2010_mun); nrow(pres2010_mun) colnames(pres2010_mun)<-c("COD_MUN_IBGE", "QTDE_VOTOS") # Total number of votes # Aécio pres2010_aecio<-as.data.frame( # Votes for Aecio pres2010_round1[pres2010_round1 $NUMERO_CANDIDATO==45,c("COD_MUN_TSE", "NOME_MUNICIPIO", "UF", "NOME_MACRO", "COD_MUN_IBGE", "QTDE_VOTOS")]) head(pres2010_aecio); nrow(pres2010_aecio); colnames(pres2010_aecio)[6]<-"votos_aecio" # Dilma pres2010_dilma<-as.data.frame( # Votes for Dilma pres2010_round1[pres2010_round1 $NUMERO_CANDIDATO==13,c("COD_MUN_IBGE", "QTDE_VOTOS")]) head(pres2010_dilma); nrow(pres2010_dilma); colnames(pres2010_dilma)[2]<-"votos_dilma" pres2010_aecio[!pres2010_aecio$COD_MUN_IBGE %in% pres2010_dilma$COD_MUN_IBGE,] # In what municipality did Dilma not have votes? 
pres2010_dilma[nrow(pres2010_dilma)+1,]<-c(29297, 0) # Creating the extra municipality # Marina pres2010_marina<-as.data.frame( # Votes for Marina pres2010_round1[pres2010_round1$NUMERO_CANDIDATO==40,c("COD_MUN_IBGE", "QTDE_VOTOS")]) head(pres2010_marina); nrow(pres2010_marina); colnames(pres2010_marina)[2]<-"votos_marina" # Invalid votes #------------------------------------------------------- pres2010_inv<-as.data.frame( # Votes - Invalid pres2010_round1[pres2010_round1$NUMERO_CANDIDATO==95,c("COD_MUN_IBGE", "QTDE_VOTOS")]) head(pres2010_inv); nrow(pres2010_inv); colnames(pres2010_inv)[2]<-"votos_inv" # In what municipality there was not invalid votes? mis<-pres2010_aecio[!pres2010_aecio$COD_MUN_IBGE %in% pres2010_inv$COD_MUN_IBGE,] pres2010_inv<-rbind(pres2010_inv, data.frame(COD_MUN_IBGE=mis[,1], votos_inv=rep(0, nrow(mis)))) pres2010_inv2<-as.data.frame( # Votes - Invalid pres2010_round1[pres2010_round1$NUMERO_CANDIDATO==96,c("COD_MUN_IBGE", "QTDE_VOTOS")]) head(pres2010_inv2); nrow(pres2010_inv2); colnames(pres2010_inv2)[2]<-"votos_inv2" pres2010_inv<-merge(pres2010_inv, pres2010_inv2, by="COD_MUN_IBGE", all=T) pres2010_inv$votos_inv<-pres2010_inv$votos_inv+pres2010_inv$votos_inv2 pres2010_inv$votos_inv2<-NULL pres2010_inv$votos_inv[is.na(pres2010_inv$votos_inv)]<-0 # Downloading senator votes --------------------------------------------------------- senate2010<- get_elections(2010, "Senator", regional_aggregation = "Municipality") %>% mutate(COD_MUN_TSE=as.integer(COD_MUN_TSE)) # Electoral Data senate2010_municipality<-senate2010[, c("QTDE_VOTOS")] # Subsetting head(senate2010_municipality); nrow(senate2010_municipality) senate2010_mun<-aggregate(senate2010_municipality, by=list(senate2010$COD_MUN_IBGE), FUN=sum) head(senate2010_mun); nrow(senate2010_mun) colnames(senate2010_mun)<-c("COD_MUN_IBGE", "QTDE_VOTOS") # Total number of votes # PSDB senate2010_PSDB<-as.data.frame( # Votes for PSDB Senators senate2010[senate2010 $NUMERO_PARTIDO==45,c("COD_MUN_TSE", 
"UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(senate2010_PSDB); nrow(senate2010_PSDB); colnames(senate2010_PSDB)[4]<-"votos_PSDB_sen" # PT senate2010_PT<-as.data.frame( # Votes for PT Senators senate2010[senate2010 $NUMERO_PARTIDO==13,c("COD_MUN_TSE", "UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(senate2010_PT); nrow(senate2010_PT); colnames(senate2010_PT)[4]<-"votos_PT_sen" # PSB senate2010_PSB<-as.data.frame( # Votes for PSB Senators senate2010[senate2010 $NUMERO_PARTIDO==40,c("COD_MUN_TSE", "UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(senate2010_PSB); nrow(senate2010_PSB); colnames(senate2010_PSB)[4]<-"votos_PSB_sen" # Invalid votes for Senator senate2010_invalid<-get_elections(2010, "Senator", regional_aggregation = "Municipality", political_aggregation = "Consolidated") senate2010_invalid<-senate2010_invalid %>% select(COD_MUN_TSE, UF, QT_VOTOS_BRANCOS, QT_VOTOS_NULOS, QT_VOTOS_NOMINAIS) %>% group_by(COD_MUN_TSE, UF) %>% summarise_at(c("QT_VOTOS_BRANCOS", "QT_VOTOS_NULOS", "QT_VOTOS_NOMINAIS"), funs(sum)) %>% mutate(votos_inv_sen=(QT_VOTOS_BRANCOS+QT_VOTOS_NULOS)) %>% select(-QT_VOTOS_NULOS, -QT_VOTOS_BRANCOS) %>% rename(QT_VOTOS_sen=QT_VOTOS_NOMINAIS) # Downloading legislative federal votes ------------------------------------------- DF2010<- get_votes(2010, "Deputado Federal", regional_aggregation = "Municipality") %>% mutate(COD_MUN_TSE=as.integer(COD_MUN_TSE)) # Electoral Data DF2010_municipality<-DF2010[, c("QTDE_VOTOS")] # Subsetting head(DF2010_municipality); nrow(DF2010_municipality) DF2010_mun<-aggregate(DF2010_municipality, by=list(DF2010$COD_MUN_IBGE), FUN=sum) head(DF2010_mun); nrow(DF2010_mun) colnames(DF2010_mun)<-c("COD_MUN_IBGE", "QTDE_VOTOS") # Total number of votes # PSDB DF2010_PSDB<-as.data.frame( # Votes for PSDB Senators DF2010[DF2010 $NUMERO_CANDIDATO==45,c("COD_MUN_TSE", "UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(DF2010_PSDB); nrow(DF2010_PSDB); colnames(DF2010_PSDB)[4]<-"votos_PSDB_PLV" # PT DF2010_PT<-as.data.frame( # Votes for PT Senators 
DF2010[DF2010 $NUMERO_CANDIDATO==13,c("COD_MUN_TSE", "UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(DF2010_PT); nrow(DF2010_PT); colnames(DF2010_PT)[4]<-"votos_PT_PLV" # PSB DF2010_PSB<-as.data.frame( # Votes for PSB Senators DF2010[DF2010 $NUMERO_CANDIDATO==40,c("COD_MUN_TSE", "UF","COD_MUN_IBGE", "QTDE_VOTOS")]) head(DF2010_PSB); nrow(DF2010_PSB); colnames(DF2010_PSB)[4]<-"votos_PSB_PLV" # Invalid votes for Senator DF2010_invalid<-get_elections(2010, "Deputado Federal", regional_aggregation = "Municipality", political_aggregation = "Consolidated") DF2010_invalid<-DF2010_invalid %>% select(COD_MUN_TSE, UF, QT_VOTOS_NOMINAIS, QT_VOTOS_BRANCOS, QT_VOTOS_NULOS) %>% group_by(COD_MUN_TSE, UF) %>% summarise_at(c("QT_VOTOS_BRANCOS", "QT_VOTOS_NULOS","QT_VOTOS_NOMINAIS"), funs(sum)) %>% mutate(votos_inv_DF=(QT_VOTOS_BRANCOS+QT_VOTOS_NULOS)) %>% select(-QT_VOTOS_NULOS, -QT_VOTOS_BRANCOS) %>% rename(QT_VOTOS_DF=QT_VOTOS_NOMINAIS) rm(DF2010) # Now, let's merge the presidential votes datafiles #----------------------------- data_politics<-merge(pres2010_aecio, pres2010_dilma, by="COD_MUN_IBGE", all=T) %>% filter(UF!="ZZ") data_politics<-left_join(data_politics, pres2010_marina, by="COD_MUN_IBGE") data_politics<-left_join(data_politics, pres2010_inv, by="COD_MUN_IBGE") data_politics<-left_join(data_politics, pres2010_mun, by="COD_MUN_IBGE"); head(data_politics) data_politics$votos_aecio_pc<-100*data_politics$votos_aecio/data_politics$QTDE_VOTOS data_politics$votos_dilma_pc<-100*data_politics$votos_dilma/data_politics$QTDE_VOTOS # Percentages data_politics$votos_marina_pc<-100*data_politics$votos_marina/data_politics$QTDE_VOTOS data_politics$votos_inv_pc<-100*data_politics$votos_inv/data_politics$QTDE_VOTOS; head(data_politics) head(data_politics) # Now, let's merge the senator votes datafiles #----------------------------- data_politics<-left_join(data_politics, senate2010_PSDB, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, 
senate2010_PT, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, senate2010_PSB, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, senate2010_invalid, by=c("COD_MUN_TSE", "UF") , all=T) #sub NA per 0 data_politics$votos_PSDB_sen<-ifelse(is.na(data_politics$votos_PSDB_sen),0,data_politics$votos_PSDB_sen) data_politics$votos_PT_sen<-ifelse(is.na(data_politics$votos_PT_sen),0,data_politics$votos_PT_sen) data_politics$votos_PSB_sen<-ifelse(is.na(data_politics$votos_PSB_sen),0,data_politics$votos_PSB_sen) data_politics$votos_PSDB_sen_pc<-100*data_politics$votos_PSDB_sen/data_politics$QT_VOTOS_sen data_politics$votos_PT_sen_pc<-100*data_politics$votos_PT_sen/data_politics$QT_VOTOS_sen # Percentages data_politics$votos_PSB_sen_pc<-100*data_politics$votos_PSB_sen/data_politics$QT_VOTOS_sen data_politics$votos_inv_sen_pc<-100*data_politics$votos_inv_sen/data_politics$QT_VOTOS_sen; head(data_politics) # Now, let's merge the Federal Deputy votes datafiles #----------------------------- data_politics<-left_join(data_politics, DF2010_PSDB, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, DF2010_PT, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, DF2010_PSB, by=c("COD_MUN_IBGE", "COD_MUN_TSE", "UF") , all=T) data_politics<-left_join(data_politics, DF2010_invalid, by=c("COD_MUN_TSE", "UF") , all=T) #sub NA per 0 data_politics$votos_PSDB_PLV<-ifelse(is.na(data_politics$votos_PSDB_PLV),0,data_politics$votos_PSDB_PLV) data_politics$votos_PT_PLV<-ifelse(is.na(data_politics$votos_PT_PLV),0,data_politics$votos_PT_PLV) data_politics$votos_PSB_PLV<-ifelse(is.na(data_politics$votos_PSB_PLV),0,data_politics$votos_PSB_PLV) data_politics$votos_PSDB_PLV_pc<-100*data_politics$votos_PSDB_PLV/data_politics$QT_VOTOS_DF data_politics$votos_PT_PLV_pc<-100*data_politics$votos_PT_PLV/data_politics$QT_VOTOS_DF # Percentages 
data_politics$votos_PSB_PLV_pc<-100*data_politics$votos_PSB_PLV/data_politics$QT_VOTOS_DF data_politics$votos_inv_PLV_pc<-100*data_politics$votos_inv_DF/data_politics$QT_VOTOS_DF; head(data_politics) library(ggplot2) data_politics %>% select(COD_MUN_IBGE, votos_PT_PLV_pc, votos_PSDB_PLV_pc, votos_PSB_PLV_pc) %>% gather(COD_MUN_IBGE) %>% ggplot(aes(x = factor(COD_MUN_IBGE), y=value)) + geom_boxplot() #voto na legenda do PSDB tem mediana bem próxima a do PT e maior variância PLV_2010<-data_politics %>% mutate(regiao = substr(COD_MUN_IBGE,1,1)) %>% select(regiao, COD_MUN_IBGE, votos_PT_PLV_pc, votos_PSDB_PLV_pc, votos_PSB_PLV_pc) %>% gather(variavel, valor, 3:5) %>% ggplot(aes(x = regiao, y=valor)) + geom_boxplot(aes(fill=variavel)) + scale_y_continuous(limits = c(0,10)) PLV_2010
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/get_enroll.R \name{get_enroll} \alias{get_enroll} \title{get_enroll()} \usage{ get_enroll(enroll_data, eto_programs = NULL, min_enroll = 8) } \arguments{ \item{enroll_data}{dataframe: a dataframe containing enrollment data. "[Admin] raw_enrollment_report"} \item{eto_programs}{character vector: a vector of characters containing the names of ETO programs to keep for analysis.} \item{min_enroll}{numeric: The minimum number of days enrolled to be considered an active participant} } \value{ numeric } \description{ This function returns the number of unduplicated participants who enrolled in a specific program or group of programs. It takes one dataframe as input. The data comes from the following ETO Results report: "[Admin] raw_enrollment_report". } \examples{ enroll <- laycUtils::load_txt('./my_data_folder/enrollment.txt') enroll <- laycUtils::format_data(enroll) get_enroll(enroll_data = enroll) }
/man/get_enroll.Rd
no_license
thelayc/laycEnrollment
R
false
false
998
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/get_enroll.R \name{get_enroll} \alias{get_enroll} \title{get_enroll()} \usage{ get_enroll(enroll_data, eto_programs = NULL, min_enroll = 8) } \arguments{ \item{enroll_data}{dataframe: a dataframe containing enrollment data. "[Admin] raw_enrollment_report"} \item{eto_programs}{character vector: a vector of characters containing the names of ETO programs to keep for analysis.} \item{min_enroll}{numeric: The minimum number of days enrolled to be considered an active participant} } \value{ numeric } \description{ This function returns the number of unduplicated participants who enrolled in a specific program or group of programs. It takes one dataframe as input. The data comes from the following ETO Results report: "[Admin] raw_enrollment_report". } \examples{ enroll <- laycUtils::load_txt('./my_data_folder/enrollment.txt') enroll <- laycUtils::format_data(enroll) get_enroll(enroll_data = enroll) }
## Quasi-minimum-variance-unbiased estimator (QMVUE) of the mean and
## coefficient of variation of a lognormal distribution (alternative
## parameterization) based on singly censored data, with an optional
## confidence interval for the mean.
##
## Arguments:
##   x               numeric vector of positive observations.
##   censored        logical vector flagging the censored observations.
##   N               total sample size.
##   T1              censoring level on the original (not log) scale; it is
##                   log-transformed before being passed to the normal fit.
##   n.cen           number of censored observations.
##   censoring.side  side of censoring, forwarded to the normal-scale fit.
##   ci              logical; should a confidence interval for the mean be
##                   computed?
##   ci.method       "delta" for the delta method or "cox" for Cox's method
##                   (interval built on the log scale, then exponentiated).
##   ci.type         interval type, forwarded to ci.normal.approx().
##   conf.level      confidence level of the interval.
##   ci.sample.size  sample size used for the CI; defaults to En, the
##                   sample size extracted from the MLE fit inside the
##                   ci branch (this works because R evaluates default
##                   arguments lazily, after En has been assigned).
##   pivot.statistic "z" or "t" pivot for the normal-approximation CI.
##
## Value: a list with component `parameters` (named vector: mean, cv) and,
## when ci = TRUE, a component `ci.obj` describing the interval.
##
## NOTE(review): finneys.g() is presumably Finney's g function used in the
## MVUE of the lognormal mean; it is defined elsewhere in the package —
## confirm against the package sources.
elnormAltSinglyCensored.qmvue <-
function (x, censored, N, T1, n.cen, censoring.side, ci, ci.method = c("delta",
    "cox"), ci.type, conf.level, ci.sample.size = En, pivot.statistic = c("z",
    "t"))
{
    pivot.statistic <- match.arg(pivot.statistic)
    ## Fit a normal distribution to the log-transformed data by maximum
    ## likelihood; the fit also supplies the sample size and the
    ## variance-covariance matrix used for the CI below.
    enorm.list <- enormSinglyCensored.mle(x = log(x), censored = censored,
        N = N, T1 = log(T1), n.cen = n.cen, censoring.side = censoring.side,
        ci = ci, ci.method = "normal.approx", ci.type = ci.type,
        conf.level = conf.level, pivot.statistic = pivot.statistic)
    log.parameters <- enorm.list$parameters
    meanlog <- log.parameters[1]
    sdlog <- log.parameters[2]
    s2 <- sdlog^2
    ## Back-transform to the original scale using the quasi-MVUE formulas
    ## based on finneys.g().
    mean <- exp(meanlog) * finneys.g(N - 1, s2/2)
    sd <- sqrt(exp(2 * meanlog) * (finneys.g(N - 1, 2 * s2) -
        finneys.g(N - 1, (s2 * (N - 2))/(N - 1))))
    cv <- sd/mean
    parameters <- c(mean, cv)
    names(parameters) <- c("mean", "cv")
    if (ci) {
        ## Separator used to line-wrap the method description string.
        sep.string <- paste("\n", space(33), sep = "")
        ci.method <- match.arg(ci.method)
        En <- enorm.list$ci.obj$sample.size
        V <- enorm.list$var.cov.params
        if (ci.method == "delta") {
            ## Delta method: variance of the back-transformed mean from the
            ## gradient (mean, sdlog * mean) and the var-cov matrix of
            ## (meanlog, sdlog); lower bound 0 since the mean is positive.
            lambda.vec <- c(mean, sdlog * mean)
            var.mean <- lambda.vec %*% V %*% lambda.vec
            ci.obj <- ci.normal.approx(theta.hat = mean, sd.theta.hat = sqrt(var.mean),
                n = ci.sample.size, df = ci.sample.size - 1, ci.type = ci.type,
                alpha = 1 - conf.level, lb = 0, test.statistic = pivot.statistic)
            ci.obj$parameter <- "mean"
            ci.obj$method <- paste(ci.obj$method, "Based on Delta Method",
                sep = sep.string)
        }
        else {
            ## Cox's method: build the interval for log(mean) on the log
            ## scale, then exponentiate the limits.
            beta <- log(mean)
            sd.beta <- sqrt(V[1, 1] + 2 * sdlog * V[1, 2] + sdlog^2 *
                V[2, 2])
            ci.obj <- ci.normal.approx(theta.hat = beta, sd.theta.hat = sd.beta,
                n = ci.sample.size, df = ci.sample.size - 1, ci.type = ci.type,
                alpha = 1 - conf.level, test.statistic = pivot.statistic)
            ci.obj$limits <- exp(ci.obj$limits)
            ci.obj$parameter <- "mean"
            ci.obj$method <- paste(ci.obj$method, "Based on Cox's Method",
                sep = sep.string)
        }
        return(list(parameters = parameters, ci.obj = ci.obj))
    }
    else return(list(parameters = parameters))
}
/R/elnormAltSinglyCensored.qmvue.R
no_license
alexkowa/EnvStats
R
false
false
2,441
r
## Quasi-minimum-variance-unbiased estimator (QMVUE) of the mean and
## coefficient of variation of a lognormal distribution (alternative
## parameterization) based on singly censored data, with an optional
## confidence interval for the mean.
##
## Arguments:
##   x               numeric vector of positive observations.
##   censored        logical vector flagging the censored observations.
##   N               total sample size.
##   T1              censoring level on the original (not log) scale; it is
##                   log-transformed before being passed to the normal fit.
##   n.cen           number of censored observations.
##   censoring.side  side of censoring, forwarded to the normal-scale fit.
##   ci              logical; should a confidence interval for the mean be
##                   computed?
##   ci.method       "delta" for the delta method or "cox" for Cox's method
##                   (interval built on the log scale, then exponentiated).
##   ci.type         interval type, forwarded to ci.normal.approx().
##   conf.level      confidence level of the interval.
##   ci.sample.size  sample size used for the CI; defaults to En, the
##                   sample size extracted from the MLE fit inside the
##                   ci branch (this works because R evaluates default
##                   arguments lazily, after En has been assigned).
##   pivot.statistic "z" or "t" pivot for the normal-approximation CI.
##
## Value: a list with component `parameters` (named vector: mean, cv) and,
## when ci = TRUE, a component `ci.obj` describing the interval.
##
## NOTE(review): finneys.g() is presumably Finney's g function used in the
## MVUE of the lognormal mean; it is defined elsewhere in the package —
## confirm against the package sources.
elnormAltSinglyCensored.qmvue <-
function (x, censored, N, T1, n.cen, censoring.side, ci, ci.method = c("delta",
    "cox"), ci.type, conf.level, ci.sample.size = En, pivot.statistic = c("z",
    "t"))
{
    pivot.statistic <- match.arg(pivot.statistic)
    ## Fit a normal distribution to the log-transformed data by maximum
    ## likelihood; the fit also supplies the sample size and the
    ## variance-covariance matrix used for the CI below.
    enorm.list <- enormSinglyCensored.mle(x = log(x), censored = censored,
        N = N, T1 = log(T1), n.cen = n.cen, censoring.side = censoring.side,
        ci = ci, ci.method = "normal.approx", ci.type = ci.type,
        conf.level = conf.level, pivot.statistic = pivot.statistic)
    log.parameters <- enorm.list$parameters
    meanlog <- log.parameters[1]
    sdlog <- log.parameters[2]
    s2 <- sdlog^2
    ## Back-transform to the original scale using the quasi-MVUE formulas
    ## based on finneys.g().
    mean <- exp(meanlog) * finneys.g(N - 1, s2/2)
    sd <- sqrt(exp(2 * meanlog) * (finneys.g(N - 1, 2 * s2) -
        finneys.g(N - 1, (s2 * (N - 2))/(N - 1))))
    cv <- sd/mean
    parameters <- c(mean, cv)
    names(parameters) <- c("mean", "cv")
    if (ci) {
        ## Separator used to line-wrap the method description string.
        sep.string <- paste("\n", space(33), sep = "")
        ci.method <- match.arg(ci.method)
        En <- enorm.list$ci.obj$sample.size
        V <- enorm.list$var.cov.params
        if (ci.method == "delta") {
            ## Delta method: variance of the back-transformed mean from the
            ## gradient (mean, sdlog * mean) and the var-cov matrix of
            ## (meanlog, sdlog); lower bound 0 since the mean is positive.
            lambda.vec <- c(mean, sdlog * mean)
            var.mean <- lambda.vec %*% V %*% lambda.vec
            ci.obj <- ci.normal.approx(theta.hat = mean, sd.theta.hat = sqrt(var.mean),
                n = ci.sample.size, df = ci.sample.size - 1, ci.type = ci.type,
                alpha = 1 - conf.level, lb = 0, test.statistic = pivot.statistic)
            ci.obj$parameter <- "mean"
            ci.obj$method <- paste(ci.obj$method, "Based on Delta Method",
                sep = sep.string)
        }
        else {
            ## Cox's method: build the interval for log(mean) on the log
            ## scale, then exponentiate the limits.
            beta <- log(mean)
            sd.beta <- sqrt(V[1, 1] + 2 * sdlog * V[1, 2] + sdlog^2 *
                V[2, 2])
            ci.obj <- ci.normal.approx(theta.hat = beta, sd.theta.hat = sd.beta,
                n = ci.sample.size, df = ci.sample.size - 1, ci.type = ci.type,
                alpha = 1 - conf.level, test.statistic = pivot.statistic)
            ci.obj$limits <- exp(ci.obj$limits)
            ci.obj$parameter <- "mean"
            ci.obj$method <- paste(ci.obj$method, "Based on Cox's Method",
                sep = sep.string)
        }
        return(list(parameters = parameters, ci.obj = ci.obj))
    }
    else return(list(parameters = parameters))
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/FFTrees.R \name{FFTrees} \alias{FFTrees} \title{Creates a Fast and Frugal Trees (FFTrees) object.} \usage{ FFTrees(formula = NULL, data = NULL, data.test = NULL, train.p = 1, rank.method = "m", hr.weight = 0.5, verbose = FALSE, max.levels = 4, do.cart = TRUE, do.lr = TRUE, do.rf = TRUE, object = NULL) } \arguments{ \item{formula}{A formula} \item{data}{dataframe. A model training dataset. An m x n dataframe containing n cue values for each of the m exemplars.} \item{data.test}{dataframe. An optional model testing dataset (same format as data)} \item{train.p}{numeric. What percentage of the data to use for training. This only applies when data.test is not specified by the user.} \item{rank.method}{character. How should cues be ranked during tree construction. "m" (for marginal) means that cues will only be ranked once with the entire training dataset. "c" (conditional) means that cues will be ranked after each level in the tree with the remaining unclassified training exemplars. This also means that the same cue can be used multiple times in the trees. However, the "c" method will take longer and may be prone to overfitting.} \item{hr.weight}{numeric. A number between 0 and 1 indicating how much weight to give to maximizing hits versus minimizing false alarms.} \item{verbose}{logical. Should progress reports be printed? Can be helpful for diagnosis when the function is running slowly...} \item{max.levels}{integer. The maximum number of levels considered for the tree.} \item{do.cart, do.lr, do.rf}{logical. Should alternative algorithms be created for comparison? cart = regression trees, lr = logistic regression, rf = random forests.} \item{object}{An optional existing FFTrees object (do not specify by hand)} } \value{ A list of length 3. The first element "tree.acc" is a dataframe containing the final statistics of all trees.
The second element "cue.accuracies" shows the accuracies of all cues. The third element "tree.class.ls" is a list with n.trees elements, where each element shows the final decisions for each tree for each exemplar. } \description{ Creates a Fast and Frugal Trees (FFTrees) object. }
/man/FFTrees.Rd
no_license
JackStat/FFTrees
R
false
true
2,237
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/FFTrees.R \name{FFTrees} \alias{FFTrees} \title{Creates a Fast and Frugal Trees (FFTrees) object.} \usage{ FFTrees(formula = NULL, data = NULL, data.test = NULL, train.p = 1, rank.method = "m", hr.weight = 0.5, verbose = FALSE, max.levels = 4, do.cart = TRUE, do.lr = TRUE, do.rf = TRUE, object = NULL) } \arguments{ \item{formula}{A formula} \item{data}{dataframe. A model training dataset. An m x n dataframe containing n cue values for each of the m exemplars.} \item{data.test}{dataframe. An optional model testing dataset (same format as data)} \item{train.p}{numeric. What percentage of the data to use for training. This only applies when data.test is not specified by the user.} \item{rank.method}{character. How should cues be ranked during tree construction. "m" (for marginal) means that cues will only be ranked once with the entire training dataset. "c" (conditional) means that cues will be ranked after each level in the tree with the remaining unclassified training exemplars. This also means that the same cue can be used multiple times in the trees. However, the "c" method will take longer and may be prone to overfitting.} \item{hr.weight}{numeric. A number between 0 and 1 indicating how much weight to give to maximizing hits versus minimizing false alarms.} \item{verbose}{logical. Should progress reports be printed? Can be helpful for diagnosis when the function is running slowly...} \item{max.levels}{integer. The maximum number of levels considered for the tree.} \item{do.cart, do.lr, do.rf}{logical. Should alternative algorithms be created for comparison? cart = regression trees, lr = logistic regression, rf = random forests.} \item{object}{An optional existing FFTrees object (do not specify by hand)} } \value{ A list of length 3. The first element "tree.acc" is a dataframe containing the final statistics of all trees.
The second element "cue.accuracies" shows the accuracies of all cues. The third element "tree.class.ls" is a list with n.trees elements, where each element shows the final decisions for each tree for each exemplar. } \description{ Creates a Fast and Frugal Trees (FFTrees) object. }
## Reduce an event log to one row per activity instance.
##
## The events are sorted by case, timestamp and the internal ".order"
## column, then only the first event of every (case, activity instance,
## activity) combination is kept; the result is returned as a tibble.
create_minimal_activity_log <- function(eventlog) {
  events_dt <- data.table::data.table(eventlog)
  ## Chronological ordering within each case; ".order" breaks ties
  ## between events that share the same timestamp.
  ordering_cols <- c(case_id(eventlog), timestamp(eventlog), ".order")
  data.table::setorderv(events_dt, cols = ordering_cols)
  ## After sorting, unique() keeps the first row of each activity instance.
  instance_keys <- c(case_id(eventlog),
                     activity_instance_id(eventlog),
                     activity_id(eventlog))
  dplyr::as_tibble(unique(events_dt, by = instance_keys))
}
/R/create_minimal_activity_log.R
no_license
cran/edeaR
R
false
false
307
r
## Reduce an event log to one row per activity instance.
##
## The events are sorted by case, timestamp and the internal ".order"
## column, then only the first event of every (case, activity instance,
## activity) combination is kept; the result is returned as a tibble.
create_minimal_activity_log <- function(eventlog) {
  events_dt <- data.table::data.table(eventlog)
  ## Chronological ordering within each case; ".order" breaks ties
  ## between events that share the same timestamp.
  ordering_cols <- c(case_id(eventlog), timestamp(eventlog), ".order")
  data.table::setorderv(events_dt, cols = ordering_cols)
  ## After sorting, unique() keeps the first row of each activity instance.
  instance_keys <- c(case_id(eventlog),
                     activity_instance_id(eventlog),
                     activity_id(eventlog))
  dplyr::as_tibble(unique(events_dt, by = instance_keys))
}
#' Metadata for the NMR spectra. #' #' A data frame containing acquisition and processing data pertaining to the NMR spectrum. #' #' @format a data frame with 363 variables for two observations #' \describe{ #' \item{DS}{Dummy scans performed prior to acquisition} #' \item{NS}{Number of scans performed during the acquisition} #' \item{EXP}{The type of experiment/pulse sequence used to acquire the spectrum} #' \item{DATE}{The date on which acquisition of the sample was completed} #' \item{USER}{Information pertaining to the user as well as other metadata such as the success of the experiment} #' } "meta"
/R/meta.R
permissive
kbario/concentr8r
R
false
false
631
r
#' Metadata for the NMR spectra. #' #' A data frame containing acquisition and processing data pertaining to the NMR spectrum. #' #' @format a data frame with 363 variables for two observations #' \describe{ #' \item{DS}{Dummy scans performed prior to acquisition} #' \item{NS}{Number of scans performed during the acquisition} #' \item{EXP}{The type of experiment/pulse sequence used to acquire the spectrum} #' \item{DATE}{The date on which acquisition of the sample was completed} #' \item{USER}{Information pertaining to the user as well as other metadata such as the success of the experiment} #' } "meta"
## Bayesian estimation of a two-regime threshold autoregressive (TAR) model
## by MCMC: Gibbs steps for the regime coefficients (TAR.coeff) and the
## regime variances (TAR.sigma), a multinomial draw for the delay lag
## (TAR.lagd) and a Metropolis-Hastings step for the threshold (TAR.thres).
##
## Arguments:
##   x          observed time series (used as-is; the commented-out block
##              below once supported differencing).
##   lagp1      AR lags used in regime 1.
##   lagp2      AR lags used in regime 2.
##   Iteration  total number of MCMC iterations.
##   Burnin     number of initial iterations discarded before summarizing.
##   constant   1 to include an intercept in each regime, 0 otherwise
##              (default 1).
##   d0         maximum delay lag considered (default 3).
##   step.thv   MH proposal step size for the threshold (required).
##   thresVar   optional exogenous threshold variable; when missing the
##              series itself is used (self-exciting TAR).
##   mu01, v01  prior mean vector and covariance matrix of the regime-1
##              coefficients (scalars are expanded; defaults 0 and 0.1*I).
##   mu02, v02  prior mean vector and covariance matrix of the regime-2
##              coefficients (scalars are expanded; defaults 0 and 0.1*I).
##   v0, lambda0 hyper-parameters of the inverse-gamma prior on the regime
##              variances; lambda0 defaults to var.pred/3 of an AR(p1) fit.
##   refresh    progress is printed every `refresh` iterations.
##   tplot      "TRUE" to draw trace/ACF plots (note: compared as a string,
##              not a logical).
##
## Value: a list with the full draws (`mcmc`), the post-burn-in draws
## (`posterior`), the posterior summary (`coef`), in-sample residuals
## (`residual`), the modal delay lag (`lagd`) and the DIC.
BAYSTAR <- function(x, lagp1, lagp2, Iteration, Burnin, constant, d0,
                    step.thv, thresVar, mu01, v01, mu02, v02, v0, lambda0,
                    refresh, tplot) {
  ##Time.initial<-Sys.time()
  ## ---- Initialize: defaults and argument validation ----
  if (missing(constant)) {
    constant <- 1
  } else {
    if (!is.vector(constant) || length(constant) != 1)
      stop("'constant' must be a scalar")
    if (constant != 0 && constant != 1)
      stop("'constant' must be 1 or 0")
  }
  if (missing(d0)) {
    d0 <- 3
  } else {
    if (!is.vector(d0) || length(d0) != 1)
      stop("'d0' must be a scalar")
    if (d0 < 0)
      stop("'d0' must be positive")
  }
  if (missing(step.thv)) {
    stop("'step.thv' is missing")
  }
  if (missing(refresh)) {
    ## Default progress frequency: every 1000 iterations, or half the run
    ## for short chains.
    if (Iteration < 1000) {
      refresh <- Iteration / 2
    } else {
      refresh <- 1000
    }
  } else {
    if (!is.vector(refresh) || length(refresh) != 1)
      stop("'refresh' must be a scalar")
    if (refresh < 0)
      stop("'refresh' must be positive")
    if (refresh > Iteration)
      stop("'refresh' must be less than 'Iteration'")
  }
  if (missing(tplot)) {
    tplot <- "FALSE"
  }
  p1 <- length(lagp1); p2 <- length(lagp2)  ## No. of covariates in the two regimes
  nx <- length(x)
  #if (differ ==1){
  #yt<-x[2:nx]-x[2:nx-1] }
  #else
  yt <- x
  nob <- length(yt)
  ## zt is the threshold variable: an exogenous series if supplied,
  ## otherwise the series itself (self-exciting TAR).
  if (!missing(thresVar)) {
    if (length(thresVar) >= nob) {
      zt <- thresVar[1:nob]
    } else {
      stop("Data for the threshold variable are not enough")
    }
  } else zt <- yt
  ## Set initial values for the chain.
  phi.1 <- rep(0.05, p1 + constant)
  phi.2 <- rep(0.05, p2 + constant)
  sigma.1 <- 0.2
  sigma.2 <- 0.2
  lagd <- 1
  thres <- median(zt)
  accept.r <- 0
  sum.r <- 0
  ## MSE of fitting an AR(p1) model (used for the default lambda0 below).
  ar.mse <- ar(yt, aic = FALSE, order.max = p1)
  ## Hyper-parameters: each prior mean/covariance may be given either as a
  ## scalar (expanded to the right dimension) or as a full matrix.
  if (missing(mu01)) {
    mu01 <- matrix(0, nrow = p1 + constant, ncol = 1)
  } else {
    if (!is.matrix(mu01)) {
      if (!is.vector(mu01) || length(mu01) != 1) {
        stop("'mu01' must be a scalar or a matrix")
      } else {
        mu01 <- matrix(mu01, nrow = p1 + constant, ncol = 1)
      }
    } else {
      if (dim(mu01)[1] != p1 + constant || dim(mu01)[2] != 1) {
        ## Fixed: this message used to refer to 'mu02' and misspell "wrong".
        stop("error: The dimensions of 'mu01' are wrong!")
      }
    }
  }
  if (missing(v01)) {
    v01 <- diag(0.1, p1 + constant)
  } else {
    if (!is.matrix(v01)) {
      if (!is.vector(v01) || length(v01) != 1) {
        stop("'v01' must be a scalar or a matrix")
      } else {
        v01 <- diag(v01, p1 + constant)
      }
    } else {
      if (dim(v01)[1] != p1 + constant || dim(v01)[2] != p1 + constant) {
        stop("error: The dimensions of 'v01' are wrong!")
      }
    }
  }
  if (missing(mu02)) {
    mu02 <- matrix(0, nrow = p2 + constant, ncol = 1)
  } else {
    if (!is.matrix(mu02)) {
      if (!is.vector(mu02) || length(mu02) != 1) {
        stop("'mu02' must be a scalar or a matrix")
      } else {
        mu02 <- matrix(mu02, nrow = p2 + constant, ncol = 1)
      }
    } else {
      if (dim(mu02)[1] != p2 + constant || dim(mu02)[2] != 1) {
        stop("error: The dimensions of 'mu02' are wrong!")
      }
    }
  }
  if (missing(v02)) {
    v02 <- diag(0.1, p2 + constant)
  } else {
    if (!is.matrix(v02)) {
      if (!is.vector(v02) || length(v02) != 1) {
        stop("'v02' must be a scalar or a matrix")
      } else {
        v02 <- diag(v02, p2 + constant)
      }
    } else {
      if (dim(v02)[1] != p2 + constant || dim(v02)[2] != p2 + constant) {
        stop("error: The dimensions of 'v02' are wrong!")
      }
    }
  }
  if (missing(v0)) {
    v0 <- 3
  } else {
    if (!is.vector(v0) || length(v0) != 1)
      stop("'v0' must be a scalar")
    if (v0 < 0)
      stop("'v0' must be positive")
  }
  if (missing(lambda0)) {
    lambda0 <- ar.mse$var.pred / 3
  } else {
    if (!is.vector(lambda0) || length(lambda0) != 1)
      stop("'lambda0' must be a scalar")
    if (lambda0 < 0)
      stop("'lambda0' must be positive")
  }
  ## The MH proposal for the threshold is confined to the inter-quartile
  ## range of the threshold variable.
  bound.thv <- c(quantile(zt, 0.25), quantile(zt, 0.75))
  ## Initialize a matrix for saving all iterative estimates (two extra
  ## columns for the unconditional regime means when a constant is fitted;
  ## the last column always holds the delay lag).
  if (constant == 1) {
    par.set <- matrix(NA, nrow = Iteration,
                      ncol = (length(c(phi.1, phi.2, sigma.1, sigma.2, lagd, thres)) + 2))
  } else {
    par.set <- matrix(NA, nrow = Iteration,
                      ncol = length(c(phi.1, phi.2, sigma.1, sigma.2, lagd, thres)))
  }
  loglik.1 <- loglik.2 <- DIC <- NA  ## to calculate DIC
  ## ---- Start of MCMC sampling ----
  for (igb in 1:Iteration) {
    if (!missing(thresVar)) {
      ## Draw phi.1 / phi.2 from their multivariate normal full conditionals.
      phi.1 <- TAR.coeff(1, yt, p1, p2, sigma.1, lagd, thres, mu01, v01,
                         lagp1, lagp2, constant = constant, zt)
      phi.2 <- TAR.coeff(2, yt, p1, p2, sigma.2, lagd, thres, mu02, v02,
                         lagp1, lagp2, constant = constant, zt)
      ## Draw sigma.1 / sigma.2 from inverse-gamma full conditionals;
      ## v0 and lambda0 are the hyper-parameters of the prior.
      sigma.1 <- TAR.sigma(1, yt, thres, lagd, p1, p2, phi.1, v0, lambda0,
                           lagp1, lagp2, constant = constant, zt)
      sigma.2 <- TAR.sigma(2, yt, thres, lagd, p1, p2, phi.2, v0, lambda0,
                           lagp1, lagp2, constant = constant, zt)
      ## Draw the delay lag from a multinomial distribution.
      lagd <- TAR.lagd(yt, p1, p2, phi.1, phi.2, sigma.1, sigma.2, thres,
                       lagp1, lagp2, constant = constant, d0, zt)
      ## Draw the threshold by the Metropolis-Hastings algorithm.
      thresholdt <- TAR.thres(yt, p1, p2, phi.1, phi.2, sigma.1, sigma.2,
                              lagd, thres, step.r = step.thv, bound.thv,
                              lagp1, lagp2, constant = constant, zt)
    } else {
      phi.1 <- TAR.coeff(1, yt, p1, p2, sigma.1, lagd, thres, mu01, v01,
                         lagp1, lagp2, constant = constant)
      phi.2 <- TAR.coeff(2, yt, p1, p2, sigma.2, lagd, thres, mu02, v02,
                         lagp1, lagp2, constant = constant)
      sigma.1 <- TAR.sigma(1, yt, thres, lagd, p1, p2, phi.1, v0, lambda0,
                           lagp1, lagp2, constant = constant)
      sigma.2 <- TAR.sigma(2, yt, thres, lagd, p1, p2, phi.2, v0, lambda0,
                           lagp1, lagp2, constant = constant)
      lagd <- TAR.lagd(yt, p1, p2, phi.1, phi.2, sigma.1, sigma.2, thres,
                       lagp1, lagp2, constant = constant, d0)
      thresholdt <- TAR.thres(yt, p1, p2, phi.1, phi.2, sigma.1, sigma.2,
                              lagd, thres, step.r = step.thv, bound.thv,
                              lagp1, lagp2, constant = constant)
    }
    sum.r <- sum.r + thresholdt[1]  ## Count the number of MH acceptances
    thres <- thresholdt[2]          ## Save i-th iterated threshold value
    ## Compute the unconditional means for each regime.
    if (constant == 1) {
      c.mean <- c(phi.1[1] / (1 - sum(phi.1) + phi.1[1]),
                  phi.2[1] / (1 - sum(phi.2) + phi.2[1]))
      par.set[igb, ] <- c(phi.1, phi.2, sigma.1, sigma.2, thres, c.mean, lagd)
    } else {
      par.set[igb, ] <- c(phi.1, phi.2, sigma.1, sigma.2, thres, lagd)
    }
    ## Log-likelihood of the current draw (accumulated for the DIC).
    if (!missing(thresVar)) {
      loglik.1[igb] <- TAR.lik(yt, p1, p2, phi.1, phi.2, sigma.1, sigma.2,
                               lagd, thres, lagp1, lagp2,
                               constant = constant, thresVar)
    } else {
      loglik.1[igb] <- TAR.lik(yt, p1, p2, phi.1, phi.2, sigma.1, sigma.2,
                               lagd, thres, lagp1, lagp2,
                               constant = constant)
    }
    ncol0 <- ncol(par.set)  ## index of the last column (the delay lag)
    ## Print out for monitoring the estimation every `refresh` iterations.
    if (igb %% refresh == 0) {
      cat("iteration = ", igb, "\n")
      cat("regime 1 = ", round(phi.1, 4), "\n")
      cat("regime 2 = ", round(phi.2, 4), "\n")
      cat("sigma^2 1 = ", round(sigma.1, 4), "\n")
      cat("sigma^2 2 = ", round(sigma.2, 4), "\n")
      cat("r = ", round(thres, 4), "\n")
      accept.r <- (sum.r / igb) * 100
      cat("acceptance rate of r = ", round(accept.r, 4), "%", "\n")
      ## Make a frequency table of the delay-lag draws so far.
      lag.freq <- rep(0, d0)
      for (i in 1:d0) {
        lag.freq[i] <- sum(par.set[1:igb, ncol0] == i)
      }
      #lag.freq[1:length(table(par.set[,ncol0]))]<- table(par.set[,ncol0])
      ## Frequency table for delay lag
      lag.freq <- t(matrix(lag.freq, dimnames = list(c(as.character(1:d0)),
                                                     c("Freq"))))
      cat("Lag choice : ", "\n")
      print(lag.freq)
      cat("------------", "\n")
    }
  }
  ## ---- End of MCMC sampling ----
  ## Summarize the collected (post burn-in) MCMC estimates.
  mcmc.stat <- TAR.summary(par.set[(Burnin + 1):Iteration, 1:(ncol0 - 1)],
                           lagp1, lagp2, constant = constant)
  print(round(mcmc.stat, 4))
  ## Calculate the highest posterior probability of the delay lag.
  ## NOTE(review): lag.freq comes from the last refresh printout, so it is
  ## only defined (and only complete) when a refresh fired at the final
  ## iteration, i.e. Iteration is a multiple of refresh — verify.
  lag.y <- c(1:d0)
  lag.d <- lag.y[lag.freq == max(lag.freq)]
  cat("Lag choice : ", "\n")
  print(lag.freq)
  cat("------------", "\n")
  cat("The highest posterior prob. of lag is at : ", lag.d, "\n")
  ## Calculate D(E[theta]): the deviance at the posterior-mean parameters.
  if (!missing(thresVar)) {
    loglik.2 <- TAR.lik(yt, p1, p2, mcmc.stat[1:(p1 + constant), 1],
                        mcmc.stat[(p1 + constant + 1):(p1 + constant + p2 + constant), 1],
                        mcmc.stat[(p1 + constant + p2 + constant + 1), 1],
                        mcmc.stat[(p1 + constant + p2 + constant + 2), 1],
                        lag.d,
                        mcmc.stat[(p1 + constant + p2 + constant + 3), 1],
                        lagp1, lagp2, constant = constant, thresVar)
  } else {
    loglik.2 <- TAR.lik(yt, p1, p2, mcmc.stat[1:(p1 + constant), 1],
                        mcmc.stat[(p1 + constant + 1):(p1 + constant + p2 + constant), 1],
                        mcmc.stat[(p1 + constant + p2 + constant + 1), 1],
                        mcmc.stat[(p1 + constant + p2 + constant + 2), 1],
                        lag.d,
                        mcmc.stat[(p1 + constant + p2 + constant + 3), 1],
                        lagp1, lagp2, constant = constant)
  }
  ## DIC = 2 * mean(deviance of the draws) - deviance at the posterior mean.
  DIC <- (2 * (-2 * sum(loglik.1[(Burnin + 1):Iteration])) /
            length(loglik.1[(Burnin + 1):Iteration])) - (-2 * loglik.2)
  cat(" DIC = ", DIC, "\n")
  ##################################################
  ## Trace plots and ACF for all parameter estimates
  if (tplot == "TRUE") {
    dev.new()
    ts.plot(yt)
    title("Trend plot of data.")
    nnp <- 2 * constant + p1 + p2 + 3
    kk <- ceiling(nnp / 3)
    pword <- NULL
    if (constant == 1) {
      pword[1:(nnp - 3)] <- c(paste("phi1", c(0, lagp1), sep = "."),
                              paste("phi2", c(0, lagp2), sep = "."))
    } else {
      pword[1:(nnp - 3)] <- c(paste("phi1", lagp1, sep = "."),
                              paste("phi2", lagp2, sep = "."))
    }
    pword[(nnp - 2):nnp] <- expression(sigma[1]^2, sigma[2]^2, r)
    #pword[(p1+p2+1):(p1+p2+3)]<- expression()
    #expression(phi[c(0,lagp1)]^(1),phi[c(0,lagp2)]^(2),sigma[1]^2,sigma[2]^2,r)
    dev.new()
    par(mfrow = c(kk, 3), cex = .6, cex.axis = 0.8, lwd = 0.1, las = 1,
        ps = 12, pch = 0.5)
    ## Trace plots of all MCMC iterations for all estimates.
    for (i in 1:nnp) {
      all.t <- length(par.set[, i])
      plot.ts(par.set[, i], main = pword[i], xlab = "", ylab = "",
              col = "blue")
      #lines(1:all.t,rep(real.par[i],all.t),col="red")
      lines(1:all.t, rep(mcmc.stat[i, "mean"], all.t), col = "yellow")
      lines(1:all.t, rep(mcmc.stat[i, "lower"], all.t), col = "green")
      lines(1:all.t, rep(mcmc.stat[i, "upper"], all.t), col = "green")
    }
    dev.new()
    par(mfrow = c(kk, 3), cex = .6, cex.axis = 0.8, lwd = 0.1, las = 1,
        ps = 12, pch = 0.5)
    ## ACF of the post burn-in draws for all estimates.
    for (i in 1:nnp) {
      acf(par.set[(Burnin + 1):Iteration, i], main = pword[i], xlab = "",
          ylab = "", lag.max = 100)
    }
  }
  ## Calculate the in-sample residuals at the posterior-mean TAR model.
  maxd <- max(lagp1, lagp2)
  if (constant == 1) {
    con.1 <- mcmc.stat[1, 1]
    par.1 <- mcmc.stat[2:(p1 + 1), 1]
    con.2 <- mcmc.stat[p1 + 2, 1]
    par.2 <- mcmc.stat[(p1 + 2 + 1):(p1 + p2 + 2), 1]
    thv <- mcmc.stat[p1 + p2 + 2 + 3, 1]
  } else {
    par.1 <- mcmc.stat[1:p1, 1]
    par.2 <- mcmc.stat[(p1 + 1):(p1 + p2), 1]
    thv <- mcmc.stat[p1 + p2 + 2 + 1, 1]
  }
  residual <- rep(NA, nob - maxd)
  for (t in (maxd + 1):nob) {
    if (constant == 1) {
      if (yt[t - lag.d] <= thv) {
        residual[t - maxd] <- yt[t] - sum(con.1, (par.1 * yt[t - lagp1]))
      } else {
        residual[t - maxd] <- yt[t] - sum(con.2, (par.2 * yt[t - lagp2]))
      }
    } else {
      if (yt[t - lag.d] <= thv) {
        residual[t - maxd] <- yt[t] - sum(par.1 * yt[t - lagp1])
      } else {
        residual[t - maxd] <- yt[t] - sum(par.2 * yt[t - lagp2])
      }
    }
  }
  tar <- list(mcmc = par.set,
              posterior = par.set[(Burnin + 1):Iteration, 1:(ncol0 - 1)],
              coef = round(mcmc.stat, 4), residual = residual,
              lagd = lag.d, DIC = DIC)
  return(tar)
  ##Sys.time()-Time.initial
}
/output/sources/authors/1105/BAYSTAR/BAYSTAR.R
no_license
Irbis3/crantasticScrapper
R
false
false
10,622
r
BAYSTAR<-function(x,lagp1,lagp2,Iteration,Burnin,constant,d0,step.thv,thresVar,mu01,v01,mu02,v02,v0,lambda0,refresh,tplot) { ##Time.initial<-Sys.time() ## Initialize if (missing(constant)){ constant<- 1} else{ if (!is.vector(constant) || length(constant) != 1) stop ("'constant' must be a scalar") if (constant!=0 && constant!=1) stop ("'constant' must be 1 or 0") } if (missing(d0)){ d0<- 3} else{ if (!is.vector(d0) || length(d0) != 1) stop ("'d0' must be a scalar") if (d0 < 0) stop ("'d0' must be positive") } if (missing(step.thv)){ stop ("'step.thv' is missing") } if (missing(refresh)){ if(Iteration < 1000){ refresh <- Iteration /2 } else{ refresh <- 1000 }} else{ if (!is.vector(refresh) || length(refresh) != 1) stop ("'refresh' must be a scalar") if (refresh < 0) stop ("'refresh' must be positive") if (refresh > Iteration) stop ("'refresh' must be less than 'Iteration'") } if (missing(tplot)){ tplot ="FALSE" } p1<- length(lagp1); p2<- length(lagp2) ## No. of covariate in two regimes nx<- length(x) #if (differ ==1){ #yt<-x[2:nx]-x[2:nx-1] } #else yt<- x nob<- length(yt) if (!missing(thresVar)){ if (length(thresVar) >= nob ){ zt <- thresVar[1:nob]} else { stop ("Data for the threshold variable are not enough")} } else zt<- yt ## Set initial values phi.1 <- rep(0.05, p1 + constant) phi.2 <- rep(0.05, p2 + constant) sigma.1<- 0.2 sigma.2<- 0.2 lagd<- 1 thres<- median(zt) accept.r<- 0 sum.r<- 0 ## MSE of fitting an AR(p1) model ar.mse<- ar(yt,aic=FALSE, order.max=p1) ## Sets for the hyper-parameters if (missing(mu01)){ mu01<- matrix(0,nrow=p1+constant,ncol=1)} else{ if(!is.matrix(mu01)){ if (!is.vector(mu01) || length(mu01) != 1){ stop("'mu01' must be a scalar or a matrix")} else{ mu01<- matrix(mu01,nrow=p1+constant,ncol=1)} } else{ if (dim(mu01)[1]!=p1+constant || dim(mu01)[2]!=1){ stop("error: The dimensions of 'mu02' are worng!") } } } if (missing(v01)){ v01<- diag(0.1,p1+constant)} else{ if(!is.matrix(v01)){ if (!is.vector(v01) || length(v01) != 1){ stop("'v01' 
must be a scalar or a matrix")} else{ v01<- diag(v01,p1+constant)} } else{ if (dim(v01)[1]!=p1+constant || dim(v01)[2]!=p1+constant){ stop("error: The dimensions of 'v01' are worng!") } } } if (missing(mu02)){ mu02<- matrix(0,nrow=p2+constant,ncol=1)} else{ if(!is.matrix(mu02)){ if (!is.vector(mu02) || length(mu02) != 1){ stop("'mu02' must be a scalar or a matrix")} else{ mu02<- matrix(mu02,nrow=p2+constant,ncol=1)} } else{ if (dim(mu02)[1]!=p2+constant || dim(mu02)[2]!=1){ stop("error: The dimensions of 'mu02' are worng!") } } } if (missing(v02)){ v02<- diag(0.1,p2+constant)} else{ if(!is.matrix(v02)){ if (!is.vector(v02) || length(v02) != 1){ stop("'v02' must be a scalar or a matrix")} else{ v02<- diag(v02,p2+constant)} } else{ if (dim(v02)[1]!=p2+constant || dim(v02)[2]!=p2+constant){ stop("error: The dimensions of 'v02' are worng!") } } } if (missing(v0)){ v0<- 3} else{ if (!is.vector(v0) || length(v0) != 1) stop ("'v0' must be a scalar") if (v0 < 0) stop ("'v0' must be positive") } if (missing(lambda0)){ lambda0<- ar.mse$var.pred/3} else{ if (!is.vector(lambda0) || length(lambda0) != 1) stop ("'lambda0' must be a scalar") if (lambda0 < 0) stop ("'lambda0' must be positive") } bound.thv<- c(quantile(zt,0.25),quantile(zt,0.75)) ## Initialize a matrix for saving all iterative estimates if(constant==1){ par.set<- matrix(NA,nrow=Iteration,ncol=(length(c(phi.1,phi.2,sigma.1,sigma.2,lagd,thres))+2))} else{ par.set<- matrix(NA,nrow=Iteration,ncol=length(c(phi.1,phi.2,sigma.1,sigma.2,lagd,thres)))} loglik.1<-loglik.2<-DIC<-NA ## to calculate DIC ## Start of MCMC sampling for (igb in 1:Iteration){ if (!missing(thresVar)){ phi.1<- TAR.coeff(1,yt,p1,p2,sigma.1,lagd,thres,mu01,v01,lagp1,lagp2,constant=constant,zt) ## Draw phi.1 from a multivariate normal distribution phi.2<- TAR.coeff(2,yt,p1,p2,sigma.2,lagd,thres,mu02,v02,lagp1,lagp2,constant=constant,zt) ## Draw phi.2 from a multivariate normal distribution sigma.1<- 
TAR.sigma(1,yt,thres,lagd,p1,p2,phi.1,v0,lambda0,lagp1,lagp2,constant=constant,zt) ## Draw sigma.1 from an Inverse-Gamma distribution ## v and lambda are the hyper-parameters of the Gamma prior sigma.2<- TAR.sigma(2,yt,thres,lagd,p1,p2,phi.2,v0,lambda0,lagp1,lagp2,constant=constant,zt) ## Draw sigma.2 from a Inverse-Gamma distribution lagd<- TAR.lagd(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,thres,lagp1,lagp2,constant=constant,d0,zt) ## Draw lagd from a multinomial distribution thresholdt<- TAR.thres(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,lagd,thres,step.r=step.thv,bound.thv,lagp1,lagp2,constant=constant,zt) ## Draw thresholdt by the MH algorithm } else{ phi.1<- TAR.coeff(1,yt,p1,p2,sigma.1,lagd,thres,mu01,v01,lagp1,lagp2,constant=constant) ## Draw phi.1 from a multivariate normal distribution phi.2<- TAR.coeff(2,yt,p1,p2,sigma.2,lagd,thres,mu02,v02,lagp1,lagp2,constant=constant) ## Draw phi.2 from a multivariate normal distribution sigma.1<- TAR.sigma(1,yt,thres,lagd,p1,p2,phi.1,v0,lambda0,lagp1,lagp2,constant=constant) ## Draw sigma.1 from an Inverse-Gamma distribution ## v and lambda are the hyper-parameters of the Gamma prior sigma.2<- TAR.sigma(2,yt,thres,lagd,p1,p2,phi.2,v0,lambda0,lagp1,lagp2,constant=constant) ## Draw sigma.2 from a Inverse-Gamma distribution lagd<- TAR.lagd(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,thres,lagp1,lagp2,constant=constant,d0) ## Draw lagd from a multinomial distribution thresholdt<- TAR.thres(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,lagd,thres,step.r=step.thv,bound.thv,lagp1,lagp2,constant=constant) ## Draw thresholdt by the MH algorithm } sum.r<- sum.r+thresholdt[1] ## Count the number of acceptance thres<- thresholdt[2] ## Save i-th iterated threshold value ## Compute the unconditional means for each regime if(constant==1){ c.mean<- c(phi.1[1]/(1-sum(phi.1)+phi.1[1]),phi.2[1]/(1-sum(phi.2)+phi.2[1])) par.set[igb,]<-c(phi.1,phi.2,sigma.1,sigma.2,thres,c.mean,lagd) } else {par.set[igb,]<-c(phi.1,phi.2,sigma.1,sigma.2,thres,lagd) } if 
(!missing(thresVar)){ loglik.1[igb]<-TAR.lik(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,lagd,thres,lagp1,lagp2,constant=constant,thresVar)} else{ loglik.1[igb]<-TAR.lik(yt,p1,p2,phi.1,phi.2,sigma.1,sigma.2,lagd,thres,lagp1,lagp2,constant=constant) } ## Save all iterated estimates of parameters ncol0<-ncol(par.set) ## Print out for monitoring the estimations of every refresh (1000) iterate if(igb%%refresh==0){ cat("iteration = ",igb,"\n") cat("regime 1 = ",round(phi.1,4),"\n") cat("regime 2 = ",round(phi.2,4),"\n") cat("sigma^2 1 = ",round(sigma.1,4),"\n") cat("sigma^2 2 = ",round(sigma.2,4),"\n") cat("r = ",round(thres,4),"\n") accept.r<- (sum.r/igb)*100 cat("acceptance rate of r = ", round(accept.r,4),"%", "\n") ## Make a frequency table of delay lag lag.freq<- rep(0,d0) for(i in 1:d0){ lag.freq[i]<- sum(par.set[1:igb,ncol0]==i) } #lag.freq[1:length(table(par.set[,ncol0]))]<- table(par.set[,ncol0]) ## Frequency table for delay lag lag.freq<- t(matrix(lag.freq,dimnames=list(c(as.character(1:d0)),c("Freq")))) cat("Lag choice : ", "\n") print(lag.freq) cat("------------","\n") } } ## End of MCMC sampling ## Summarize the collected MCMC estimates mcmc.stat<- TAR.summary(par.set[(Burnin+1):Iteration,1:(ncol0-1)],lagp1,lagp2,constant=constant) print(round(mcmc.stat,4)) ## Calculate the highest posterior probability of delay lag lag.y<- c(1:d0) lag.d<- lag.y[lag.freq==max(lag.freq)] cat("Lag choice : ", "\n") print(lag.freq) cat("------------","\n") cat("The highest posterior prob. 
of lag is at : ",lag.d,"\n") ## calculate D(E[theta]) if (!missing(thresVar)){ loglik.2<-TAR.lik(yt,p1,p2,mcmc.stat[1:(p1+constant),1],mcmc.stat[(p1+constant+1):(p1+constant+p2+constant),1],mcmc.stat[(p1+constant+p2+constant+1),1],mcmc.stat[(p1+constant+p2+constant+2),1],lag.d,mcmc.stat[(p1+constant+p2+constant+3),1],lagp1,lagp2,constant=constant,thresVar)} else{ loglik.2<-TAR.lik(yt,p1,p2,mcmc.stat[1:(p1+constant),1],mcmc.stat[(p1+constant+1):(p1+constant+p2+constant),1],mcmc.stat[(p1+constant+p2+constant+1),1],mcmc.stat[(p1+constant+p2+constant+2),1],lag.d,mcmc.stat[(p1+constant+p2+constant+3),1],lagp1,lagp2,constant=constant) } DIC<-(2*(-2*sum(loglik.1[(Burnin+1):Iteration]))/length(loglik.1[(Burnin+1):Iteration]))-(-2*loglik.2) cat(" DIC = ",DIC,"\n") ################################################## ## Trace plots and ACF for all parameter estimates if(tplot =="TRUE"){ dev.new() ts.plot(yt) title("Trend plot of data.") nnp<- 2*constant+p1+p2+3 kk<- ceiling(nnp/3) pword<- NULL if(constant==1){ pword[1:(nnp-3)]<- c(paste("phi1",c(0,lagp1),sep="."),paste("phi2",c(0,lagp2),sep=".")) } else{ pword[1:(nnp-3)]<- c(paste("phi1",lagp1,sep="."),paste("phi2",lagp2,sep=".")) } pword[(nnp-2):nnp]<- expression(sigma[1]^2,sigma[2]^2,r) #pword[(p1+p2+1):(p1+p2+3)]<- expression() #expression(phi[c(0,lagp1)]^(1),phi[c(0,lagp2)]^(2),sigma[1]^2,sigma[2]^2,r) dev.new() par(mfrow=c(kk,3),cex=.6,cex.axis=0.8,lwd=0.1,las=1,ps=12,pch=0.5) ## Trace plots of all MCMC iterations for all estimates for (i in 1:nnp){ all.t<-length(par.set[,i]) plot.ts(par.set[,i],main=pword[i],xlab="",ylab="",col="blue") #lines(1:all.t,rep(real.par[i],all.t),col="red") lines(1:all.t,rep(mcmc.stat[i,"mean"],all.t),col="yellow") lines(1:all.t,rep(mcmc.stat[i,"lower"],all.t),col="green") lines(1:all.t,rep(mcmc.stat[i,"upper"],all.t),col="green") } dev.new() par(mfrow=c(kk,3),cex=.6,cex.axis=0.8,lwd=0.1,las=1,ps=12,pch=0.5) ## ACF of collected iterations for all estimates for (i in 1:nnp){ 
acf(par.set[(Burnin+1):Iteration,i],main=pword[i],xlab="",ylab="",lag.max=100)} } ## Calculate the residual for TAR model maxd<-max(lagp1,lagp2) if (constant == 1){ con.1<-mcmc.stat[1,1] par.1<-mcmc.stat[2:(p1+1),1] con.2<-mcmc.stat[p1+2,1] par.2<-mcmc.stat[(p1+2+1):(p1+p2+2),1] thv <-mcmc.stat[p1+p2+2+3,1] }else{par.1<-mcmc.stat[1:p1,1] par.2<-mcmc.stat[(p1+1):(p1+p2),1] thv <-mcmc.stat[p1+p2+2+1,1] } residual<-rep(NA,nob-maxd) for (t in (maxd+1):nob){ if (constant == 1){ if ( yt[t-lag.d] <= thv){ residual[t-maxd]<- yt[t] - sum(con.1,(par.1 * yt[t-lagp1])) } else{ residual[t-maxd]<- yt[t] - sum(con.2,(par.2 * yt[t-lagp2])) } } else{ if ( yt[t-lag.d] <= thv){ residual[t-maxd]<- yt[t] - sum(par.1 * yt[t-lagp1]) } else{ residual[t-maxd]<- yt[t] - sum(par.2 * yt[t-lagp2]) } } } tar<-list(mcmc=par.set,posterior=par.set[(Burnin+1):Iteration,1:(ncol0-1)],coef=round(mcmc.stat,4),residual=residual,lagd=lag.d,DIC=DIC) return(tar) ##Sys.time()-Time.initial }
### collectResults.R ##########################################################
# Concatenate the per-patient result files (patients 1, 2, and 3) into one file.
#
# Inputs:
#   outputPath     - directory holding the per-set result files; the
#                    concatenated file is written here as well
#   filenameStem   - stem of the filename identifying the method; the file for
#                    set i is located by the regex "^.*<i><filenameStem>"
#   outputFilename - name of the concatenated output file
# Output:
#   Writes the first two columns of the combined results as a comma-separated
#   file; returns the output path invisibly.
collectResults <- function(outputPath, filenameStem, outputFilename) {
	# Read each patient's file into a list and bind once at the end
	# (avoids the O(n^2) rbind-in-a-loop growth pattern of the original).
	pieces <- lapply(1:3, function(set) {
		filename <- dir(
			path = outputPath,
			# match all files with given pattern
			pattern = paste0("^.*", set, filenameStem),
			# return only names of visible files
			all.files = FALSE,
			# return only file names, not relative file paths
			full.names = FALSE,
			# assume all are in given directory, not in any subdirectories
			recursive = FALSE,
			ignore.case = TRUE
		)
		# Fail loudly when the pattern is ambiguous or matches nothing;
		# the original read.table() call would error cryptically here.
		if (length(filename) != 1) {
			stop("Expected exactly one result file for set ", set,
			     " matching stem '", filenameStem, "' in ", outputPath,
			     " but found ", length(filename), call. = FALSE)
		}
		read.table(
			file = file.path(outputPath, filename),
			sep = ",",
			header = TRUE,
			# kept TRUE to preserve the original column types downstream
			stringsAsFactors = TRUE
		)
	})
	results <- do.call(rbind, pieces)
	outPath <- file.path(outputPath, outputFilename)
	# Only the first two columns are written, as in the original design.
	write.table(results[, c(1, 2)],
		file = outPath,
		quote = FALSE,
		sep = ",",
		col.names = TRUE,
		row.names = FALSE
	)
	invisible(outPath)
}
/clean-code/collectResults.R
no_license
canghel/kaggle-seizure-prediction
R
false
false
1,308
r
### collectResults.R ##########################################################
# Combine the result files for patients 1, 2 and 3 into a single file.
#   outputPath     - directory containing the per-set results; the combined
#                    file is written to the same directory
#   filenameStem   - filename stem identifying the method
#   outputFilename - name of the combined output file
# Side effect: writes the first two columns of the stacked results as CSV.
collectResults <- function(outputPath, filenameStem, outputFilename) {
	combined <- NULL
	for (patientSet in 1:3) {
		# Locate the visible, non-recursive file for this patient set.
		matchName <- dir(
			path = outputPath,
			pattern = paste0("^.*", patientSet, filenameStem),
			all.files = FALSE,
			full.names = FALSE,
			recursive = FALSE,
			ignore.case = TRUE
		)
		chunk <- read.table(
			file = file.path(outputPath, matchName),
			sep = ",",
			header = TRUE,
			stringsAsFactors = TRUE
		)
		combined <- rbind(combined, chunk)
	}
	# Keep only the first two columns when writing the combined table.
	write.table(combined[, c(1, 2)],
		file = file.path(outputPath, outputFilename),
		quote = FALSE,
		sep = ",",
		col.names = TRUE,
		row.names = FALSE
	)
}
##### Keyboard input #####
# scan() : read a vector from the console
# edit() : interactively enter/edit a data frame
a <- scan()  # type numbers (Enter after each); press Enter on an empty line to stop
a

b <- scan(what=character())
b

df <- data.frame()
df <- edit(df)
df

##### File input ######
# read.csv()   : comma-separated files
# read.table() : any delimited file other than comma-separated
# read.xlsx()  : Excel files (from the xlsx package)
student <- read.table("../data/student.txt")
student

student1 <- read.table("../data/student1.txt", header = TRUE)
student1

student2 <- read.table(file.choose(), header = TRUE, sep=";")  # opens a file-chooser dialog
student2

student3 <- read.table("../data/student3.txt", header = TRUE, sep="", na.strings = "-")
student3 <- read.table("../data/student3.txt", header = TRUE, sep="", na.strings = c("-","+","&"))
student3

# read.xlsx() is not a base function; it comes from an external package
# (comparable to pip-installed libraries in Python)
# one-time package installation (normally not left inside a script)
install.packages("xlsx")
library(rJava)
library(xlsx)

# studentX <- read.xlsx(file.choose(), sheetIndex = 1, encoding = "UTF-8")
studentX <- read.xlsx(file.choose(), sheetName = "emp2", encoding = "UTF-8")
studentX

##### Screen output #####
# bare variable name
# ()
# cat()
# print()
x <- 10
y <- 20
z <- x + y
z
(z <- x + y)
print(z <- x + y)
print(z)

# print("..." + as.character(z) + "...")  # '+' only works for numeric arithmetic in R
# print("...", as.character(z), "...")    # print() does not take multiple parts either
# use cat() instead to print several pieces together
cat("x+y의 결과는", as.character(z), "입니다.")

##### File output #####
# write.table()
# write.csv()
studentX <- read.xlsx(file.choose(), sheetName = "emp2", encoding = "UTF-8")
studentX
class(studentX)

write.table(studentX, "../data/stud1.txt")
write.table(studentX, "../data/stud2.txt", row.names = FALSE)
write.table(studentX, "../data/stud3.txt", row.names = FALSE, quote = FALSE)  # drop quotes around values
write.csv(studentX, "../data/stud4.txt", row.names = FALSE, quote = FALSE)    # comma separator automatically

library(rJava)
library(xlsx)
write.xlsx(studentX, "../data/stud5.xlsx")

##### rda file output #####
# save() : R's native format / binary / readable only by R / fast to process
# load()
save(studentX, file="../data/stud6.rda")
rm(studentX)
studentX  # errors on purpose: the object was just removed
load("../data/stud6.rda")
studentX

##### sink() #####
data()
?data
data(iris)
head(iris)
tail(iris)
iris
str(iris)

# after sink() starts, output goes to the file instead of the console
sink("../data/iris.txt")
head(iris)
tail(iris)
str(iris)
# call sink() again to stop redirecting
sink()
head(iris)
/R/BasicProject/2-InputOutput.R
no_license
JeonBW/Portfolio
R
false
false
2,633
r
##### Keyboard input #####
# scan(): read a vector typed at the console
# edit(): spreadsheet-style entry of a data frame
a <- scan()            # enter numbers; an empty line finishes the input
a
b <- scan(what = character())
b
df <- data.frame()
df <- edit(df)
df

##### Reading files ######
# read.csv()   -> comma-separated files
# read.table() -> every delimited format except comma-separated
# read.xlsx()  -> Excel workbooks
student <- read.table("../data/student.txt")
student
student1 <- read.table("../data/student1.txt", header = TRUE)
student1
student2 <- read.table(file.choose(), header = TRUE, sep = ";")   # pops up a file picker
student2
student3 <- read.table("../data/student3.txt", header = TRUE, sep = "", na.strings = "-")
student3 <- read.table("../data/student3.txt", header = TRUE, sep = "", na.strings = c("-", "+", "&"))
student3

# read.xlsx() is not built in; it ships with the external xlsx package
install.packages("xlsx")
library(rJava)
library(xlsx)
# studentX <- read.xlsx(file.choose(), sheetIndex = 1, encoding = "UTF-8")
studentX <- read.xlsx(file.choose(), sheetName = "emp2", encoding = "UTF-8")
studentX

##### Printing to the console #####
# bare variable name / ( ) / cat() / print()
x <- 10
y <- 20
z <- x + y
z
(z <- x + y)
print(z <- x + y)
print(z)
# '+' cannot glue strings together, and print() takes a single object,
# so cat() is the tool for printing several pieces at once:
cat("x+y의 결과는", as.character(z), "입니다.")

##### Writing files #####
# write.table() / write.csv()
studentX <- read.xlsx(file.choose(), sheetName = "emp2", encoding = "UTF-8")
studentX
class(studentX)
write.table(studentX, "../data/stud1.txt")
write.table(studentX, "../data/stud2.txt", row.names = FALSE)
write.table(studentX, "../data/stud3.txt", row.names = FALSE, quote = FALSE)  # no quotation marks
write.csv(studentX, "../data/stud4.txt", row.names = FALSE, quote = FALSE)    # comma-separated by default
library(rJava)
library(xlsx)
write.xlsx(studentX, "../data/stud5.xlsx")

##### Saving as .rda #####
# save()/load(): R's binary format -- readable only by R, but fast
save(studentX, file = "../data/stud6.rda")
rm(studentX)
studentX
load("../data/stud6.rda")
studentX

##### sink() #####
data()
?data
data(iris)
head(iris)
tail(iris)
iris
str(iris)
# from here on, output is redirected into the file rather than the console
sink("../data/iris.txt")
head(iris)
tail(iris)
str(iris)
# a second sink() call restores console output
sink()
head(iris)
library(lmom)
### Name: cdfgno
### Title: Generalized normal distribution
### Aliases: cdfgno quagno
### Keywords: distribution

### ** Examples

# Draw 100 random variates from the generalized normal distribution
# with parameters xi = 0, alpha = 1, k = -0.5 (inverse-CDF sampling
# of uniforms via the quantile function).
quagno(runif(100), c(0, 1, -0.5))

# With parameters xi = 1, alpha = 1, k = -1 the generalized normal
# distribution reduces to the standard lognormal distribution; compare
# the two quantile functions side by side:
fval <- seq(0.1, 0.9, by = 0.1)
cbind(fval, lognormal = qlnorm(fval), g.normal = quagno(fval, c(1, 1, -1)))
/data/genthat_extracted_code/lmom/examples/cdfgno.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
513
r
library(lmom)
### Name: cdfgno
### Title: Generalized normal distribution
### Aliases: cdfgno quagno
### Keywords: distribution

### ** Examples

# A random sample of size 100 from the generalized normal distribution,
# parameters xi = 0, alpha = 1, k = -0.5, generated by pushing uniform
# draws through the quantile function quagno().
quagno(runif(100), c(0, 1, -0.5))

# Sanity check: for xi = 1, alpha = 1, k = -1 the generalized normal
# distribution is exactly the standard lognormal; the two quantile
# columns below should agree.
fval <- seq(0.1, 0.9, by = 0.1)
cbind(fval, lognormal = qlnorm(fval), g.normal = quagno(fval, c(1, 1, -1)))
rankhospital <- function(state, outcome, num = "best") {
  ## Return the name of the hospital in `state` holding rank `num`
  ## ("best", "worst", or an integer rank) for 30-day mortality in the
  ## chosen `outcome` ("heart attack", "heart failure" or "pneumonia").
  ## Ties in mortality rate are broken alphabetically by hospital name;
  ## NA is returned when `num` exceeds the number of ranked hospitals.
  #
  # Relevant source columns:
  #   11 "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack"
  #   17 "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure"
  #   23 "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
  outcomeData <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  data <- outcomeData[, c(2, 7, 11, 17, 23)]
  names(data)[1] <- "hospital"
  names(data)[2] <- "usstate"
  names(data)[3] <- "heart attack"
  names(data)[4] <- "heart failure"
  names(data)[5] <- "pneumonia"

  # setup vectors of accepted values
  valid_outcomes <- c("heart attack", "heart failure", "pneumonia")
  valid_states <- data[!duplicated(data["usstate"]), "usstate"]

  # couple checks for valid input data
  if (!(outcome %in% valid_outcomes)) stop("invalid outcome")
  if (!(state %in% valid_states)) stop("invalid state")

  # Coerce the outcome column to numeric; non-numeric entries such as
  # "Not Available" become NA (the coercion warnings are expected and
  # suppressed on purpose).
  data[, outcome] <- suppressWarnings(as.numeric(data[, outcome]))

  # keep only rows for the requested state with a usable outcome value
  data <- subset(data, usstate == state & !is.na(data[, outcome]),
                 select = c("hospital", outcome))

  # sort by mortality rate, ties broken by hospital name
  data <- data[order(data[, outcome], data[, "hospital"]), ]
  # seq_len() instead of 1:nrow(): an empty result would otherwise yield
  # c(1, 0) and make cbind() fail
  data <- cbind(data, rank = seq_len(nrow(data)))

  if (num == "best") num <- min(data["rank"])
  if (num == "worst") num <- max(data["rank"])

  # name of the hospital at the requested rank (NA if out of range)
  data[num, "hospital"]
}
/rankhospital.R
no_license
sreeser/ProgrammingAssignment3
R
false
false
1,507
r
rankhospital <- function(state, outcome, num = "best") {
  ## Look up the hospital in `state` at rank `num` ("best", "worst", or an
  ## integer) for 30-day mortality in `outcome`; ties in the rate are broken
  ## alphabetically by hospital name.
  # Mortality columns used from the raw file:
  #   11 heart attack, 17 heart failure, 23 pneumonia
  raw <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  df <- raw[, c(2, 7, 11, 17, 23)]
  names(df) <- c("hospital", "usstate", "heart attack", "heart failure", "pneumonia")

  # accepted argument values
  okOutcomes <- c("heart attack", "heart failure", "pneumonia")
  okStates <- df[!duplicated(df["usstate"]), "usstate"]
  if (!(outcome %in% okOutcomes)) stop("invalid outcome")
  if (!(state %in% okStates)) stop("invalid state")

  # numeric mortality rate; "Not Available" etc. coerce to NA (warnings hidden)
  rate <- suppressWarnings(as.numeric(df[, outcome]))
  df[, outcome] <- rate

  # rows for the requested state that have a usable rate
  keep <- which(df$usstate == state & !is.na(rate))
  df <- df[keep, c("hospital", outcome)]

  # order by rate, then by hospital name, and attach the rank
  df <- df[order(df[, outcome], df[, "hospital"]), ]
  df <- cbind(df, rank = 1:nrow(df))

  if (num == "best") num <- min(df["rank"])
  if (num == "worst") num <- max(df["rank"])

  # hospital name at the requested rank (NA when out of range)
  df[num, "hospital"]
}
require(MASS) require(fBasics) dyn.load("bin/r-scripts/utils_C.so") #symbol.C("convert_event_table") ## mysql utils mysqlSetTimeVariables=function(con, table, experimentID, expIntervalMin){ dbGetQuery(con,"SET @expStartTimeNs=0"); dbGetQuery(con,"SET @expIntervalStartNs=0"); dbGetQuery(con,"SET @expIntervalStopNs=0"); query=paste("SELECT @expStartTimeNs := MIN(tin) FROM ", table, " WHERE experimentID=",experimentID, " AND operation LIKE \"%ActionServlet%\"", sep="") dbGetQuery(con,query) min2ns=60*1000*1000*1000 query=paste("SELECT @expIntervalStartNs:=@expStartTimeNs+", expIntervalMin[1], "*", min2ns,sep="") dbGetQuery(con,query) query=paste("SELECT @expIntervalStopNs:=@expStartTimeNs+", expIntervalMin[2], "*", min2ns, sep="") dbGetQuery(con,query) rm(query) } ## ## sort.data.frame <- function(x, key, ...) { if (missing(key)) { rn <- rownames(x) if (all(rn %in% 1:nrow(x))) rn <- as.numeric(rn) x[order(rn, ...), , drop=FALSE] } else { x[do.call("order", c(x[key], ...)), , drop=FALSE] } } ## ## removes normal and extreme outliers but at most ## a given ratio in [0..1] ## expects field respMs remove_outliers_right = function (df, max_ratio=max_outlier_ratio){ df.n=nrow(df) max_n_outliers=floor(df.n*max_ratio) iqr = IQR(df$respMs) q3 = quantile(df$respMs, 0.75) q_max_ratio = quantile(df$respMs, (df.n-max_n_outliers)/df.n) q_normal = q3 + (1.5*iqr) q_extreme = q3 + (3*iqr) subset(df, respMs<=max(q_max_ratio, q_normal)) } ## returns data frame with fields outlier_info_right = function (data) { iqr = IQR(data) q3 = quantile(data, 0.75) q_normal = q3 + (1.5*iqr) q_extreme = q3 + (3*iqr) n_outliers=length(data[data>q_normal]) n_extreme=length(data[data>q_extreme]) n_normal=n_outliers-n_extreme n = length(data) data.frame( iqr, n_total=n_outliers, ratio_total=n_outliers/n, q_normal, n_normal, ratio_normal=n_normal/n, q_extreme, n_extreme, ratio_extreme=n_extreme/n) } ## returns data frame with $idx, $val runmean.steps = function (x, window_size, by, ...) 
{ runningmean = runmean(x, k=window_size, endrule="trim", ...) num_steps=floor(length(runningmean)/by) idx=seq(from=1, to=by*num_steps, by=by) res=data.frame(idx=idx, val=runningmean[idx]) rm(runningmean) res } ## fancy density plot plot pearsonModeSkewness <- function(x,dataMean,mode) { result = (dataMean - mode)/sd(x) return(result) } theSkewness <- function(data,dataMean) { return(centralMoment(data,dataMean,3)/(centralMoment(data,dataMean,2)^(3/2))) } centralMoment <- function(data,dataMean,i) { mean((data-dataMean)^i) } plotFancyDensityMultBw=function(data, main="", xlab="", ylab="") { dens=density(data,bw="nrd0") xlab=paste(xlab," N=", dens$n,sep="") plot(dens,main=main,xlab=xlab,col=1); grid(col=gridcol) rug(data) lines(density(data, bw="nrd"), col = 2) lines(density(data, bw="ucv"), col = 3) lines(density(data, bw="bcv"), col = 4) #lines(density(entry[["respMs"]], bw="SJ-ste"), col = 5) lines(density(data, bw="SJ-dpi"), col = 5) legend("topright", legend = c("bw:nrd0", "bw:nrd", "bw:ucv", "bw:bcv", "bw:SJ-ste", "bw:SJ-dpi", "log-normal"), col = 1:6, lty = 1,box.lty=0) } plotFancyDensity=function(data, main="", xlab="", ylab="", ...) 
{ dens=density(data,n=1024) xlab=paste(xlab," N=", dens$n, ", Bandwidth=",format(mean(dens$bw),digits=4),sep="") dataMean=mean(data) dataMeanY=approx(dens$x,dens$y,xout=dataMean)$y[1] dataMedian=median(data) dataMedianY=approx(dens$x,dens$y,xout=dataMedian)$y[1] densModeY=max(dens$y) densMode=approx(dens$y,dens$x,xout=densModeY)$y[1] moskewness = skewness(data) #theSkewness(data,dataMean); plot(dens,main=main,xlab=xlab, ...); grid(col=gridcol) rug(data) points(dataMean,dataMeanY,pch=3,lwd=1,type="h",lty="dashed",col="red"); points(dataMean,dataMeanY,pch=1,lwd=2,col="red"); points(dataMedian,dataMedianY,pch=4,lwd=1,type="h",lty="dashed",col="blue"); points(dataMedian,dataMedianY,pch=2,lwd=2,col="blue"); points(densMode,densModeY,pch=5,lwd=1,type="h",lty="dashed",col="darkgreen"); points(densMode,densModeY,pch=3,lwd=2,col="darkgreen"); t1 = paste("Mean ",format(dataMean,digits=4)) t2 = paste("Median ",format(dataMedian,digits=4)) t3 = paste("Approx. Mode ",format(densMode,digits=4)) t4 = paste("Skewness ",format(moskewness,digits=4)) #t5 = paste("Pearson Mode Skewness ",format(pearsonModeSkewness(data,dataMean,densMode),digits=4)) t5 = paste("Kurtosis ",format(kurtosis(data),digits=4)) if (moskewness < 0) { legendpos = "topleft"; } else { legendpos = "topright"; } legend(legendpos,c(t1,t2,t3,t4,t5),pch=c(1,2,3,0,0),col=c("red","blue","darkgreen","white","white"),bty = "n") } plotFancyDensityGiven=function(dens, minimum, maximum, dataQ1, dataQ3, dataMean, dataMedian, densMode, skewness, kurt, main="", xlab="", ylab="", ...) 
{ xlab=paste(xlab," N=", dens$n, ", Bandwidth=",format(mean(dens$bw),digits=4),sep="") # dataMeanY=approx(dens$x,dens$y,xout=dataMean)$y[1] # dataQ1Y=approx(dens$x,dens$y,xout=dataQ1)$y[1] # dataMedianY=approx(dens$x,dens$y,xout=dataMedian)$y[1] # dataQ3Y=approx(dens$x,dens$y,xout=dataQ3)$y[1] densModeY=approx(dens$x,dens$y,xout=densMode)$y[1] plot(dens,main=main,xlab=xlab, lwd=1.5); grid(col=gridcol) abline(v=minimum, lwd=1,lty="dashed",col="black") abline(v=maximum, lwd=1,lty="dashed",col="black") abline(v=dataMean, lwd=1,lty="dashed",col="red") abline(v=dataQ1, lwd=1,lty="dashed",col="blue") abline(v=dataMedian, lwd=1,lty="dashed",col="blue") abline(v=dataQ3, lwd=1,lty="dashed",col="blue") points(densMode,densModeY, lwd=1,type="h",lty="dashed",col="green"); points(densMode,densModeY,pch=1,lwd=2,col="green"); # points(dataMean,dataMeanY,pch=3,lwd=1.5,type="h",lty="dashed",col="red"); # points(dataMean,dataMeanY,pch=1,lwd=2,col="red"); # points(dataQ1,dataQ1Y,pch=4,lwd=1.5,type="h",lty="dashed",col="blue"); # points(dataQ1,dataQ1Y,pch=1,lwd=2,col="blue"); # points(dataMedian,dataMedianY,pch=4,lwd=1.5,type="h",lty="dashed",col="blue"); # points(dataMedian,dataMedianY,pch=1,lwd=2,col="blue"); # points(dataQ3,dataQ3Y,pch=4,lwd=1.5,type="h",lty="dashed",col="blue"); # points(dataQ3,dataQ3Y,pch=1,lwd=2,col="blue"); # points(densMode,densModeY,pch=5,lwd=1.5,type="h",lty="dashed",col="green"); # points(densMode,densModeY,pch=1,lwd=2,col="green"); t0 = paste("min, max (", format(minimum,digits=4), ", ", format(maximum,digits=4), ")", sep="") t1 = paste("mean (",format(dataMean,digits=4), ")", sep="") t2 = paste("quartiles (",format(dataQ1,digits=4),", ",format(dataMedian,digits=4), ", ", format(dataQ3,digits=4), ")", sep="") t3 = paste("approx. 
mode (",format(densMode,digits=4), ")", sep="") t4 = paste("skewness (",format(skewness,digits=4), ")", sep="") #t5 = paste("Pearson Mode Skewness ",format(pearsonModeSkewness(data,dataMean,densMode),digits=4)) t5 = paste("kurtosis (",format(kurt,digits=4), ")", sep="") if (skewness < 0) { legendpos = "topleft"; } else { legendpos = "topright"; } legend(legendpos,c(t0,t1,t2,t3,t4,t5),pch=c(NA,NA,NA,NA,1,NA,NA),col=c("black","red","blue","green","white","white"),lwd=c("solid"),bty = "n") } plotFancyXYCurve = function( x,y=NULL, add=FALSE, grid=TRUE, ...){ if (add){ points(x=x, y=y, ...) }else{ plot(x=x, y=y, ...) } lines(spline(x=x, y=y), ...) if(grid) grid(col=gridcol) } plotFancyXYLine = function(x, y=NULL, add=FALSE, grid=TRUE, type="b", ...){ if (add){ points(x=x, y=y, type=type, ...) }else{ plot(x=x, y=y, type=type, ...) } if(grid) grid(col=gridcol) } ## fancy bin boxplot ## bin_width is relative to the exp_duration plotFancyBoxplot=function (expDuration, binWidth, resptime,limY=FALSE,main="",xlab="",ylab=""){ expDurationBins=floor(expDuration/binWidth)*binWidth+binWidth df=data.frame(expDurationBins,resptime) attach(df) rm(expDurationBins) avgs=unique(ave(resptime,expDurationBins)) boxplots=boxplot(resptime~expDurationBins,df,log="",plot=FALSE) # dirty! 
need this only for upper hinge if(limY){ boxplots=boxplot(resptime~expDurationBins,df,border="black",medcol="blue",log="", plot=TRUE,main=main,xlab=xlab,ylab=ylab#,outline=FALSE#,range=0 ,ylim=c(min(resptime),max(c(boxplots$stats[4,],max(avgs)))) ) grid(col=gridcol) }else{ boxplots=boxplot(resptime~expDurationBins,df,border="black",medcol="blue",log="", plot=TRUE,main=main,xlab=xlab,ylab=ylab,outline=TRUE) grid(col=gridcol) } medians=boxplots$stats[3,] lines(avgs,lty="solid", col="red", lwd=1.5) lines(medians,lty="solid", col="blue", lwd=1.5) legend("topleft",c("Mean","Median"),lty=c("solid","solid"),col=c("red","blue"),box.lty=0) } ## fancy scatter plot scatter.smooth.lcol=function (x, y = NULL, span = 2/3, degree = 1, family = c("symmetric", "gaussian"), xlab = NULL, ylab = NULL, ylim = range(y, prediction$y, na.rm = TRUE), evaluation = 50, lcol="black", ...) { xlabel <- if (!missing(x)) deparse(substitute(x)) ylabel <- if (!missing(y)) deparse(substitute(y)) xy <- xy.coords(x, y, xlabel, ylabel) x <- xy$x y <- xy$y xlab <- if (is.null(xlab)) xy$xlab else xlab ylab <- if (is.null(ylab)) xy$ylab else ylab prediction <- loess.smooth(x, y, span, degree, family, evaluation) plot(x, y, ylim = ylim, xlab = xlab, ylab = ylab, ...) 
lines(prediction,col=lcol,lwd=1.5) invisible() } plotFancyScatterplot=function (expDuration, resptime, main="", xlab="", ylab=""){ scatter.smooth.lcol(expDuration,resptime,main=main,xlab=paste(xlab," N=",length(resptime),sep=""),ylab=ylab,family="gaussian", lcol="red") grid(col=gridcol) legend("topleft",c("Local regression"),lty=c("solid"),col="red", box.lty=0) } ## fancy throughput plot plotFancyThroughputplot=function (expDuration, binWidth, reqList, main="", xlab="", ylab=""){ expDurationBins=floor((expDuration)/binWidth)*binWidth+binWidth df=data.frame(expDurationBins, reqList) bin_list = split(df,df["expDurationBins"]) frequencies=as.vector(sapply(bin_list,nrow)) times=as.integer(names(bin_list)) plot(times,frequencies,type="h",main=main,xlab=xlab,ylab=ylab,xaxt="n",col="gray") grid(col=gridcol) lines(times,frequencies) axis(1, at=seq(min(times),max(times),binWidth)) } ## transforms a 'session table' into an 'active session table' ## session_table must contain the fields 'tin' and 'tout' ## the result will contain the field 'event_list' and 'active_sessions' ## if a t_i occurs multiple times, the maximum number of active sessions ## for t is set. 
#session_table=data.frame(tin=round(rlnorm(10)*20), tout=round(rlnorm(10)*50)+150)
# Converts a session table (fields 'tin'/'tout') into a table of the number of
# active sessions at each distinct event time. The heavy lifting is done by the
# compiled C routine 'convert_event_table' loaded from utils_C.so.
sessionTable2activeSessionsTable=function (session_table){
  # event entries: +1 at each session start, -1 at each session end, time-sorted
  event_table=sort.data.frame(rbind.data.frame(data.frame(event_list=session_table$tin,active_sessions=1),
                                               data.frame(event_list=session_table$tout,active_sessions=-1)
                                               ), key="event_list")
  t_list=unique(event_table$event_list)
  num_list=rep(0,length(t_list))
  # commented-out pure-R implementation retained from the original:
  #  n[1] = 1
  #  cur_t = event_table$event_list[1]
  #  cur_t_idx=1 # invalid but will be incremented in the first iteration
  #  for (i in 2:length(event_table$active_sessions)){
  #    event_table$active_sessions[1]=1
  #    if (event_table$event_list[i] > cur_t) {
  #      cur_t=event_table$event_list[i]
  #      cur_t_idx=cur_t_idx+1
  #    }
  #    event_table$active_sessions[i]=event_table$active_sessions[i-1]+event_table$active_sessions[i]
  #    if(event_table$active_sessions[i] > n[cur_t_idx]) n[cur_t_idx] = event_table$active_sessions[i]
  #  }
  ret = .C("convert_event_table",
           event_list_t=as.double(event_table$event_list),
           event_list_action=as.integer(event_table$active_sessions),
           n=nrow(event_table),
           t_list=as.double(t_list),
           num_list=as.integer(num_list),
           m=length(num_list))
  rm(event_table); rm(t_list); rm(num_list)
  data.frame(event_list=ret$t_list, active_sessions=ret$num_list)
  # This is the old and stupid way of doing it:
  #attach(session_table)
  #event_list=sort(unique(c(tin,tout)))
  #active_sessions=sapply(event_list,function(x) nrow(subset(session_table,x>=tin&x<tout)))
  #active_sessions_table=data.frame(event_list,active_sessions)
  #rm(event_list)
  #rm(active_sessions)
  #active_sessions_table
}

## fancy active sessions plot (lines)
# Step plot of active sessions over time. NOTE(review): relies on
# attach()/detach(), which is discouraged; kept as-is here.
plotFancyActiveSessionsLineplot=function (active_sessions_table, main="", xlab="", ylab=""){
  attach(active_sessions_table)
  plot(event_list, active_sessions,type="s", xaxt="n", main=main, xlab=xlab, ylab=ylab)
  grid(col=gridcol)
  axis(1, at=seq(floor(min(event_list)),ceiling(max(event_list))))
  detach(active_sessions_table)
}

## fancy active sessions plot (scatter with smooth regression line)
## TODO: probably this function has a bad performance since we draw a scatter plot with white
## points ;-)
## It should be sufficient to only plot the regression line with lines(loess.smooth(...))
## but we get an error concerning xlim and ylim
plotFancyActiveSessionsRegressionplot=function (active_sessions_table, main="", xlab="", ylab=""){
  attach(active_sessions_table)
  # the points are drawn white (invisible) so only the loess trend is visible
  scatter.smooth(event_list, active_sessions,xaxt="n", main=main, xlab=xlab, ylab=ylab,col="white",family="gaussian")
  grid(col=gridcol)
  axis(1, at=seq(floor(min(event_list)),max(ceiling(event_list))))
  legend("topleft",c("Local regression"),lty=c("solid"),box.lty=0)
  detach(active_sessions_table)
}

## execute maximum likelihood optimization for 3-parameter log-normal distribution parameters
## returns a data frame row with columns meanlog, sdlog, shift
fitL3norm = function(data){
  data.unique.sort=sort(unique(data))
  # start value for the shift estimated from the spacing of the smallest unique
  # samples
  # data.shiftEst=data.unique.sort[1]-(data.unique.sort[2]-data.unique.sort[1])
  data.shiftEst=data.unique.sort[1]-(data.unique.sort[10]-data.unique.sort[1])
  data.shiftedlog=log(data-data.shiftEst)
  data.shiftedlog.mean=mean(data.shiftedlog)
  data.shiftedlog.sd=sd(data.shiftedlog)
  rm(data.shiftedlog)
  rm(data.unique.sort)
  # NOTE(review): dl3norm is not defined in this file — presumably comes from
  # ../R/l3norm.R; confirm it is sourced before calling this function.
  fittedL3norm=fitdistr(data, dl3norm,
                        start=list(meanlog=data.shiftedlog.mean, sdlog=data.shiftedlog.sd, shift=data.shiftEst),
                        verbose=FALSE
                        # , method=c("BFGS")
                        # ,lower(0,0.001,-Inf), upper(Inf,Inf,min(data))
                        )
  #"BFGS": most results but sometimes quits with non-finite finite-difference value [3]
  # "L-BFGS-B": often non-finite finite-difference value [3]
  #"SANN" : very slow!
  #"CG": doesn't find much
  fittedL3norm
}

## Aitchison1957 6.22 Cohen's Least Sample Value Method p. 56 (t estimated by quantile ...
# )
# Cohen's least-sample-value method (Aitchison 1957, sec. 6.22, p. 56) for the
# 3-parameter log-normal: the shift is estimated via the empirical probability
# of the sample minimum, then fitdistr() refines all three parameters. Falls
# back to the start values (with NA standard errors) if fitdistr() fails.
fitL3normCohenSave = function(data){
  data.min=min(data)
  data.min.shifted=data-data.min
  data.min.shifted.log=log(data.min.shifted+0.05)
  data.min.log.mean=mean(data.min.shifted.log)
  data.min.log.sd=sd(data.min.shifted.log)
  data.min.shifted.pmin=ecdf(data.min.shifted)(0)
  data.shiftEst=data.min-exp(data.min.log.mean+qnorm(data.min.shifted.pmin)*data.min.log.sd)
  data.min.shifted=data-data.shiftEst
  # ;-) shouldn't we use data.min.shifted in the following?
  # NOTE(review): the next two statements recompute mean/sd from the OLD
  # data.min.shifted.log (based on the sample minimum), not from the freshly
  # shifted data above — looks like a bug; confirm against the original method.
  data.min.log.mean=mean(data.min.shifted.log)
  data.min.log.sd=sd(data.min.shifted.log)
  tryCatch(
           fitdistr(data, dl3norm, start=list(meanlog=data.min.log.mean, sdlog=data.min.log.sd, shift=data.shiftEst)),
           error= function (e) {
             list(estimate=data.frame(meanlog=data.min.log.mean, sdlog=data.min.log.sd, shift=data.shiftEst),
                  sd=data.frame(meanlog=NA, sdlog=NA, shift=NA))
           }
           )
}

# Draws density and QQ plots comparing the sample against the fitted
# 3-parameter log-normal, 2-parameter log-normal and normal models (two plots
# per model). The l3norm plots are skipped when any of its estimates is NA.
plotDistributionFittings=function(data,l3norm.approx,lnorm.approx,norm.approx){
  l3norm.approx.estimate=l3norm.approx[["estimate"]]
  lnorm.approx.estimate=lnorm.approx[["estimate"]]
  norm.approx.estimate=norm.approx[["estimate"]]
  ## sample data against 3-parameter log-normal distribution with optimized parameters
  if(!any(is.na(as.vector(unlist(l3norm.approx.estimate))))){
    plotFancyDensity(data=data,
                     main=paste("Density Plot of Response Times and 3-Parameter Log-Normal Distribution Model", sep=""),
                     xlab="Response time (milliseconds)",col="darkgray")
    curve(dl3norm(x, l3norm.approx.estimate[["meanlog"]], l3norm.approx.estimate[["sdlog"]], l3norm.approx.estimate[["shift"]]),add=TRUE)
    qqplot(rl3norm(length(data),meanlog=l3norm.approx.estimate[["meanlog"]], sdlog=l3norm.approx.estimate[["sdlog"]], shift=l3norm.approx.estimate[["shift"]]),
           data,
           main="QQ Plot of Sample Data and 3-Parameter Log-Normal Distribution",
           ylab="Sample response time (ms)",
           xlab=substitute(Lambda*"("*tau*"="*shift*", "*mu*"="*meanlog*", "*sigma*"="*sdlog*")",
                           list(shift=round(l3norm.approx.estimate[["shift"]],digits=3),
                                meanlog=round(l3norm.approx.estimate[["meanlog"]],digits=3),
                                sdlog=round(l3norm.approx.estimate[["sdlog"]],digits=3)))
           ); grid(col=gridcol); abline(0,1)
  }
  ## sample data against 2-parameter log-normal distribution
  plotFancyDensity(data=data,
                   main=paste("Density Plot of Response Times and 2-Parameter Log-Normal Distribution Model ", sep=""),
                   xlab="Response time (milliseconds)",col="darkgray")
  curve(dlnorm(x, lnorm.approx.estimate[["meanlog"]], lnorm.approx.estimate[["sdlog"]]),add=TRUE)
  qqplot(rlnorm(length(data),meanlog=lnorm.approx.estimate[["meanlog"]], sdlog=lnorm.approx.estimate[["sdlog"]]),
         data,
         main="QQ Plot of Sample Data and 2-Parameter Log-Normal Distribution",
         ylab="Sample response time (ms)",
         xlab=substitute(Lambda*"("*mu*"="*meanlog*", "*sigma*"="*sdlog*")",
                         list(meanlog=round(lnorm.approx.estimate[["meanlog"]],digits=3),
                              sdlog=round(lnorm.approx.estimate[["sdlog"]],digits=3)))
         ); grid(col=gridcol); abline(0,1)
  ## sample data against normal distribution with sample mean and sd
  plotFancyDensity(data=data,
                   main=paste("Density Plot of Response Times ",
                              "and Normal Distribution Model (Sample Mean)", sep=""),
                   xlab="Response time (milliseconds)",col="darkgray")
  curve(dnorm(x, norm.approx.estimate[["mean"]], norm.approx.estimate[["sd"]]),add=TRUE)
  qqplot(rnorm(length(data), norm.approx.estimate[["mean"]], norm.approx.estimate[["sd"]]),
         data,
         main="QQ Plot of Sample Data and Normal Distribution (Sample mean)",
         ylab="Sample response time (ms)",
         xlab=substitute(N*"("*mu*"="*mean*", "*sigma*"="*sd*")",
                         list(mean=round(norm.approx.estimate[["mean"]],digits=3),
                              sd=round(norm.approx.estimate[["sd"]],digits=3)))
         ); grid(col=gridcol); abline(0,1)
}

## we should optimize this one ;-)
## Usage Example:
##   ecdf_unserialized=unserialize(hexStr2RawVect(ecdf_hex))
##   density_unserialized=unserialize(hexStr2RawVect(density_hex))
# Converts a hex string (e.g. a MySQL BLOB dump) into a raw vector suitable for
# unserialize().
hexStr2RawVect = function (str){
  as.raw(paste("0x",unlist(strsplit(gsub("[[:space:]]$","",gsub("([[:xdigit:]][[:xdigit:]])","\\1 ",str,extended=TRUE))," ")),sep=""))
}

opStatsToTable =
function(experimenttable,experimentid, expinterval, n_threads, n_traces, workload,
         operation, q_removed, n_removed, routlier_info, CI,basicStats,approxMode,
         l3norm.approx, lnorm.approx,
         l3norm.skTestResult, lnorm.skTestResult, norm.skTestResult,
         data.density, data.ecdf){
  # Writes one row of per-operation statistics (basic stats, distribution fits,
  # S-K test results, serialized ecdf/density objects) into the MySQL table
  # named by the global 'opstats_table'. Also uses the global driver 'm'.
  l3norm.approx.estimation=l3norm.approx[["estimate"]]
  l3norm.approx.sd=l3norm.approx[["sd"]]
  lnorm.approx.estimation=lnorm.approx[["estimate"]]
  lnorm.approx.sd=lnorm.approx[["sd"]]
  # replace NA estimates by SQL NULL literals so the INSERT remains valid
  if (is.na(l3norm.approx.estimation[["meanlog"]]) || is.na(l3norm.approx.estimation[["sdlog"]]) || is.na(l3norm.approx.estimation[["shift"]]))
    l3norm.approx.estimation=data.frame(meanlog="null", sdlog="null", shift="null")
  if (is.na(l3norm.approx.sd[["meanlog"]]) || is.na(l3norm.approx.sd[["sdlog"]]) || is.na(l3norm.approx.sd[["shift"]]))
    l3norm.approx.sd=data.frame(meanlog="null", sdlog="null", shift="null")
  if (is.na(lnorm.approx.estimation[["meanlog"]]) || is.na(lnorm.approx.estimation[["sdlog"]]))
    lnorm.approx.estimation=data.frame(meanlog="null", sdlog="null", shift="null")
  if (is.na(lnorm.approx.sd[["meanlog"]]) || is.na(lnorm.approx.sd[["sdlog"]]))
    lnorm.approx.sd=data.frame(meanlog="null", sdlog="null", shift="null")
  # BUG FIX: the second operand of each || tested [["D"]] twice; the p-value
  # must be checked as well.
  if (is.na(l3norm.skTestResult[["D"]]) || is.na(l3norm.skTestResult[["p.value"]]))
    l3norm.skTestResult=data.frame(D="null", p.value="null")
  if (is.na(lnorm.skTestResult[["D"]]) || is.na(lnorm.skTestResult[["p.value"]]))
    lnorm.skTestResult=data.frame(D="null", p.value="null")
  if (is.na(norm.skTestResult[["D"]]) || is.na(norm.skTestResult[["p.value"]]))
    norm.skTestResult=data.frame(D="null", p.value="null")
  query=paste(
    "INSERT INTO ", opstats_table ," (",
    "`date`,", "`experimenttable`,", "`experimentid`,", "`startMin`,", "`stopMin`,",
    "`workload`,", "`threads`,", "`throughputMin`,", "`operation`,", "`n`,",
    "`q_removed`,", "`n_removed`,",
    "`rnoutliers_q`,", "`rnoutliers_n`,", "`rnoutliers_r`,",
    "`rxoutliers_q`,", "`rxoutliers_n`,", "`rxoutliers_r`,",
    "`CI`,", "`Variance`,", "`Stddev`,", "`Skewness`,", "`Kurtosis`,",
    "`min`,", "`1st Quartile`,", "`Mode`,", "`Median`,",
    "`LCL Mean`,", "`Mean`,", "`SE Mean`,", "`UCL Mean`,",
    "`3rd Quartile`,", "`max`,",
    "`l3norm meanLog`,", "`SE l3norm meanLog`,",
    "`l3norm sdLog`,", "`SE l3norm sdLog`,",
    "`l3norm shift`,", "`SE l3norm shift`,",
    "`lnorm meanLog`,", "`SE lnorm meanLog`,",
    "`lnorm sdLog`,", "`SE lnorm sdLog`,",
    "`l3norm S-K D`,", "`l3norm S-K p`,",
    "`lnorm S-K D`,", "`lnorm S-K p`,",
    "`norm S-K D`,", "`norm S-K p`,",
    "`ecdf_raw`,", "`density_raw`",
    ")",
    " VALUES (",
    "NOW()",",",
    "\"",experimenttable,"\"",",",
    experimentid,",",
    expinterval[1],",",
    expinterval[2],",",
    workload,",",
    n_threads,",",
    n_traces/(expinterval[2]-expinterval[1]),",",
    "\"",operation,"\",",
    basicStats["nobs",],",",
    q_removed,",",
    n_removed,",",
    routlier_info["q_normal"],",",
    routlier_info["n_normal"],",",
    routlier_info["ratio_normal"],",",
    routlier_info["q_extreme"],",",
    routlier_info["n_extreme"],",",
    routlier_info["ratio_extreme"],",",
    CI,",",
    basicStats["Variance",],",",
    basicStats["Stdev",],",",
    basicStats["Skewness",],",",
    basicStats["Kurtosis",],",",
    basicStats["Minimum",],",",
    basicStats["1. Quartile",],",",
    approxMode,",",          # goes into the `Mode` column
    basicStats["Median",],",",
    basicStats["LCL Mean",],",",
    basicStats["Mean",],",",
    basicStats["SE Mean",],",",
    basicStats["UCL Mean",],",",
    basicStats["3. Quartile",],",",
    basicStats["Maximum",],",",
    l3norm.approx.estimation[["meanlog"]],",",
    l3norm.approx.sd[["meanlog"]],",",
    l3norm.approx.estimation[["sdlog"]],",",
    l3norm.approx.sd[["sdlog"]],",",
    l3norm.approx.estimation[["shift"]],",",
    # BUG FIX: `SE l3norm shift` was written from sd[["sdlog"]]
    l3norm.approx.sd[["shift"]],",",
    lnorm.approx.estimation[["meanlog"]],",",
    lnorm.approx.sd[["meanlog"]],",",
    lnorm.approx.estimation[["sdlog"]],",",
    lnorm.approx.sd[["sdlog"]],",",
    l3norm.skTestResult[["D"]],",",
    l3norm.skTestResult[["p.value"]],",",
    lnorm.skTestResult[["D"]],",",
    lnorm.skTestResult[["p.value"]],",",
    norm.skTestResult[["D"]],",",
    norm.skTestResult[["p.value"]],",",
    "0x",paste(serialize(data.ecdf,connection=NULL),collapse=""),",",
    "0x",paste(serialize(data.density,connection=NULL),collapse=""),"",
    ")",
    sep=""
    )
  # print.noquote(query)
  con=dbConnect(m, group="performanceData")
  dbSendQuery(con,query)
  dbDisconnect(con)
}

## prints stats to plot
# Renders the basic statistics and fitting/test results as text onto an empty
# plot panel (used for the PDF report output).
opStatsToPlot = function(operation, CI, basicStats, l3norm.approx, lnorm.approx, l3norm.skTestResult, lnorm.skTestResult, norm.skTestResult){
  basicStats.stats=row.names(basicStats)
  basicStats.numStats=nrow(basicStats)
  plot(0,0,type="n",xlab="", ylab="",
       main=paste("Basic Statistics (CI=",CI,") ",operation),
       xaxt = "n", yaxt="n",xlim=c(0,6),ylim=c(basicStats.numStats,0))
  # column 0/1: one row per basic statistic (name, value)
  for (i in 1:basicStats.numStats){
    text(0,i, basicStats.stats[i], adj=c(0,0))
    text(1,i, basicStats[basicStats.stats[i],], adj=c(0,0))
  }
  # column 2/3: fitted distribution parameters
  text(2,1, "l3norm Approximation:", adj=c(0,0));
  text(2,2, "shift", adj=c(0,0)); text(3,2, round(l3norm.approx[["estimate"]][["shift"]],digits=4), adj=c(0,0))
  text(2,3, "meanlog", adj=c(0,0)); text(3,3, round(l3norm.approx[["estimate"]][["meanlog"]],digits=4), adj=c(0,0))
  text(2,4, "sdlog", adj=c(0,0)); text(3,4, round(l3norm.approx[["estimate"]][["sdlog"]],digits=4), adj=c(0,0))
  text(2,6, "lnorm Approximation:", adj=c(0,0));
  text(2,7, "meanlog", adj=c(0,0)); text(3,7, round(lnorm.approx[["estimate"]][["meanlog"]],digits=4), adj=c(0,0))
  text(2,8, "sdlog", adj=c(0,0)); text(3,8, round(lnorm.approx[["estimate"]][["sdlog"]],digits=4), adj=c(0,0))
  # column 4/5: Kolmogorov-Smirnov test results
  text(4,1, "S-K Test Results:", adj=c(0,0));
  text(4,2, "l3norm D", adj=c(0,0)); text(5,2, round(l3norm.skTestResult[["D"]],digits=4), adj=c(0,0))
  text(4,3, "l3norm p-value", adj=c(0,0)); text(5,3, round(l3norm.skTestResult[["p.value"]],digits=4), adj=c(0,0))
  text(4,4, "lnorm D", adj=c(0,0)); text(5,4, round(lnorm.skTestResult[["D"]],digits=4), adj=c(0,0))
  text(4,5, "lnorm p-value", adj=c(0,0)); text(5,5, round(lnorm.skTestResult[["p.value"]],digits=4), adj=c(0,0))
  text(4,6, "norm D", adj=c(0,0)); text(5,6, round(norm.skTestResult[["D"]],digits=4), adj=c(0,0))
  text(4,7, "norm p-value", adj=c(0,0)); text(5,7, round(norm.skTestResult[["p.value"]],digits=4), adj=c(0,0))
}

# Kolmogorov-Smirnov test of 'data' against the 3-parameter log-normal model.
# BUG FIX: the original tested the global 'respMs' instead of the 'data'
# argument (same defect in the two functions below).
l3normSKTest = function (data, params){
  ksTestRes=ks.test(data,"pl3norm",shift=params[["shift"]], meanlog=params[["meanlog"]], sdlog=params[["sdlog"]])
  data.frame(D=ksTestRes[["statistic"]],p.value=ksTestRes[["p.value"]])
}

# K-S test of 'data' against the 2-parameter log-normal model.
lnormSKTest = function (data, params){
  ksTestRes=ks.test(data,"plnorm",meanlog=params[["meanlog"]], sdlog=params[["sdlog"]])
  data.frame(D=ksTestRes[["statistic"]],p.value=ksTestRes[["p.value"]])
}

# K-S test of 'data' against the normal model.
normSKTest = function (data, params){
  ksTestRes=ks.test(data,"pnorm",mean=params[["mean"]], sd=params[["sd"]])
  data.frame(D=ksTestRes[["statistic"]],p.value=ksTestRes[["p.value"]])
}

## tests ##
# generate random grouped data
#A=sort(rep(seq(0,39,by=1), 1))
#B=sapply(A,sqrt)+A
#C=B*abs(rnorm(40))
#Z=data.frame(A,C)
#names(Z)=c("min","resp")
#plotFancyBoxplot(expDuration=Z[["min"]], binWidth=5, resptime=Z[["resp"]],
#                 main="Box-and-Whisker Plot of Experiment Response Times",limY=TRUE,
#                 xlab="Experiment time (minutes)",ylab="Response time (ms)")
#plotFancyThroughputplot(expDuration=Z[["min"]], binWidth=1, reqList=Z[["resp"]],
#                        main="Throughput",
#                        xlab="Experiment time (minutes)",ylab="Requests")

## test active sessions
#session_id=letters[1:10]
#tin=round((rlnorm(n=10)*10))%%10+1
#tout=round((rlnorm(n=10)*10))%%10+11
#session_table=data.frame(session_id,tin,tout) #plotFancyActiveSessions(session_table, main="Active sessions", # xlab="Experiment time (minutes)", # ylab="Sessions") ## test plotDistributionFittings #require(MASS) #source("../R/l3norm.R") #data=rl3norm(n=1000) #gridcol="darkgray" #par(mfrow=c(3,2)) #plotDistributionFittings(data)
/r-scripts/util.R
permissive
SLAsticSPE/slastic
R
false
false
28,265
r
require(MASS) require(fBasics) dyn.load("bin/r-scripts/utils_C.so") #symbol.C("convert_event_table") ## mysql utils mysqlSetTimeVariables=function(con, table, experimentID, expIntervalMin){ dbGetQuery(con,"SET @expStartTimeNs=0"); dbGetQuery(con,"SET @expIntervalStartNs=0"); dbGetQuery(con,"SET @expIntervalStopNs=0"); query=paste("SELECT @expStartTimeNs := MIN(tin) FROM ", table, " WHERE experimentID=",experimentID, " AND operation LIKE \"%ActionServlet%\"", sep="") dbGetQuery(con,query) min2ns=60*1000*1000*1000 query=paste("SELECT @expIntervalStartNs:=@expStartTimeNs+", expIntervalMin[1], "*", min2ns,sep="") dbGetQuery(con,query) query=paste("SELECT @expIntervalStopNs:=@expStartTimeNs+", expIntervalMin[2], "*", min2ns, sep="") dbGetQuery(con,query) rm(query) } ## ## sort.data.frame <- function(x, key, ...) { if (missing(key)) { rn <- rownames(x) if (all(rn %in% 1:nrow(x))) rn <- as.numeric(rn) x[order(rn, ...), , drop=FALSE] } else { x[do.call("order", c(x[key], ...)), , drop=FALSE] } } ## ## removes normal and extreme outliers but at most ## a given ratio in [0..1] ## expects field respMs remove_outliers_right = function (df, max_ratio=max_outlier_ratio){ df.n=nrow(df) max_n_outliers=floor(df.n*max_ratio) iqr = IQR(df$respMs) q3 = quantile(df$respMs, 0.75) q_max_ratio = quantile(df$respMs, (df.n-max_n_outliers)/df.n) q_normal = q3 + (1.5*iqr) q_extreme = q3 + (3*iqr) subset(df, respMs<=max(q_max_ratio, q_normal)) } ## returns data frame with fields outlier_info_right = function (data) { iqr = IQR(data) q3 = quantile(data, 0.75) q_normal = q3 + (1.5*iqr) q_extreme = q3 + (3*iqr) n_outliers=length(data[data>q_normal]) n_extreme=length(data[data>q_extreme]) n_normal=n_outliers-n_extreme n = length(data) data.frame( iqr, n_total=n_outliers, ratio_total=n_outliers/n, q_normal, n_normal, ratio_normal=n_normal/n, q_extreme, n_extreme, ratio_extreme=n_extreme/n) } ## returns data frame with $idx, $val runmean.steps = function (x, window_size, by, ...) 
{ runningmean = runmean(x, k=window_size, endrule="trim", ...) num_steps=floor(length(runningmean)/by) idx=seq(from=1, to=by*num_steps, by=by) res=data.frame(idx=idx, val=runningmean[idx]) rm(runningmean) res } ## fancy density plot plot pearsonModeSkewness <- function(x,dataMean,mode) { result = (dataMean - mode)/sd(x) return(result) } theSkewness <- function(data,dataMean) { return(centralMoment(data,dataMean,3)/(centralMoment(data,dataMean,2)^(3/2))) } centralMoment <- function(data,dataMean,i) { mean((data-dataMean)^i) } plotFancyDensityMultBw=function(data, main="", xlab="", ylab="") { dens=density(data,bw="nrd0") xlab=paste(xlab," N=", dens$n,sep="") plot(dens,main=main,xlab=xlab,col=1); grid(col=gridcol) rug(data) lines(density(data, bw="nrd"), col = 2) lines(density(data, bw="ucv"), col = 3) lines(density(data, bw="bcv"), col = 4) #lines(density(entry[["respMs"]], bw="SJ-ste"), col = 5) lines(density(data, bw="SJ-dpi"), col = 5) legend("topright", legend = c("bw:nrd0", "bw:nrd", "bw:ucv", "bw:bcv", "bw:SJ-ste", "bw:SJ-dpi", "log-normal"), col = 1:6, lty = 1,box.lty=0) } plotFancyDensity=function(data, main="", xlab="", ylab="", ...) 
{
  dens=density(data,n=1024)
  xlab=paste(xlab," N=", dens$n, ", Bandwidth=",format(mean(dens$bw),digits=4),sep="")
  dataMean=mean(data)
  # y-positions of the annotations are read off the estimated density curve
  dataMeanY=approx(dens$x,dens$y,xout=dataMean)$y[1]
  dataMedian=median(data)
  dataMedianY=approx(dens$x,dens$y,xout=dataMedian)$y[1]
  densModeY=max(dens$y)
  densMode=approx(dens$y,dens$x,xout=densModeY)$y[1]
  moskewness = skewness(data) #theSkewness(data,dataMean);
  plot(dens,main=main,xlab=xlab, ...); grid(col=gridcol)
  rug(data)
  points(dataMean,dataMeanY,pch=3,lwd=1,type="h",lty="dashed",col="red");
  points(dataMean,dataMeanY,pch=1,lwd=2,col="red");
  points(dataMedian,dataMedianY,pch=4,lwd=1,type="h",lty="dashed",col="blue");
  points(dataMedian,dataMedianY,pch=2,lwd=2,col="blue");
  points(densMode,densModeY,pch=5,lwd=1,type="h",lty="dashed",col="darkgreen");
  points(densMode,densModeY,pch=3,lwd=2,col="darkgreen");
  t1 = paste("Mean ",format(dataMean,digits=4))
  t2 = paste("Median ",format(dataMedian,digits=4))
  t3 = paste("Approx. Mode ",format(densMode,digits=4))
  t4 = paste("Skewness ",format(moskewness,digits=4))
  #t5 = paste("Pearson Mode Skewness ",format(pearsonModeSkewness(data,dataMean,densMode),digits=4))
  t5 = paste("Kurtosis ",format(kurtosis(data),digits=4))
  # place the legend on the thinner tail side
  if (moskewness < 0) { legendpos = "topleft"; } else { legendpos = "topright"; }
  legend(legendpos,c(t1,t2,t3,t4,t5),pch=c(1,2,3,0,0),col=c("red","blue","darkgreen","white","white"),bty = "n")
}

# Like plotFancyDensity(), but all statistics (and the density object itself)
# are supplied pre-computed by the caller.
plotFancyDensityGiven=function(dens, minimum, maximum, dataQ1, dataQ3, dataMean, dataMedian, densMode, skewness, kurt, main="", xlab="", ylab="", ...)
{
  xlab=paste(xlab," N=", dens$n, ", Bandwidth=",format(mean(dens$bw),digits=4),sep="")
  # dataMeanY=approx(dens$x,dens$y,xout=dataMean)$y[1]
  # dataQ1Y=approx(dens$x,dens$y,xout=dataQ1)$y[1]
  # dataMedianY=approx(dens$x,dens$y,xout=dataMedian)$y[1]
  # dataQ3Y=approx(dens$x,dens$y,xout=dataQ3)$y[1]
  densModeY=approx(dens$x,dens$y,xout=densMode)$y[1]
  plot(dens,main=main,xlab=xlab, lwd=1.5); grid(col=gridcol)
  # vertical markers: min/max (black), mean (red), quartiles/median (blue)
  abline(v=minimum, lwd=1,lty="dashed",col="black")
  abline(v=maximum, lwd=1,lty="dashed",col="black")
  abline(v=dataMean, lwd=1,lty="dashed",col="red")
  abline(v=dataQ1, lwd=1,lty="dashed",col="blue")
  abline(v=dataMedian, lwd=1,lty="dashed",col="blue")
  abline(v=dataQ3, lwd=1,lty="dashed",col="blue")
  points(densMode,densModeY, lwd=1,type="h",lty="dashed",col="green");
  points(densMode,densModeY,pch=1,lwd=2,col="green");
  # commented-out annotation variants retained from the original:
  # points(dataMean,dataMeanY,pch=3,lwd=1.5,type="h",lty="dashed",col="red");
  # points(dataMean,dataMeanY,pch=1,lwd=2,col="red");
  # points(dataQ1,dataQ1Y,pch=4,lwd=1.5,type="h",lty="dashed",col="blue");
  # points(dataQ1,dataQ1Y,pch=1,lwd=2,col="blue");
  # points(dataMedian,dataMedianY,pch=4,lwd=1.5,type="h",lty="dashed",col="blue");
  # points(dataMedian,dataMedianY,pch=1,lwd=2,col="blue");
  # points(dataQ3,dataQ3Y,pch=4,lwd=1.5,type="h",lty="dashed",col="blue");
  # points(dataQ3,dataQ3Y,pch=1,lwd=2,col="blue");
  # points(densMode,densModeY,pch=5,lwd=1.5,type="h",lty="dashed",col="green");
  # points(densMode,densModeY,pch=1,lwd=2,col="green");
  t0 = paste("min, max (", format(minimum,digits=4), ", ", format(maximum,digits=4), ")", sep="")
  t1 = paste("mean (",format(dataMean,digits=4), ")", sep="")
  t2 = paste("quartiles (",format(dataQ1,digits=4),", ",format(dataMedian,digits=4), ", ", format(dataQ3,digits=4), ")", sep="")
  t3 = paste("approx. mode (",format(densMode,digits=4), ")", sep="")
  t4 = paste("skewness (",format(skewness,digits=4), ")", sep="")
  #t5 = paste("Pearson Mode Skewness ",format(pearsonModeSkewness(data,dataMean,densMode),digits=4))
  t5 = paste("kurtosis (",format(kurt,digits=4), ")", sep="")
  # place the legend on the thinner tail side
  if (skewness < 0) { legendpos = "topleft"; } else { legendpos = "topright"; }
  legend(legendpos,c(t0,t1,t2,t3,t4,t5),pch=c(NA,NA,NA,NA,1,NA,NA),col=c("black","red","blue","green","white","white"),lwd=c("solid"),bty = "n")
}

# Scatter points plus a spline-interpolated curve; add=TRUE overlays onto an
# existing plot.
plotFancyXYCurve = function( x,y=NULL, add=FALSE, grid=TRUE, ...){
  if (add){
    points(x=x, y=y, ...)
  }else{
    plot(x=x, y=y, ...)
  }
  lines(spline(x=x, y=y), ...)
  if(grid) grid(col=gridcol)
}

# Plain x/y plot (default type "b"); add=TRUE overlays onto an existing plot.
plotFancyXYLine = function(x, y=NULL, add=FALSE, grid=TRUE, type="b", ...){
  if (add){
    points(x=x, y=y, type=type, ...)
  }else{
    plot(x=x, y=y, type=type, ...)
  }
  if(grid) grid(col=gridcol)
}

## fancy bin boxplot
## bin_width is relative to the exp_duration
# Box-and-whisker plot of response times grouped into time bins, with mean and
# median trend lines; limY=TRUE clips the y-axis at the largest upper hinge.
plotFancyBoxplot=function (expDuration, binWidth, resptime,limY=FALSE,main="",xlab="",ylab=""){
  expDurationBins=floor(expDuration/binWidth)*binWidth+binWidth
  df=data.frame(expDurationBins,resptime)
  attach(df)
  rm(expDurationBins)
  avgs=unique(ave(resptime,expDurationBins))
  boxplots=boxplot(resptime~expDurationBins,df,log="",plot=FALSE) # dirty!
# need this only for upper hinge (tail of plotFancyBoxplot)
  if(limY){
    # y-axis clipped at the largest upper hinge (or per-bin mean, if larger) so
    # extreme outliers do not dominate the plot
    boxplots=boxplot(resptime~expDurationBins,df,border="black",medcol="blue",log="",
                     plot=TRUE,main=main,xlab=xlab,ylab=ylab#,outline=FALSE#,range=0
                     ,ylim=c(min(resptime),max(c(boxplots$stats[4,],max(avgs))))
                     )
    grid(col=gridcol)
  }else{
    boxplots=boxplot(resptime~expDurationBins,df,border="black",medcol="blue",log="",
                     plot=TRUE,main=main,xlab=xlab,ylab=ylab,outline=TRUE)
    grid(col=gridcol)
  }
  # overlay per-bin mean (red) and median (blue) trend lines
  medians=boxplots$stats[3,]
  lines(avgs,lty="solid", col="red", lwd=1.5)
  lines(medians,lty="solid", col="blue", lwd=1.5)
  legend("topleft",c("Mean","Median"),lty=c("solid","solid"),col=c("red","blue"),box.lty=0)
}

## fancy scatter plot
# Variant of stats::scatter.smooth() that additionally lets the caller choose
# the colour of the loess regression line (lcol).
scatter.smooth.lcol=function (x, y = NULL, span = 2/3, degree = 1, family = c("symmetric",
    "gaussian"), xlab = NULL, ylab = NULL, ylim = range(y, prediction$y, na.rm = TRUE),
    evaluation = 50, lcol="black", ...)
{
    xlabel <- if (!missing(x)) deparse(substitute(x))
    ylabel <- if (!missing(y)) deparse(substitute(y))
    xy <- xy.coords(x, y, xlabel, ylabel)
    x <- xy$x
    y <- xy$y
    xlab <- if (is.null(xlab)) xy$xlab else xlab
    ylab <- if (is.null(ylab)) xy$ylab else ylab
    # the smoothed trend is computed before plotting so the default ylim can
    # include it
    prediction <- loess.smooth(x, y, span, degree, family, evaluation)
    plot(x, y, ylim = ylim, xlab = xlab, ylab = ylab, ...)
    lines(prediction,col=lcol,lwd=1.5)
    invisible()
}

# Scatter plot of response time over experiment time with a red local-regression
# (loess) trend line; N is appended to the x-axis label.
plotFancyScatterplot=function (expDuration, resptime, main="", xlab="", ylab=""){
  scatter.smooth.lcol(expDuration,resptime,main=main,xlab=paste(xlab," N=",length(resptime),sep=""),ylab=ylab,family="gaussian", lcol="red")
  grid(col=gridcol)
  legend("topleft",c("Local regression"),lty=c("solid"),col="red", box.lty=0)
}

## fancy throughput plot
# Plots the number of requests per time bin (width binWidth) as vertical bars
# plus a connecting line.
plotFancyThroughputplot=function (expDuration, binWidth, reqList, main="", xlab="", ylab=""){
  # assign each request to the upper edge of its time bin
  expDurationBins=floor((expDuration)/binWidth)*binWidth+binWidth
  df=data.frame(expDurationBins, reqList)
  bin_list = split(df,df["expDurationBins"])
  frequencies=as.vector(sapply(bin_list,nrow))
  times=as.integer(names(bin_list))
  plot(times,frequencies,type="h",main=main,xlab=xlab,ylab=ylab,xaxt="n",col="gray")
  grid(col=gridcol)
  lines(times,frequencies)
  axis(1, at=seq(min(times),max(times),binWidth))
}

## transforms a 'session table' into an 'active session table'
## session_table must contain the fields 'tin' and 'tout'
## the result will contain the field 'event_list' and 'active_sessions'
## if a t_i occurs multiple times, the maximum number of active sessions
## for t is set.
#session_table=data.frame(tin=round(rlnorm(10)*20), tout=round(rlnorm(10)*50)+150) sessionTable2activeSessionsTable=function (session_table){ event_table=sort.data.frame(rbind.data.frame(data.frame(event_list=session_table$tin,active_sessions=1), data.frame(event_list=session_table$tout,active_sessions=-1) ), key="event_list") t_list=unique(event_table$event_list) num_list=rep(0,length(t_list)) # n[1] = 1 # cur_t = event_table$event_list[1] # cur_t_idx=1 # invalid but will be incremented in the first iteration # for (i in 2:length(event_table$active_sessions)){ # event_table$active_sessions[1]=1 # if (event_table$event_list[i] > cur_t) { # cur_t=event_table$event_list[i] # cur_t_idx=cur_t_idx+1 # } # event_table$active_sessions[i]=event_table$active_sessions[i-1]+event_table$active_sessions[i] # if(event_table$active_sessions[i] > n[cur_t_idx]) n[cur_t_idx] = event_table$active_sessions[i] # } ret = .C("convert_event_table", event_list_t=as.double(event_table$event_list), event_list_action=as.integer(event_table$active_sessions), n=nrow(event_table), t_list=as.double(t_list), num_list=as.integer(num_list), m=length(num_list)) rm(event_table); rm(t_list); rm(num_list) data.frame(event_list=ret$t_list, active_sessions=ret$num_list) # This is the old and stupid way of doing it: #attach(session_table) #event_list=sort(unique(c(tin,tout))) #active_sessions=sapply(event_list,function(x) nrow(subset(session_table,x>=tin&x<tout))) #active_sessions_table=data.frame(event_list,active_sessions) #rm(event_list) #rm(active_sessions) #active_sessions_table } ## fancy active sessions plot (lines) plotFancyActiveSessionsLineplot=function (active_sessions_table, main="", xlab="", ylab=""){ attach(active_sessions_table) plot(event_list, active_sessions,type="s", xaxt="n", main=main, xlab=xlab, ylab=ylab) grid(col=gridcol) axis(1, at=seq(floor(min(event_list)),ceiling(max(event_list)))) detach(active_sessions_table) } ## fancy active sessions plot (scatter with smooth regression 
line) ## TODO: probably this function has a bad performance since we draw a scatter plot with white ## points ;-) ## It should be sufficient to only plot the regression line with lines(loess.smooth(...)) ## but we get an error concerning xlim and ylim plotFancyActiveSessionsRegressionplot=function (active_sessions_table, main="", xlab="", ylab=""){ attach(active_sessions_table) scatter.smooth(event_list, active_sessions,xaxt="n", main=main, xlab=xlab, ylab=ylab,col="white",family="gaussian") grid(col=gridcol) axis(1, at=seq(floor(min(event_list)),max(ceiling(event_list)))) legend("topleft",c("Local regression"),lty=c("solid"),box.lty=0) detach(active_sessions_table) } ## execute maximum likely hood optimization for 3-parameter log-normal distribution parameters ## returns a data frame row with columns meanlog, sdlog, shift fitL3norm = function(data){ data.unique.sort=sort(unique(data)) # data.shiftEst=data.unique.sort[1]-(data.unique.sort[2]-data.unique.sort[1]) data.shiftEst=data.unique.sort[1]-(data.unique.sort[10]-data.unique.sort[1]) data.shiftedlog=log(data-data.shiftEst) data.shiftedlog.mean=mean(data.shiftedlog) data.shiftedlog.sd=sd(data.shiftedlog) rm(data.shiftedlog) rm(data.unique.sort) fittedL3norm=fitdistr(data, dl3norm, start=list(meanlog=data.shiftedlog.mean, sdlog=data.shiftedlog.sd, shift=data.shiftEst), verbose=FALSE # , method=c("BFGS") # ,lower(0,0.001,-Inf), upper(Inf,Inf,min(data)) ) #"BFGS": most results but sometimes quits with non-finite finite-difference value [3] # "L-BFGS-B": often non-finite finite-difference value [3] #"SANN" : very slow! #"CG": doesn't find much fittedL3norm } ## Aitchison1957 6.22 Cohen's Least Sample Value Method p. 56 (t estimated by quantile ... 
) fitL3normCohenSave = function(data){ data.min=min(data) data.min.shifted=data-data.min data.min.shifted.log=log(data.min.shifted+0.05) data.min.log.mean=mean(data.min.shifted.log) data.min.log.sd=sd(data.min.shifted.log) data.min.shifted.pmin=ecdf(data.min.shifted)(0) data.shiftEst=data.min-exp(data.min.log.mean+qnorm(data.min.shifted.pmin)*data.min.log.sd) data.min.shifted=data-data.shiftEst # ;-) shouldn't we use data.min.shifted in the following? data.min.log.mean=mean(data.min.shifted.log) data.min.log.sd=sd(data.min.shifted.log) tryCatch( fitdistr(data, dl3norm, start=list(meanlog=data.min.log.mean, sdlog=data.min.log.sd, shift=data.shiftEst)), error= function (e) { list(estimate=data.frame(meanlog=data.min.log.mean, sdlog=data.min.log.sd, shift=data.shiftEst), sd=data.frame(meanlog=NA, sdlog=NA, shift=NA)) } ) } plotDistributionFittings=function(data,l3norm.approx,lnorm.approx,norm.approx){ l3norm.approx.estimate=l3norm.approx[["estimate"]] lnorm.approx.estimate=lnorm.approx[["estimate"]] norm.approx.estimate=norm.approx[["estimate"]] ## sample data against 3-parameter log-normal distribution with optimized parameters if(!any(is.na(as.vector(unlist(l3norm.approx.estimate))))){ plotFancyDensity(data=data, main=paste("Density Plot of Response Times and 3-Parameter Log-Normal Distribution Model", sep=""), xlab="Response time (milliseconds)",col="darkgray") curve(dl3norm(x, l3norm.approx.estimate[["meanlog"]], l3norm.approx.estimate[["sdlog"]], l3norm.approx.estimate[["shift"]]),add=TRUE) qqplot(rl3norm(length(data),meanlog=l3norm.approx.estimate[["meanlog"]], sdlog=l3norm.approx.estimate[["sdlog"]], shift=l3norm.approx.estimate[["shift"]]), data, main="QQ Plot of Sample Data and 3-Parameter Log-Normal Distribution", ylab="Sample response time (ms)", xlab=substitute(Lambda*"("*tau*"="*shift*", "*mu*"="*meanlog*", "*sigma*"="*sdlog*")", list(shift=round(l3norm.approx.estimate[["shift"]],digits=3), meanlog=round(l3norm.approx.estimate[["meanlog"]],digits=3), 
sdlog=round(l3norm.approx.estimate[["sdlog"]],digits=3))) ); grid(col=gridcol); abline(0,1) } ## sample data against 2-parameter log-normal distribution plotFancyDensity(data=data, main=paste("Density Plot of Response Times and 2-Parameter Log-Normal Distribution Model ", sep=""), xlab="Response time (milliseconds)",col="darkgray") curve(dlnorm(x, lnorm.approx.estimate[["meanlog"]], lnorm.approx.estimate[["sdlog"]]),add=TRUE) qqplot(rlnorm(length(data),meanlog=lnorm.approx.estimate[["meanlog"]], sdlog=lnorm.approx.estimate[["sdlog"]]), data, main="QQ Plot of Sample Data and 2-Parameter Log-Normal Distribution", ylab="Sample response time (ms)", xlab=substitute(Lambda*"("*mu*"="*meanlog*", "*sigma*"="*sdlog*")", list(meanlog=round(lnorm.approx.estimate[["meanlog"]],digits=3), sdlog=round(lnorm.approx.estimate[["sdlog"]],digits=3))) ); grid(col=gridcol); abline(0,1) ## sample data against normal distribution with sample mean and sd plotFancyDensity(data=data, main=paste("Density Plot of Response Times ", "and Normal Distribution Model (Sample Mean)", sep=""), xlab="Response time (milliseconds)",col="darkgray") curve(dnorm(x, norm.approx.estimate[["mean"]], norm.approx.estimate[["sd"]]),add=TRUE) qqplot(rnorm(length(data), norm.approx.estimate[["mean"]], norm.approx.estimate[["sd"]]), data, main="QQ Plot of Sample Data and Normal Distribution (Sample mean)", ylab="Sample response time (ms)", xlab=substitute(N*"("*mu*"="*mean*", "*sigma*"="*sd*")", list(mean=round(norm.approx.estimate[["mean"]],digits=3), sd=round(norm.approx.estimate[["sd"]],digits=3))) ); grid(col=gridcol); abline(0,1) } ## we should optimize this one ;-) ## Usage Example: ## ecdf_unserialized=unserialize(hexStr2RawVect(ecdf_hex)) ## density_unserialized=unserialize(hexStr2RawVect(density_hex)) hexStr2RawVect = function (str){ as.raw(paste("0x",unlist(strsplit(gsub("[[:space:]]$","",gsub("([[:xdigit:]][[:xdigit:]])","\\1 ",str,extended=TRUE))," ")),sep="")) } opStatsToTable = 
function(experimenttable,experimentid, expinterval, n_threads, n_traces, workload, operation, q_removed, n_removed, routlier_info, CI,basicStats,approxMode,l3norm.approx, lnorm.approx, l3norm.skTestResult, lnorm.skTestResult, norm.skTestResult, data.density, data.ecdf){ l3norm.approx.estimation=l3norm.approx[["estimate"]] l3norm.approx.sd=l3norm.approx[["sd"]] lnorm.approx.estimation=lnorm.approx[["estimate"]] lnorm.approx.sd=lnorm.approx[["sd"]] if (is.na(l3norm.approx.estimation[["meanlog"]]) || is.na(l3norm.approx.estimation[["sdlog"]]) || is.na(l3norm.approx.estimation[["shift"]])) l3norm.approx.estimation=data.frame(meanlog="null", sdlog="null", shift="null") if (is.na(l3norm.approx.sd[["meanlog"]]) || is.na(l3norm.approx.sd[["sdlog"]]) || is.na(l3norm.approx.sd[["shift"]])) l3norm.approx.sd=data.frame(meanlog="null", sdlog="null", shift="null") if (is.na(lnorm.approx.estimation[["meanlog"]]) || is.na(lnorm.approx.estimation[["sdlog"]])) lnorm.approx.estimation=data.frame(meanlog="null", sdlog="null", shift="null") if (is.na(lnorm.approx.sd[["meanlog"]]) || is.na(lnorm.approx.sd[["sdlog"]])) lnorm.approx.sd=data.frame(meanlog="null", sdlog="null", shift="null") if (is.na(l3norm.skTestResult[["D"]]) || is.na(l3norm.skTestResult[["D"]])) l3norm.skTestResult=data.frame(D="null", p.value="null") if (is.na(lnorm.skTestResult[["D"]]) || is.na(lnorm.skTestResult[["D"]])) lnorm.skTestResult=data.frame(D="null", p.value="null") if (is.na(norm.skTestResult[["D"]]) || is.na(norm.skTestResult[["D"]])) norm.skTestResult=data.frame(D="null", p.value="null") query=paste( "INSERT INTO ", opstats_table ," (", "`date`,", "`experimenttable`,", "`experimentid`,", "`startMin`,", "`stopMin`,", "`workload`,", "`threads`,", "`throughputMin`,", "`operation`,", "`n`,", "`q_removed`,", "`n_removed`,", "`rnoutliers_q`,", "`rnoutliers_n`,", "`rnoutliers_r`,", "`rxoutliers_q`,", "`rxoutliers_n`,", "`rxoutliers_r`,", "`CI`,", "`Variance`,", "`Stddev`,", "`Skewness`,", "`Kurtosis`,", 
"`min`,", "`1st Quartile`,", "`Mode`,", "`Median`,", "`LCL Mean`,", "`Mean`,", "`SE Mean`,", "`UCL Mean`,", "`3rd Quartile`,", "`max`,", "`l3norm meanLog`,", "`SE l3norm meanLog`,", "`l3norm sdLog`,", "`SE l3norm sdLog`,", "`l3norm shift`,", "`SE l3norm shift`,", "`lnorm meanLog`,", "`SE lnorm meanLog`,", "`lnorm sdLog`,", "`SE lnorm sdLog`,", "`l3norm S-K D`,", "`l3norm S-K p`,", "`lnorm S-K D`,", "`lnorm S-K p`,", "`norm S-K D`,", "`norm S-K p`,", "`ecdf_raw`,", "`density_raw`", ")", " VALUES (", "NOW()",",", "\"",experimenttable,"\"",",", experimentid,",", expinterval[1],",", expinterval[2],",", workload,",", n_threads,",", n_traces/(expinterval[2]-expinterval[1]),",", "\"",operation,"\",", basicStats["nobs",],",", q_removed,",", n_removed,",", routlier_info["q_normal"],",", routlier_info["n_normal"],",", routlier_info["ratio_normal"],",", routlier_info["q_extreme"],",", routlier_info["n_extreme"],",", routlier_info["ratio_extreme"],",", CI,",", basicStats["Variance",],",", basicStats["Stdev",],",", basicStats["Skewness",],",", basicStats["Kurtosis",],",", basicStats["Minimum",],",", basicStats["1. Quartile",],",", approxMode,",", basicStats["Median",],",", basicStats["LCL Mean",],",", basicStats["Mean",],",", basicStats["SE Mean",],",", basicStats["UCL Mean",],",", basicStats["3. 
Quartile",],",", basicStats["Maximum",],",", l3norm.approx.estimation[["meanlog"]],",", l3norm.approx.sd[["meanlog"]],",", l3norm.approx.estimation[["sdlog"]],",", l3norm.approx.sd[["sdlog"]],",", l3norm.approx.estimation[["shift"]],",", l3norm.approx.sd[["sdlog"]],",", lnorm.approx.estimation[["meanlog"]],",", lnorm.approx.sd[["meanlog"]],",", lnorm.approx.estimation[["sdlog"]],",", lnorm.approx.sd[["sdlog"]],",", l3norm.skTestResult[["D"]],",", l3norm.skTestResult[["p.value"]],",", lnorm.skTestResult[["D"]],",", lnorm.skTestResult[["p.value"]],",", norm.skTestResult[["D"]],",", norm.skTestResult[["p.value"]],",", "0x",paste(serialize(data.ecdf,connection=NULL),collapse=""),",", "0x",paste(serialize(data.density,connection=NULL),collapse=""),"", ")", sep="" ) # print.noquote(query) con=dbConnect(m, group="performanceData") dbSendQuery(con,query) dbDisconnect(con) } ## prints stats to plot opStatsToPlot = function(operation, CI, basicStats, l3norm.approx, lnorm.approx, l3norm.skTestResult, lnorm.skTestResult, norm.skTestResult){ basicStats.stats=row.names(basicStats) basicStats.numStats=nrow(basicStats) plot(0,0,type="n",xlab="", ylab="", main=paste("Basic Statistics (CI=",CI,") ",operation), xaxt = "n", yaxt="n",xlim=c(0,6),ylim=c(basicStats.numStats,0)) for (i in 1:basicStats.numStats){ text(0,i, basicStats.stats[i], adj=c(0,0)) text(1,i, basicStats[basicStats.stats[i],], adj=c(0,0)) } text(2,1, "l3norm Approximation:", adj=c(0,0)); text(2,2, "shift", adj=c(0,0)); text(3,2, round(l3norm.approx[["estimate"]][["shift"]],digits=4), adj=c(0,0)) text(2,3, "meanlog", adj=c(0,0)); text(3,3, round(l3norm.approx[["estimate"]][["meanlog"]],digits=4), adj=c(0,0)) text(2,4, "sdlog", adj=c(0,0)); text(3,4, round(l3norm.approx[["estimate"]][["sdlog"]],digits=4), adj=c(0,0)) text(2,6, "lnorm Approximation:", adj=c(0,0)); text(2,7, "meanlog", adj=c(0,0)); text(3,7, round(lnorm.approx[["estimate"]][["meanlog"]],digits=4), adj=c(0,0)) text(2,8, "sdlog", adj=c(0,0)); text(3,8, 
round(lnorm.approx[["estimate"]][["sdlog"]],digits=4), adj=c(0,0)) text(4,1, "S-K Test Results:", adj=c(0,0)); text(4,2, "l3norm D", adj=c(0,0)); text(5,2, round(l3norm.skTestResult[["D"]],digits=4), adj=c(0,0)) text(4,3, "l3norm p-value", adj=c(0,0)); text(5,3, round(l3norm.skTestResult[["p.value"]],digits=4), adj=c(0,0)) text(4,4, "lnorm D", adj=c(0,0)); text(5,4, round(lnorm.skTestResult[["D"]],digits=4), adj=c(0,0)) text(4,5, "lnorm p-value", adj=c(0,0)); text(5,5, round(lnorm.skTestResult[["p.value"]],digits=4), adj=c(0,0)) text(4,6, "norm D", adj=c(0,0)); text(5,6, round(norm.skTestResult[["D"]],digits=4), adj=c(0,0)) text(4,7, "norm p-value", adj=c(0,0)); text(5,7, round(norm.skTestResult[["p.value"]],digits=4), adj=c(0,0)) } l3normSKTest = function (data, params){ ksTestRes=ks.test(respMs,"pl3norm",shift=params[["shift"]], meanlog=params[["meanlog"]], sdlog=params[["sdlog"]]) data.frame(D=ksTestRes[["statistic"]],p.value=ksTestRes[["p.value"]]) } lnormSKTest = function (data, params){ ksTestRes=ks.test(respMs,"plnorm",meanlog=params[["meanlog"]], sdlog=params[["sdlog"]]) data.frame(D=ksTestRes[["statistic"]],p.value=ksTestRes[["p.value"]]) } normSKTest = function (data, params){ ksTestRes=ks.test(respMs,"pnorm",mean=params[["mean"]], sd=params[["sd"]]) data.frame(D=ksTestRes[["statistic"]],p.value=ksTestRes[["p.value"]]) } ## tests ## # generate random grouped data #A=sort(rep(seq(0,39,by=1), 1)) #B=sapply(A,sqrt)+A #C=B*abs(rnorm(40)) #Z=data.frame(A,C) #names(Z)=c("min","resp") #plotFancyBoxplot(expDuration=Z[["min"]], binWidth=5, resptime=Z[["resp"]], # main="Box-and-Whisker Plot of Experiment Response Times",limY=TRUE, # xlab="Experiment time (minutes)",ylab="Response time (ms)") #plotFancyThroughputplot(expDuration=Z[["min"]], binWidth=1, reqList=Z[["resp"]], # main="Throughput", # xlab="Experiment time (minutes)",ylab="Requests") ## test active sessions #session_id=letters[1:10] #tin=round((rlnorm(n=10)*10))%%10+1 #tout=round((rlnorm(n=10)*10))%%10+11 
#session_table=data.frame(session_id,tin,tout) #plotFancyActiveSessions(session_table, main="Active sessions", # xlab="Experiment time (minutes)", # ylab="Sessions") ## test plotDistributionFittings #require(MASS) #source("../R/l3norm.R") #data=rl3norm(n=1000) #gridcol="darkgray" #par(mfrow=c(3,2)) #plotDistributionFittings(data)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/14-interpolate_indicators.R \name{interpolate_indicators} \alias{interpolate_indicators} \title{Function to interpolate indicators from the Liberia Coverage Survey} \usage{ interpolate_indicators( indicator = c("ifaDF", "iycfDF", "mnpDF", "vitDF", "screenDF", "anthroDF", "cmamDF"), county, core.columns = c("spid", "cid", "did", "eid", "motherID", "m2"), coords, hexgrid, idp = 2 ) } \arguments{ \item{indicator}{A character vector of indicator data.frame names} \item{county}{An integer indicating which county to interpolate; 1 for Greater Monrovia; 2 for Grand Bassa} \item{core.columns}{A vector of variable names included in indicator data.frames} \item{coords}{A data.frame containing per enumeration area centroid coordinates} \item{hexgrid}{A SpatialPoints class object containing locations of interpolation} \item{idp}{Inverse distance power. Default is 2.} } \value{ A data.frame as long as `hexgrid` containing interpolated indicator values at each location of `hexgrid` } \description{ Function to interpolate indicators from the Liberia Coverage Survey } \examples{ interpolate_indicators(indicator = "vitDF", county = 1, coords = sampleList.r2[ , c("EFEACODE", "lon", "lat")], hexgrid = gmHexGrid) }
/man/interpolate_indicators.Rd
no_license
validmeasures/liberiaData
R
false
true
1,382
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/14-interpolate_indicators.R \name{interpolate_indicators} \alias{interpolate_indicators} \title{Function to interpolate indicators from the Liberia Coverage Survey} \usage{ interpolate_indicators( indicator = c("ifaDF", "iycfDF", "mnpDF", "vitDF", "screenDF", "anthroDF", "cmamDF"), county, core.columns = c("spid", "cid", "did", "eid", "motherID", "m2"), coords, hexgrid, idp = 2 ) } \arguments{ \item{indicator}{A character vector of indicator data.frame names} \item{county}{An integer indicating which county to interpolate; 1 for Greater Monrovia; 2 for Grand Bassa} \item{core.columns}{A vector of variable names included in indicator data.frames} \item{coords}{A data.frame containing per enumeration area centroid coordinates} \item{hexgrid}{A SpatialPoints class object containing locations of interpolation} \item{idp}{Inverse distance power. Default is 2.} } \value{ A data.frame as long as `hexgrid` containing interpolated indicator values at each location of `hexgrid` } \description{ Function to interpolate indicators from the Liberia Coverage Survey } \examples{ interpolate_indicators(indicator = "vitDF", county = 1, coords = sampleList.r2[ , c("EFEACODE", "lon", "lat")], hexgrid = gmHexGrid) }
## Exploratory script: load US Forest Service spatial layers and Charleston-area
## vegetation plot data, map longleaf pine stands/plots, and export layers as
## KML for viewing in Google Earth.
## NOTE(review): all input paths are hard-coded relative to the working
## directory, and the script depends on rgdal/zoom/maps -- confirm before running.
library(rgdal)
library(zoom)
library(maps)

## Hazard fuel treatment lines -- loaded but never used below; TODO confirm
## whether this layer is still needed.
tst = readOGR('./S_USA.Activity_HazFuelTrt_LN.shp', layer='S_USA.Activity_HazFuelTrt_LN')

## Side effect only: prints the layers available in the lidar geodatabase.
ogrListLayers("./sef_lidar.gdb/gdb")

## Core vector layers: topography, SSURGO soils, forest stands, ownership,
## and the current invasive plants inventory.
topo = readOGR('./Topography.shp', layer='Topography')
soil = readOGR('./SSURGO_Soils.shp', layer='SSURGO_Soils')
Stand = readOGR('./Stand.shp', layer='Stand')
Owners = readOGR('./BasicSurfaceOwnership.shp', layer='BasicSurfaceOwnership')
invasive = readOGR('./Current_Invasive_Plants_Inventory.shp', layer='Current_Invasive_Plants_Inventory')

## Fire history: MTBS fire occurrence points and burned-area perimeters,
## plus the prescribed (Rx) burn history.
fire_occ = readOGR('./Monitoring_Trends_in_Burn_Severity__Fire_Occurrence_Locations.shp', layer='Monitoring_Trends_in_Burn_Severity__Fire_Occurrence_Locations')
fire_poly = readOGR('./Monitoring_Trends_in_Burn_Severity__Burned_Area_Boundaries.shp', layer='Monitoring_Trends_in_Burn_Severity__Burned_Area_Boundaries')
fire_Rx = readOGR('./FM_RxBurnHistory.shp', layer='FM_RxBurnHistory')

## Side-by-side comparison: wildfire perimeters vs. prescribed burns.
par(mfrow=c(1,2))
plot(fire_poly)
plot(fire_Rx)

## Subset of stands whose forest type matches 'Longleaf pine'.
llStand = Stand[grep('Longleaf pine', Stand@data$FORESTTYPE), ]

## Quick-look PDF of all stands.
pdf('./Stand.pdf')
plot(Stand)
dev.off()

## Reproject the layers used below to geographic lon/lat (WGS84), as required
## for KML export.
proj4string(Stand)
geo_prj = CRS("+proj=longlat +datum=WGS84")
Stand_ll = spTransform(Stand, geo_prj)
llStand_ll = spTransform(llStand, geo_prj)
Owners_ll = spTransform(Owners, geo_prj)
topo_ll = spTransform(topo, geo_prj)
soil_ll = spTransform(soil, geo_prj)

## Vegetation plots: promote the CSV to spatial points. coords.nrs = 5:6 marks
## the coordinate columns in the attribute table -- TODO confirm these column
## positions against the CSV header.
vegplots = read.csv('./CharlestonPlots.csv')
head(vegplots)
vegplots = SpatialPointsDataFrame(coords = vegplots[ , c('Real.Longitude', 'Real.Latitude')],
                                  data=vegplots, coords.nrs = 5:6,
                                  proj4string = CRS("+proj=longlat +datum=WGS84"))
## Project number is the prefix of the plot code before the first dash.
vegplots$project_num = as.integer(sapply(strsplit(as.character(vegplots$Plot.Code), "-"),
                                         function(x)x[1]))
## Plots whose primary community name includes longleaf pine.
llvegplots = vegplots[grep('Pinus palustris', vegplots$commPrimaryScientific), ]
## Survey year parsed as the third dash-separated field of the date string --
## assumes a *-*-YYYY date format; TODO confirm.
llyr = as.numeric(sapply(as.character(llvegplots@data$Date),
                         function(x) strsplit(x, '-')[[1]][3]))
table(llyr)

## Map of plot locations over Charleston and Berkeley counties, SC, with
## longleaf plots highlighted and 1995 surveys overplotted in red.
pdf('vegplot_map.pdf')
data(us.cities)
map('county', c('south carolina,charleston', 'south carolina,berkeley'))
map.cities(us.cities, country="SC")
points(vegplots, col='dodgerblue', pch=19)
points(llvegplots, col='green3', pch=19)
points(llvegplots[llyr == 1995, ], col='red')
legend('bottomright', c('longleaf plot', 'other plot'),
       col=c('green3', 'dodgerblue'), pch=19, bty='n')
dev.off()

## export kmls
writeOGR(Stand_ll, "Stand.kml", "Stand", "KML")
writeOGR(llStand_ll, "llStand.kml", "Stand", "KML")
writeOGR(Owners_ll, "Owners.kml", "Owners", "KML")
writeOGR(topo_ll, "topo.kml", "topo", "KML")
writeOGR(soil_ll, "soil.kml", "soil", "KML")
writeOGR(vegplots, "vegplots.kml", "vegplots", "KML")
writeOGR(llvegplots, "llvegplots.kml", "llvegplots", "KML")
## summary
/scripts/FS_maps.R
no_license
smccau/se_veg
R
false
false
2,822
r
## Exploratory script: load US Forest Service spatial layers and Charleston-area
## vegetation plot data, map longleaf pine stands/plots, and export layers as
## KML for viewing in Google Earth.
## NOTE(review): all input paths are hard-coded relative to the working
## directory, and the script depends on rgdal/zoom/maps -- confirm before running.
library(rgdal)
library(zoom)
library(maps)

## Hazard fuel treatment lines -- loaded but never used below; TODO confirm
## whether this layer is still needed.
tst = readOGR('./S_USA.Activity_HazFuelTrt_LN.shp', layer='S_USA.Activity_HazFuelTrt_LN')

## Side effect only: prints the layers available in the lidar geodatabase.
ogrListLayers("./sef_lidar.gdb/gdb")

## Core vector layers: topography, SSURGO soils, forest stands, ownership,
## and the current invasive plants inventory.
topo = readOGR('./Topography.shp', layer='Topography')
soil = readOGR('./SSURGO_Soils.shp', layer='SSURGO_Soils')
Stand = readOGR('./Stand.shp', layer='Stand')
Owners = readOGR('./BasicSurfaceOwnership.shp', layer='BasicSurfaceOwnership')
invasive = readOGR('./Current_Invasive_Plants_Inventory.shp', layer='Current_Invasive_Plants_Inventory')

## Fire history: MTBS fire occurrence points and burned-area perimeters,
## plus the prescribed (Rx) burn history.
fire_occ = readOGR('./Monitoring_Trends_in_Burn_Severity__Fire_Occurrence_Locations.shp', layer='Monitoring_Trends_in_Burn_Severity__Fire_Occurrence_Locations')
fire_poly = readOGR('./Monitoring_Trends_in_Burn_Severity__Burned_Area_Boundaries.shp', layer='Monitoring_Trends_in_Burn_Severity__Burned_Area_Boundaries')
fire_Rx = readOGR('./FM_RxBurnHistory.shp', layer='FM_RxBurnHistory')

## Side-by-side comparison: wildfire perimeters vs. prescribed burns.
par(mfrow=c(1,2))
plot(fire_poly)
plot(fire_Rx)

## Subset of stands whose forest type matches 'Longleaf pine'.
llStand = Stand[grep('Longleaf pine', Stand@data$FORESTTYPE), ]

## Quick-look PDF of all stands.
pdf('./Stand.pdf')
plot(Stand)
dev.off()

## Reproject the layers used below to geographic lon/lat (WGS84), as required
## for KML export.
proj4string(Stand)
geo_prj = CRS("+proj=longlat +datum=WGS84")
Stand_ll = spTransform(Stand, geo_prj)
llStand_ll = spTransform(llStand, geo_prj)
Owners_ll = spTransform(Owners, geo_prj)
topo_ll = spTransform(topo, geo_prj)
soil_ll = spTransform(soil, geo_prj)

## Vegetation plots: promote the CSV to spatial points. coords.nrs = 5:6 marks
## the coordinate columns in the attribute table -- TODO confirm these column
## positions against the CSV header.
vegplots = read.csv('./CharlestonPlots.csv')
head(vegplots)
vegplots = SpatialPointsDataFrame(coords = vegplots[ , c('Real.Longitude', 'Real.Latitude')],
                                  data=vegplots, coords.nrs = 5:6,
                                  proj4string = CRS("+proj=longlat +datum=WGS84"))
## Project number is the prefix of the plot code before the first dash.
vegplots$project_num = as.integer(sapply(strsplit(as.character(vegplots$Plot.Code), "-"),
                                         function(x)x[1]))
## Plots whose primary community name includes longleaf pine.
llvegplots = vegplots[grep('Pinus palustris', vegplots$commPrimaryScientific), ]
## Survey year parsed as the third dash-separated field of the date string --
## assumes a *-*-YYYY date format; TODO confirm.
llyr = as.numeric(sapply(as.character(llvegplots@data$Date),
                         function(x) strsplit(x, '-')[[1]][3]))
table(llyr)

## Map of plot locations over Charleston and Berkeley counties, SC, with
## longleaf plots highlighted and 1995 surveys overplotted in red.
pdf('vegplot_map.pdf')
data(us.cities)
map('county', c('south carolina,charleston', 'south carolina,berkeley'))
map.cities(us.cities, country="SC")
points(vegplots, col='dodgerblue', pch=19)
points(llvegplots, col='green3', pch=19)
points(llvegplots[llyr == 1995, ], col='red')
legend('bottomright', c('longleaf plot', 'other plot'),
       col=c('green3', 'dodgerblue'), pch=19, bty='n')
dev.off()

## export kmls
writeOGR(Stand_ll, "Stand.kml", "Stand", "KML")
writeOGR(llStand_ll, "llStand.kml", "Stand", "KML")
writeOGR(Owners_ll, "Owners.kml", "Owners", "KML")
writeOGR(topo_ll, "topo.kml", "topo", "KML")
writeOGR(soil_ll, "soil.kml", "soil", "KML")
writeOGR(vegplots, "vegplots.kml", "vegplots", "KML")
writeOGR(llvegplots, "llvegplots.kml", "llvegplots", "KML")
## summary
\encoding{latin1}
\name{kmfun}
\alias{kmfun}
\title{Multiscale second-order neighbourhood analysis of a marked spatial point pattern}
\description{
 Computes estimates of the mark correlation \emph{Km}-function and associated neighbourhood functions
 from a marked spatial point pattern in a simple (rectangular or circular) or complex sampling window.
 Computes optionally local confidence limits of the functions under the null hypothesis of
 no correlation between marks (see Details).
}
\usage{
kmfun(p, upto, by, nsim=0, alpha=0.01)
}
\arguments{
  \item{p}{a \code{"spp"} object defining a marked spatial point pattern in a given sampling window (see \code{\link{spp}}).}
  \item{upto }{maximum radius of the sample circles (see Details).}
  \item{by }{interval length between successive sample circles radii (see Details).}
  \item{nsim }{number of Monte Carlo simulations to estimate local confidence limits of the null hypothesis of
no correlation between marks (see Details). By default \code{nsim=0}, so that no confidence limits are computed.}
  \item{alpha }{if \code{nsim>0}, significance level of the confidence limits. By default \eqn{\alpha=0.01}.}
}
\details{
Function \code{kmfun} computes the mark correlation function \eqn{Km(r)} and the associated function \eqn{gm(r)}.\cr\cr
It is defined from a general definition of spatial autocorrelation (Goreaud 2000) as:\cr
\eqn{Km(r) = (COV(Xi,Xj)|d(i,j)<r) / VAR(X)}\cr
where \eqn{X} is a quantitative random variable attached to each point of the pattern.
\emph{Km(r)} has a very similar interpretation to more classical correlation functions, such as Moran's \emph{I}:
it takes values between -1 and 1, with an expectation of 0 under the null hypothesis of no spatial correlation
between the values of \eqn{X}, becomes positive when values of \eqn{X} at distance \emph{r} are positively
correlated and negative when values of \eqn{X} at distance \emph{r} are negatively correlated.
\eqn{gm(r)} is the derivative of \eqn{Km(r)}, or pair mark correlation function, which gives the correlation
of marks within an annulus between two successive circles with radii \eqn{r} and \eqn{r-by}.\cr\cr
The program introduces an edge effect correction term according to the method proposed by Ripley (1977)
and extended to circular and complex sampling windows by Goreaud & Pelissier (1999).
Local Monte Carlo confidence limits and p-values of departure from the null hypothesis of no correlation
are estimated at each distance \eqn{r}, after reallocating at random the values of \emph{X} over all points
of the pattern, the location of trees being kept unchanged.
}
\value{
 A list of class \code{"fads"} with essentially the following components:
 \item{r }{a vector of regularly spaced out distances (\code{seq(by,upto,by)}).}
 \item{gm }{a data frame containing values of the pair mark correlation function \eqn{gm(r)}.}
 \item{km }{a data frame containing values of the mark correlation function \eqn{Km(r)}.\cr}
 Each component except \code{r} is a data frame with the following variables:\cr
 \item{obs }{a vector of estimated values for the observed point pattern.}
 \item{theo }{a vector of theoretical values expected for the null hypothesis of no correlation between marks.}
 \item{sup }{(optional) if \code{nsim>0} a vector of the upper local confidence limits of the null hypothesis
 at a significance level \eqn{\alpha}.}
 \item{inf }{(optional) if \code{nsim>0} a vector of the lower local confidence limits of the null hypothesis
 at a significance level \eqn{\alpha}.}
 \item{pval }{(optional) if \code{nsim>0} a vector of local p-values of departure from the null hypothesis.}
}
\note{
Applications of this function can be found in Oddou-Muratorio \emph{et al.} (2004)
and Madelaine \emph{et al.} (2007).
}
\references{Goreaud, F. 2000. \emph{Apports de l'analyse de la structure spatiale en foret tempere
a l'etude et la modelisation des peuplements complexes}. These de doctorat, ENGREF, Nancy, France.\cr\cr

Goreaud F. & Pelissier R. 1999. On explicit formulas of edge effect correction for Ripley's K-function.
\emph{Journal of Vegetation Science}, 10:433-438.\cr\cr

Madelaine, C., Pelissier, R., Vincent, G., Molino, J.-F., Sabatier, D., Prevost, M.-F. & de Namur, C. 2007.
Mortality and recruitment in a lowland tropical rainforest of French Guiana: effects of soil type and
species guild. \emph{Journal of Tropical Ecology}, 23:277-287.

Oddou-Muratorio, S., Demesure-Musch, B., Pelissier, R. & Gouyon, P.-H. 2004. Impacts of gene flow
and logging history on the local genetic structure of a scattered tree species, Sorbus torminalis L.
\emph{Molecular Ecology}, 13:3689-3702.

Ripley B.D. 1977. Modelling spatial patterns. \emph{Journal of the Royal Statistical Society B}, 39:172-192.
}
\author{\email{Raphael.Pelissier@ird.fr}}
\seealso{
 \code{\link{plot.fads}},
 \code{\link{spp}},
 \code{\link{kfun}},
 \code{\link{k12fun}},
 \code{\link{kijfun}},
 \code{\link{ki.fun}}.
}
\examples{
 data(BPoirier)
 BP <- BPoirier
 ## spatial point pattern in a rectangle sampling window of size [0,110] x [0,90]
 swrm <- spp(BP$trees, win=BP$rect, marks=BP$dbh)
 kmswrm <- kmfun(swrm, 25, 2, 500)
 plot(kmswrm)
 ## spatial point pattern in a circle with radius 50 centred on (55,45)
 swc <- spp(BP$trees, win=c(55,45,45), marks=BP$dbh)
 kmswc <- kmfun(swc, 25, 2, 500)
 plot(kmswc)
 ## spatial point pattern in a complex sampling window
 swrt <- spp(BP$trees, win=BP$rect, tri=BP$tri2, marks=BP$dbh)
 kmswrt <- kmfun(swrt, 25, 2, 500)
 plot(kmswrt)
}
\keyword{spatial}
/man/kmfun.Rd
no_license
cran/ads
R
false
false
5,623
rd
\encoding{latin1} \name{kmfun} \alias{kmfun} \title{Multiscale second-order neighbourhood analysis of a marked spatial point pattern} \description{ Computes estimates of the mark correlation \emph{Km}-function and associated neighbourhood functions from a marked spatial point pattern in a simple (rectangular or circular) or complex sampling window. Computes optionally local confidence limits of the functions under the null hypothesis of no correlation between marks (see Details). } \usage{ kmfun(p, upto, by, nsim=0, alpha=0.01) } \arguments{ \item{p}{a \code{"spp"} object defining a marked spatial point pattern in a given sampling window (see \code{\link{spp}}).} \item{upto }{maximum radius of the sample circles (see Details).} \item{by }{interval length between successive sample circles radii (see Details).} \item{nsim }{number of Monte Carlo simulations to estimate local confidence limits of the null hypothesis of no correlation between marks (see Details). By default \code{nsim=0}, so that no confidence limits are computed.} \item{alpha }{if \code{nsim>0}, significant level of the confidence limits. By default \eqn{\alpha=0.01}.} } \details{ Function \code{kmfun} computes the mark correlation function \eqn{Km(r)} and the associated function \eqn{gm(r)}.\cr\cr It is defined from a general definition of spatial autocorrelation (Goreaud 2000) as:\cr \eqn{Km(r) = (COV(Xi,Xj)|d(i,j)<r) / VAR(X)}\cr where \eqn{X} is a quantitative random variable attached to each point of the pattern. \emph{Km(r)} has a very similar interpretation than more classical correlation functions, such as Moran's \emph{I}: it takes values between -1 and 1, with an expectation of 0 under the null hypothesis of no spatial correlation between the values of \emph{X}, becomes positive when values of \eqn{X} at distance \emph{r} are positively correlated and negative when values of \eqn{X} at distance \emph{r} are negatively correlated. 
\eqn{gm(r)} is the derivative of \eqn{Km(r)} or pair mark correlation function, which gives the correlation of marks within an annuli between two successive circles with radii \eqn{r} and \eqn{r-by}).\cr\cr The program introduces an edge effect correction term according to the method proposed by Ripley (1977) and extended to circular and complex sampling windows by Goreaud & P?Pelissier (1999). Local Monte Carlo confidence limits and p-values of departure from the null hypothesis of no correlation are estimated at each distance \eqn{r}, after reallocating at random the values of \emph{X} over all points of the pattern, the location of trees being kept unchanged. } \value{ A list of class \code{"fads"} with essentially the following components: \item{r }{a vector of regularly spaced out distances (\code{seq(by,upto,by)}).} \item{gm }{a data frame containing values of the pair mark correlation function \eqn{gm(r)}.} \item{km }{a data frame containing values of the mark correlation function \eqn{Km(r)}.\cr} Each component except \code{r} is a data frame with the following variables:\cr \item{obs }{a vector of estimated values for the observed point pattern.} \item{theo }{a vector of theoretical values expected for the null hypothesis of no correlation between marks.} \item{sup }{(optional) if \code{nsim>0} a vector of the upper local confidence limits of the null hypothesis at a significant level \eqn{\alpha}.} \item{inf }{(optional) if \code{nsim>0} a vector of the lower local confidence limits of the null hypothesis at a significant level \eqn{\alpha}.} \item{pval }{(optional) if \code{nsim>0} a vector of local p-values of departure from the null hypothesis.} } \note{ Applications of this function can be found in Oddou-Muratorio \emph{et al.} (2004) and Madelaine \emph{et al.} (submitted). } \references{Goreaud, F. 2000. \emph{Apports de l'analyse de la structure spatiale en foret tempere a l'etude et la modelisation des peuplements complexes}. 
These de doctorat, ENGREF, Nancy, France.\cr\cr Goreaud F. & Pelissier R. 1999. On explicit formulas of edge effect correction for Ripley's K-function. \emph{Journal of Vegetation Science}, 10:433-438.\cr\cr Madelaine, C., Pelissier, R., Vincent, G., Molino, J.-F., Sabatier, D., Prevost, M.-F. & de Namur, C. 2007. Mortality and recruitment in a lowland tropical rainforest of French Guiana: effects of soil type and species guild. \emph{Journal of Tropical Ecology}, 23:277-287.\cr\cr Oddou-Muratorio, S., Demesure-Musch, B., Pelissier, R. & Gouyon, P.-H. 2004. Impacts of gene flow and logging history on the local genetic structure of a scattered tree species, Sorbus torminalis L. \emph{Molecular Ecology}, 13:3689-3702.\cr\cr Ripley B.D. 1977. Modelling spatial patterns. \emph{Journal of the Royal Statistical Society B}, 39:172-192. } \author{\email{Raphael.Pelissier@ird.fr}} \seealso{ \code{\link{plot.fads}}, \code{\link{spp}}, \code{\link{kfun}}, \code{\link{k12fun}}, \code{\link{kijfun}}, \code{\link{ki.fun}}. } \examples{ data(BPoirier) BP <- BPoirier \dontrun{spatial point pattern in a rectangle sampling window of size [0,110] x [0,90]} swrm <- spp(BP$trees, win=BP$rect, marks=BP$dbh) kmswrm <- kmfun(swrm, 25, 2, 500) plot(kmswrm) \dontrun{spatial point pattern in a circle with radius 50 centred on (55,45)} swc <- spp(BP$trees, win=c(55,45,45), marks=BP$dbh) kmswc <- kmfun(swc, 25, 2, 500) plot(kmswc) \dontrun{spatial point pattern in a complex sampling window} swrt <- spp(BP$trees, win=BP$rect, tri=BP$tri2, marks=BP$dbh) kmswrt <- kmfun(swrt, 25, 2, 500) plot(kmswrt) } \keyword{spatial}
# Summary method for fitted WS.Corr.Mixed objects.
#
# Prints the function call, the fitted variance components, the estimated
# within-subject correlations R (with bootstrap confidence limits), and the
# model fit (log-likelihood and AIC) for whichever of the four supported
# mixed models was fitted (identified via Object$Model).
#
# Args:
#   object: a fitted object of class WS.Corr.Mixed.
#   ...:    ignored; present for S3 method compatibility.
#   Object: alternative way to pass the fitted object; defaults to `object`
#           when missing (kept for backward compatibility).
#
# Changes vs. original: removed the dead assignment `x <- Object`; factored
# the repeated "Model fit" and CI-table sections into local helpers.  All
# printed output is byte-identical to the original.
summary.WS.Corr.Mixed <- function(object, ..., Object){

  if (missing(Object)){Object <- object}

  # Shared section: model fit statistics (identical for all four models).
  print_model_fit <- function(O){
    cat("\n\nModel fit: \n")
    cat("---------- \n")
    cat("LogLik: ", O$LogLik)
    cat("\nAIC: ", O$AIC)
  }

  # Shared section: bootstrap CI tables (models 2-4 print lower/upper tables
  # with identical headers).
  print_ci_tables <- function(O){
    cat("\n", (1-O$Alpha)*100, "% confidence intervals (bootstrap), lower bounds:\n", sep="")
    cat("--------------------------------------------------- \n")
    print(O$CI.Lower)
    cat("\n", (1-O$Alpha)*100, "% confidence intervals (bootstrap), upper bounds:\n", sep="")
    cat("--------------------------------------------------- \n")
    print(O$CI.Upper)
  }

  cat("\nFunction call:\n\n")
  print(Object$Call)
  cat("\n\n")

  if (Object$Model=="Model 1, Random intercept"){
    cat(Object$Model, "\n")
    cat("=========================\n\n")
    cat("Fitted variance components: \n")
    cat("--------------------------- \n")
    cat("D:", Object$D, "\n")
    cat("Sigma**2:", Object$Sigma2, "\n")
    cat("\nEstimated correlations R (r(time_j, time_k) constant): \n")
    cat("------------------------------------------------------ \n")
    cat("R: ", (Object$R)[1], sep="")
    # Model 1 implies a single constant correlation, hence a single interval.
    cat("\n", (1-Object$Alpha)*100, "% confidence interval (bootstrap): [",
        Object$CI.Lower[1], "; ", Object$CI.Upper[1], "]", sep="")
    print_model_fit(Object)
  }

  if (Object$Model=="Model 2, Random intercept + serial corr (Gaussian)"){
    cat(Object$Model, "\n")
    cat("==================================================\n\n")
    cat("Fitted variance components: \n")
    cat("--------------------------- \n")
    cat("D:", Object$D, "\n")
    cat("Sigma**2:", Object$Sigma2, "\n")
    cat("Tau**2:", Object$Tau2, "\n")
    cat("Rho:", Object$Rho, "\n")
    cat("\nEstimated correlations R as a function of time lag: \n")
    cat("--------------------------------------------------- \n")
    print(Object$R)
    print_ci_tables(Object)
    print_model_fit(Object)
  }

  if (Object$Model=="Model 3, Random intercept, slope + serial corr (Gaussian)"){
    cat(Object$Model, "\n")
    cat("=========================================================\n\n")
    cat("Fitted variance components: \n")
    cat("--------------------------- \n")
    cat("D:\n")
    print(Object$D)
    cat("\nSigma**2:", Object$Sigma2, "\n")
    cat("Tau**2:", Object$Tau2, "\n")
    cat("Rho:", Object$Rho, "\n")
    cat("\nEstimated correlations R at each time point r(time_j, time_k) \n")
    cat("------------------------------------------------------------- \n")
    print(Object$R)
    print_ci_tables(Object)
    print_model_fit(Object)
  }

  if (Object$Model=="Model 4, Random intercept and slope"){
    cat(Object$Model, "\n")
    cat("===================================\n\n")
    cat("Fitted variance components: \n")
    cat("--------------------------- \n")
    cat("D:\n")
    print(Object$D)
    cat("\nSigma**2:", Object$Sigma2, "\n")
    cat("\nEstimated correlations R at each time point r(time_j, time_k) \n")
    cat("------------------------------------------------------------- \n")
    print(Object$R)
    print_ci_tables(Object)
    print_model_fit(Object)
  }
}
/R/summary.WS.Corr.Mixed.R
no_license
cran/CorrMixed
R
false
false
4,110
r
# Summary method for fitted WS.Corr.Mixed objects.
#
# Prints the function call, the fitted variance components, the estimated
# within-subject correlations R (with bootstrap confidence limits), and the
# model fit (log-likelihood and AIC) for whichever of the four supported
# mixed models was fitted (identified via Object$Model).
#
# Args:
#   object: a fitted object of class WS.Corr.Mixed.
#   ...:    ignored; present for S3 method compatibility.
#   Object: alternative way to pass the fitted object; defaults to `object`
#           when missing (kept for backward compatibility).
#
# Changes vs. original: removed the dead assignment `x <- Object`; factored
# the repeated "Model fit" and CI-table sections into local helpers.  All
# printed output is byte-identical to the original.
summary.WS.Corr.Mixed <- function(object, ..., Object){

  if (missing(Object)){Object <- object}

  # Shared section: model fit statistics (identical for all four models).
  print_model_fit <- function(O){
    cat("\n\nModel fit: \n")
    cat("---------- \n")
    cat("LogLik: ", O$LogLik)
    cat("\nAIC: ", O$AIC)
  }

  # Shared section: bootstrap CI tables (models 2-4 print lower/upper tables
  # with identical headers).
  print_ci_tables <- function(O){
    cat("\n", (1-O$Alpha)*100, "% confidence intervals (bootstrap), lower bounds:\n", sep="")
    cat("--------------------------------------------------- \n")
    print(O$CI.Lower)
    cat("\n", (1-O$Alpha)*100, "% confidence intervals (bootstrap), upper bounds:\n", sep="")
    cat("--------------------------------------------------- \n")
    print(O$CI.Upper)
  }

  cat("\nFunction call:\n\n")
  print(Object$Call)
  cat("\n\n")

  if (Object$Model=="Model 1, Random intercept"){
    cat(Object$Model, "\n")
    cat("=========================\n\n")
    cat("Fitted variance components: \n")
    cat("--------------------------- \n")
    cat("D:", Object$D, "\n")
    cat("Sigma**2:", Object$Sigma2, "\n")
    cat("\nEstimated correlations R (r(time_j, time_k) constant): \n")
    cat("------------------------------------------------------ \n")
    cat("R: ", (Object$R)[1], sep="")
    # Model 1 implies a single constant correlation, hence a single interval.
    cat("\n", (1-Object$Alpha)*100, "% confidence interval (bootstrap): [",
        Object$CI.Lower[1], "; ", Object$CI.Upper[1], "]", sep="")
    print_model_fit(Object)
  }

  if (Object$Model=="Model 2, Random intercept + serial corr (Gaussian)"){
    cat(Object$Model, "\n")
    cat("==================================================\n\n")
    cat("Fitted variance components: \n")
    cat("--------------------------- \n")
    cat("D:", Object$D, "\n")
    cat("Sigma**2:", Object$Sigma2, "\n")
    cat("Tau**2:", Object$Tau2, "\n")
    cat("Rho:", Object$Rho, "\n")
    cat("\nEstimated correlations R as a function of time lag: \n")
    cat("--------------------------------------------------- \n")
    print(Object$R)
    print_ci_tables(Object)
    print_model_fit(Object)
  }

  if (Object$Model=="Model 3, Random intercept, slope + serial corr (Gaussian)"){
    cat(Object$Model, "\n")
    cat("=========================================================\n\n")
    cat("Fitted variance components: \n")
    cat("--------------------------- \n")
    cat("D:\n")
    print(Object$D)
    cat("\nSigma**2:", Object$Sigma2, "\n")
    cat("Tau**2:", Object$Tau2, "\n")
    cat("Rho:", Object$Rho, "\n")
    cat("\nEstimated correlations R at each time point r(time_j, time_k) \n")
    cat("------------------------------------------------------------- \n")
    print(Object$R)
    print_ci_tables(Object)
    print_model_fit(Object)
  }

  if (Object$Model=="Model 4, Random intercept and slope"){
    cat(Object$Model, "\n")
    cat("===================================\n\n")
    cat("Fitted variance components: \n")
    cat("--------------------------- \n")
    cat("D:\n")
    print(Object$D)
    cat("\nSigma**2:", Object$Sigma2, "\n")
    cat("\nEstimated correlations R at each time point r(time_j, time_k) \n")
    cat("------------------------------------------------------------- \n")
    print(Object$R)
    print_ci_tables(Object)
    print_model_fit(Object)
  }
}
# Replication analysis of epistatic eQTL hits in an independent dataset.
# Loads discovery results (allsub), genotype/expression matrices and the
# replication data, filters significant SNP-SNP-probe triplets, re-tests
# each hit in the replication data and visualises the genotype-phenotype
# surfaces for replicated interactions.
#
# Fixes vs. original: removed a bare print() call (error: argument "x"
# missing), commented out a stray free-text line that was a syntax error,
# renamed `rep` (shadows base::rep) to `rep_pval`, used seq_len()/TRUE
# instead of 1:n and T.

library(lattice)
library(latticeExtra)
library(ggplot2)

load("/Users/explodecomputer/git/wrayvisscher/eQTL_2D/analysis/residuals.RData")
load("/Users/explodecomputer/git/wrayvisscher/eQTL_2D/analysis/allsub.RData")
load("/Users/explodecomputer/git/wrayvisscher/eQTL_2D/analysis/clean_geno_final.RData")
load("/Users/explodecomputer/git/wrayvisscher/eQTL_2D/analysis/ggdata.RData")

# Recode genotype calls to allele dosage; "NC" (no call) becomes NA.
gen[gen == "NC"] <- NA
gen[gen == "AA"] <- "0"
gen[gen == "AB"] <- "1"
gen[gen == "BB"] <- "2"
gen <- matrix(as.numeric(gen), nrow(gen), ncol(gen))

ls()

# Keep hits passing the significance and variance-explained thresholds,
# drop duplicate probe/chromosome-pair entries and two known artefacts.
sig <- subset(allsub, pfull > 16.5 & propG > 0.05)
dups <- with(sig, paste(probe, chr1, chr2))
sig <- sig[!duplicated(dups), ]
sig <- subset(sig, snp1 != "rs11036212" & snp2 != "rs11036212" & probe != "ILMN_1688753")
dim(sig)

# Attach the chromosome of each probe.
temp <- with(probeinfo, data.frame(probe=PROBE_ID, probechr=CHR))
sig <- merge(sig, temp, by="probe")
dim(sig)
head(sig)

# Classify hits by the location of the two SNPs relative to the probe.
dim(ciscis <- subset(sig, chr1 == probechr & chr2 == probechr))
dim(cistrans <- subset(sig, (chr1 == probechr & chr2 != probechr) | (chr1 != probechr & chr2 == probechr)))
dim(transtrans <- subset(sig, chr1 != probechr & chr2 != probechr))

sig$type <- NA
sig$type[with(sig, chr1 == probechr & chr2 == probechr)] <- "cis-cis"
sig$type[with(sig, (chr1 == probechr & chr2 != probechr) | chr1 != probechr & chr2 == probechr)] <- "cis-trans"
sig$type[with(sig, chr1 != probechr & chr2 != probechr)] <- "trans-trans"
head(sig)
lapply(sig, class)

sig$snp1 <- as.character(sig$snp1)
sig$snp2 <- as.character(sig$snp2)

# Keep only hits whose SNPs are present in the replication genotype data.
allsnps <- with(sig, unique(c(snp1, snp2)))
length(allsnps)
table(allsnps %in% snp$Name)
table(sig$snp1 %in% snp$Name, sig$snp2 %in% snp$Name)
sig2 <- subset(sig, sig$snp1 %in% snp$Name & sig$snp2 %in% snp$Name)
dim(sig2)
head(sig2)

# Re-fit the full 2-locus model for one discovery hit in the replication
# data and return the p-value of the first model term.
replicate <- function(
	prinfo,
	probe,
	gen,
	snp,
	row
)
{
	# Locate the probe row and the two SNP rows in the replication data;
	# each must match exactly once.
	prow <- which(prinfo$PROBE_ID == row$probe[1])
	stopifnot(length(prow) == 1)
	srow1 <- which(snp$Name == row$snp1)
	srow2 <- which(snp$Name == row$snp2)
	stopifnot(length(srow1) == 1)
	stopifnot(length(srow2) == 1)
	# $P partially matches the "Pr(>F)" column of the anova table.
	a <- anova(lm(probe[prow, ] ~ as.factor(gen[srow1, ]) + as.factor(gen[srow2, ]) + as.factor(gen[srow1, ]) : as.factor(gen[srow2, ])))$P[1]
	# NOTE: the original had a bare print() here, which is an error.
	return(a)
}
replicate(prinfo, probe, gen, snp, sig2[4,])

# Replication p-value for every retained hit.
rep_pval <- array(0, nrow(sig2))
for(i in seq_len(nrow(sig2)))
{
	rep_pval[i] <- replicate(prinfo, probe, gen, snp, sig2[i,])
}

# Of the 52 remaining, 3 interactions replicate
# 2 are cis-trans, one is cis-cis
# (Bonferroni correction: p-value multiplied by the number of tests.)
sig2[which(rep_pval * length(rep_pval) < 0.05), ]
rep_pval[which(rep_pval * length(rep_pval) < 0.05)]
sigrep <- sig2[which(rep_pval * length(rep_pval) < 0.05), ]

# 3D bar plot of mean phenotype per genotype combination (discovery data).
scrutinise <- function(res, geno, phen, z=45)
{
	a <- tapply(phen[,res$probeid], list(geno[,res$pos1], geno[, res$pos2]), mean)
	b <- table(geno[,res$pos1], geno[, res$pos2])
	p <- cloud(a, panel.3d.cloud=panel.3dbars, col="black", col.facet=c("#e5f5e0", "#A1D99B", "#31A354"), xbase=0.9, ybase=0.9, xlab="SNP1", ylab="SNP2", zlab="Phenotype", default.scales=list(arrows=FALSE), screen = list(z = z, x = -60, y = 3))
	return(list(a,b,p))
}

# Same plot for the replication data (rows looked up by probe/SNP names).
scrutinise_rep <- function(
	prinfo,
	probe,
	gen,
	snp,
	row,
	z=45
)
{
	prow <- which(prinfo$PROBE_ID == row$probe[1])
	stopifnot(length(prow) == 1)
	srow1 <- which(snp$Name == row$snp1)
	srow2 <- which(snp$Name == row$snp2)
	stopifnot(length(srow1) == 1)
	stopifnot(length(srow2) == 1)
	a <- tapply(probe[prow,], list(gen[srow1, ], gen[srow2, ]), mean)
	b <- table(gen[srow1, ], gen[srow2, ])
	p <- cloud(a, panel.3d.cloud=panel.3dbars, col="black", col.facet=c("#e5f5e0", "#A1D99B", "#31A354"), xbase=0.9, ybase=0.9, xlab="SNP1", ylab="SNP2", zlab="Phenotype", default.scales=list(arrows=FALSE), screen = list(z = z, x = -60, y = 3))
	return(list(a,b,p))
}

scrutinise(sigrep[1,], xmat, resphen)
scrutinise(sigrep[3,], xmat, resphen, z=-30)
dev.new()
scrutinise_rep(prinfo, probe, gen, snp, sigrep[1,])
dev.new()
scrutinise_rep(prinfo, probe, gen, snp, sigrep[3,])

sigrep
# Column-header reminder for the table of replicated hits (was free text
# in the original script, which is a syntax error):
# original pval  rep pval  gene  chr  snp1  maf1  snp2  maf2

# Minor allele frequencies in the replication data (fold to <= 0.5).
snp$maf <- apply(gen, 1, function(x) { sum(x, na.rm=TRUE) / (2*sum(!is.na(x))) })
index <- snp$maf > 0.5
snp$maf[index] <- 1 - snp$maf[index]
hist(snp$maf)
table(snp$maf > 0.05)

# Minor allele frequencies in the discovery data.
bim$maf <- apply(xmat, 2, function(x) { sum(x, na.rm=TRUE) / (2*sum(!is.na(x))) })
index <- bim$maf > 0.5
bim$maf[index] <- 1 - bim$maf[index]
hist(bim$maf)
table(bim$maf > 0.05)

probeinfo[sigrep$probeid,]
subset(snp, Name %in% c("rs7985085", "rs2241623"))
cor(xmat[, 488075], xmat[, 488068])
subset(snp, Name %in% c("rs10847601", "rs229670"))
subset(bim, V2 %in% c("rs10847601", "rs229670"))

# Variance components of the 2-locus model for every row of `info`:
# g = total genetic / residual SS ratio, a = additive share of genetic SS.
datstat <- function(info, xmat, resphen)
{
	cat(" ... ",nrow(info), "rows to be analysed\n")
	a <- array(0, nrow(info))
	g <- array(0, nrow(info))
	for(i in seq_len(nrow(info)))
	{
		# Progress indicator roughly every 1% of rows.
		if(i %% (nrow(info) / 100) == 0) cat(i/(nrow(info)/100)," ")
		l <- info[i,]
		if(is.na(l$chr1))
		{
			g[i] <- NA
			a[i] <- NA
			next
		}
		mod <- anova(lm(resphen[,l$probeid] ~ xmat[,l$pos1] + xmat[,l$pos2] + as.factor(xmat[,l$pos1]) : as.factor(xmat[,l$pos2])))
		g[i] <- sum(mod$Sum[1:3]) / mod$Sum[4]
		a[i] <- sum(mod$Sum[1:2]) / sum(mod$Sum[1:3])
	}
	return(data.frame(g,a))
}

# Print the replication anova table and the additive/genetic SS ratio
# for a single hit.
replicate_info <- function(
	prinfo,
	probe,
	gen,
	snp,
	row
)
{
	prow <- which(prinfo$PROBE_ID == row$probe[1])
	stopifnot(length(prow) == 1)
	srow1 <- which(snp$Name == row$snp1)
	srow2 <- which(snp$Name == row$snp2)
	stopifnot(length(srow1) == 1)
	stopifnot(length(srow2) == 1)
	mod <- anova(lm(probe[prow, ] ~ as.factor(gen[srow1, ]) + as.factor(gen[srow2, ]) + as.factor(gen[srow1, ]) : as.factor(gen[srow2, ])))
	print(mod)
	g <- sum(mod$Sum[1:3]) / mod$Sum[4]
	a <- sum(mod$Sum[1:2]) / sum(mod$Sum[1:3])
	print(a/g)
}

replicate_info(prinfo, probe, gen, snp, sigrep[3,])
/analysis/replication_gg.R
no_license
explodecomputer/eQTL-2D
R
false
false
5,843
r
# Replication analysis of epistatic eQTL hits in an independent dataset.
# Loads discovery results (allsub), genotype/expression matrices and the
# replication data, filters significant SNP-SNP-probe triplets, re-tests
# each hit in the replication data and visualises the genotype-phenotype
# surfaces for replicated interactions.
#
# Fixes vs. original: removed a bare print() call (error: argument "x"
# missing), commented out a stray free-text line that was a syntax error,
# renamed `rep` (shadows base::rep) to `rep_pval`, used seq_len()/TRUE
# instead of 1:n and T.

library(lattice)
library(latticeExtra)
library(ggplot2)

load("/Users/explodecomputer/git/wrayvisscher/eQTL_2D/analysis/residuals.RData")
load("/Users/explodecomputer/git/wrayvisscher/eQTL_2D/analysis/allsub.RData")
load("/Users/explodecomputer/git/wrayvisscher/eQTL_2D/analysis/clean_geno_final.RData")
load("/Users/explodecomputer/git/wrayvisscher/eQTL_2D/analysis/ggdata.RData")

# Recode genotype calls to allele dosage; "NC" (no call) becomes NA.
gen[gen == "NC"] <- NA
gen[gen == "AA"] <- "0"
gen[gen == "AB"] <- "1"
gen[gen == "BB"] <- "2"
gen <- matrix(as.numeric(gen), nrow(gen), ncol(gen))

ls()

# Keep hits passing the significance and variance-explained thresholds,
# drop duplicate probe/chromosome-pair entries and two known artefacts.
sig <- subset(allsub, pfull > 16.5 & propG > 0.05)
dups <- with(sig, paste(probe, chr1, chr2))
sig <- sig[!duplicated(dups), ]
sig <- subset(sig, snp1 != "rs11036212" & snp2 != "rs11036212" & probe != "ILMN_1688753")
dim(sig)

# Attach the chromosome of each probe.
temp <- with(probeinfo, data.frame(probe=PROBE_ID, probechr=CHR))
sig <- merge(sig, temp, by="probe")
dim(sig)
head(sig)

# Classify hits by the location of the two SNPs relative to the probe.
dim(ciscis <- subset(sig, chr1 == probechr & chr2 == probechr))
dim(cistrans <- subset(sig, (chr1 == probechr & chr2 != probechr) | (chr1 != probechr & chr2 == probechr)))
dim(transtrans <- subset(sig, chr1 != probechr & chr2 != probechr))

sig$type <- NA
sig$type[with(sig, chr1 == probechr & chr2 == probechr)] <- "cis-cis"
sig$type[with(sig, (chr1 == probechr & chr2 != probechr) | chr1 != probechr & chr2 == probechr)] <- "cis-trans"
sig$type[with(sig, chr1 != probechr & chr2 != probechr)] <- "trans-trans"
head(sig)
lapply(sig, class)

sig$snp1 <- as.character(sig$snp1)
sig$snp2 <- as.character(sig$snp2)

# Keep only hits whose SNPs are present in the replication genotype data.
allsnps <- with(sig, unique(c(snp1, snp2)))
length(allsnps)
table(allsnps %in% snp$Name)
table(sig$snp1 %in% snp$Name, sig$snp2 %in% snp$Name)
sig2 <- subset(sig, sig$snp1 %in% snp$Name & sig$snp2 %in% snp$Name)
dim(sig2)
head(sig2)

# Re-fit the full 2-locus model for one discovery hit in the replication
# data and return the p-value of the first model term.
replicate <- function(
	prinfo,
	probe,
	gen,
	snp,
	row
)
{
	# Locate the probe row and the two SNP rows in the replication data;
	# each must match exactly once.
	prow <- which(prinfo$PROBE_ID == row$probe[1])
	stopifnot(length(prow) == 1)
	srow1 <- which(snp$Name == row$snp1)
	srow2 <- which(snp$Name == row$snp2)
	stopifnot(length(srow1) == 1)
	stopifnot(length(srow2) == 1)
	# $P partially matches the "Pr(>F)" column of the anova table.
	a <- anova(lm(probe[prow, ] ~ as.factor(gen[srow1, ]) + as.factor(gen[srow2, ]) + as.factor(gen[srow1, ]) : as.factor(gen[srow2, ])))$P[1]
	# NOTE: the original had a bare print() here, which is an error.
	return(a)
}
replicate(prinfo, probe, gen, snp, sig2[4,])

# Replication p-value for every retained hit.
rep_pval <- array(0, nrow(sig2))
for(i in seq_len(nrow(sig2)))
{
	rep_pval[i] <- replicate(prinfo, probe, gen, snp, sig2[i,])
}

# Of the 52 remaining, 3 interactions replicate
# 2 are cis-trans, one is cis-cis
# (Bonferroni correction: p-value multiplied by the number of tests.)
sig2[which(rep_pval * length(rep_pval) < 0.05), ]
rep_pval[which(rep_pval * length(rep_pval) < 0.05)]
sigrep <- sig2[which(rep_pval * length(rep_pval) < 0.05), ]

# 3D bar plot of mean phenotype per genotype combination (discovery data).
scrutinise <- function(res, geno, phen, z=45)
{
	a <- tapply(phen[,res$probeid], list(geno[,res$pos1], geno[, res$pos2]), mean)
	b <- table(geno[,res$pos1], geno[, res$pos2])
	p <- cloud(a, panel.3d.cloud=panel.3dbars, col="black", col.facet=c("#e5f5e0", "#A1D99B", "#31A354"), xbase=0.9, ybase=0.9, xlab="SNP1", ylab="SNP2", zlab="Phenotype", default.scales=list(arrows=FALSE), screen = list(z = z, x = -60, y = 3))
	return(list(a,b,p))
}

# Same plot for the replication data (rows looked up by probe/SNP names).
scrutinise_rep <- function(
	prinfo,
	probe,
	gen,
	snp,
	row,
	z=45
)
{
	prow <- which(prinfo$PROBE_ID == row$probe[1])
	stopifnot(length(prow) == 1)
	srow1 <- which(snp$Name == row$snp1)
	srow2 <- which(snp$Name == row$snp2)
	stopifnot(length(srow1) == 1)
	stopifnot(length(srow2) == 1)
	a <- tapply(probe[prow,], list(gen[srow1, ], gen[srow2, ]), mean)
	b <- table(gen[srow1, ], gen[srow2, ])
	p <- cloud(a, panel.3d.cloud=panel.3dbars, col="black", col.facet=c("#e5f5e0", "#A1D99B", "#31A354"), xbase=0.9, ybase=0.9, xlab="SNP1", ylab="SNP2", zlab="Phenotype", default.scales=list(arrows=FALSE), screen = list(z = z, x = -60, y = 3))
	return(list(a,b,p))
}

scrutinise(sigrep[1,], xmat, resphen)
scrutinise(sigrep[3,], xmat, resphen, z=-30)
dev.new()
scrutinise_rep(prinfo, probe, gen, snp, sigrep[1,])
dev.new()
scrutinise_rep(prinfo, probe, gen, snp, sigrep[3,])

sigrep
# Column-header reminder for the table of replicated hits (was free text
# in the original script, which is a syntax error):
# original pval  rep pval  gene  chr  snp1  maf1  snp2  maf2

# Minor allele frequencies in the replication data (fold to <= 0.5).
snp$maf <- apply(gen, 1, function(x) { sum(x, na.rm=TRUE) / (2*sum(!is.na(x))) })
index <- snp$maf > 0.5
snp$maf[index] <- 1 - snp$maf[index]
hist(snp$maf)
table(snp$maf > 0.05)

# Minor allele frequencies in the discovery data.
bim$maf <- apply(xmat, 2, function(x) { sum(x, na.rm=TRUE) / (2*sum(!is.na(x))) })
index <- bim$maf > 0.5
bim$maf[index] <- 1 - bim$maf[index]
hist(bim$maf)
table(bim$maf > 0.05)

probeinfo[sigrep$probeid,]
subset(snp, Name %in% c("rs7985085", "rs2241623"))
cor(xmat[, 488075], xmat[, 488068])
subset(snp, Name %in% c("rs10847601", "rs229670"))
subset(bim, V2 %in% c("rs10847601", "rs229670"))

# Variance components of the 2-locus model for every row of `info`:
# g = total genetic / residual SS ratio, a = additive share of genetic SS.
datstat <- function(info, xmat, resphen)
{
	cat(" ... ",nrow(info), "rows to be analysed\n")
	a <- array(0, nrow(info))
	g <- array(0, nrow(info))
	for(i in seq_len(nrow(info)))
	{
		# Progress indicator roughly every 1% of rows.
		if(i %% (nrow(info) / 100) == 0) cat(i/(nrow(info)/100)," ")
		l <- info[i,]
		if(is.na(l$chr1))
		{
			g[i] <- NA
			a[i] <- NA
			next
		}
		mod <- anova(lm(resphen[,l$probeid] ~ xmat[,l$pos1] + xmat[,l$pos2] + as.factor(xmat[,l$pos1]) : as.factor(xmat[,l$pos2])))
		g[i] <- sum(mod$Sum[1:3]) / mod$Sum[4]
		a[i] <- sum(mod$Sum[1:2]) / sum(mod$Sum[1:3])
	}
	return(data.frame(g,a))
}

# Print the replication anova table and the additive/genetic SS ratio
# for a single hit.
replicate_info <- function(
	prinfo,
	probe,
	gen,
	snp,
	row
)
{
	prow <- which(prinfo$PROBE_ID == row$probe[1])
	stopifnot(length(prow) == 1)
	srow1 <- which(snp$Name == row$snp1)
	srow2 <- which(snp$Name == row$snp2)
	stopifnot(length(srow1) == 1)
	stopifnot(length(srow2) == 1)
	mod <- anova(lm(probe[prow, ] ~ as.factor(gen[srow1, ]) + as.factor(gen[srow2, ]) + as.factor(gen[srow1, ]) : as.factor(gen[srow2, ])))
	print(mod)
	g <- sum(mod$Sum[1:3]) / mod$Sum[4]
	a <- sum(mod$Sum[1:2]) / sum(mod$Sum[1:3])
	print(a/g)
}

replicate_info(prinfo, probe, gen, snp, sigrep[3,])
# To launch from dream-beat/src
# .libPaths("/ifs/res/papaemme/users/eb2/dream-beat/repos")
#
# Grid search over a GLM/GLM ensemble with transfer learning from the MSK
# cell-line data; writes mean/sd/min cross-validated concordance indices
# for every hyper-parameter combination to GLM_GLM.tsv.
#
# Fixes vs. original: removed the stray empty argument ("seed=17,,") which
# passed a missing positional argument to the evaluation function; renamed
# the loop variable `c` (shadows base::c) to `w`; `<-` assignment and
# TRUE/FALSE literals.

source("../../src/evaluate_SC2.R")
source("../../src/predictors_SC2.R")
source("../../src/utils_SC2.R")
source("../../src/evaluateCV_SC2_ensemble_transfer_learning.R")

mc.cores <- 10

# Feature sets for the challenge and MSK (transfer) data, plus responses.
celllines_challenge <- combine_features(c("clinical_numerical","mol"), path="../../data/features_SC2/")
celllines_challenge_MSK <- combine_features("combined_features", path="../../data/features_SC2/")
response <- read.table("../../data/responses/prepared_data_response.csv")

# 1) GLM - GLM ensemble
mypredictor_challenge <- predictorGLM
mypredictor_challenge_MSK <- predictorGLM

# Full grids (kept for reference; reduced grids used below):
#alpha_challenge <- c(0,0.25,0.5,0.75,1)
#alpha_challenge_MSK <- c(0,0.25,0.5,0.75,1)
#weight_challenge <- c(0,0.25,0.5,0.75,1)
#use_MSK <- c(25,50,75,100)
alpha_challenge <- c(0, 0.25, 0.5, 0.75)
alpha_challenge_MSK <- c(0, 0.25, 0.5, 0.75)
weight_challenge <- c(0.25, 0.5, 0.75)
use_MSK <- c(100)

df <- data.frame()
for (a in alpha_challenge) {
  for (b in alpha_challenge_MSK) {
    for (w in weight_challenge) {
      for (d in use_MSK) {
        # Cross-validated concordance indices for this hyper-parameter combo.
        res <- evaluateCV_SC2_ensemble_transfer_learning(
          mypredictor_challenge, mypredictor_challenge_MSK,
          celllines_challenge, celllines_challenge_MSK, response,
          MSK.path = "../../data/features_SC2/",
          nfolds = 5, nrepeats = 5, seed = 17,
          weight_challenge = w,
          mc.cores = mc.cores, use_MSK = d,
          alpha_challenge = a, alpha_challenge_MSK = b)
        df[paste("GLM_GLM", a, b, w, d, sep="_"), c("mean_ci","sd_ci","min_ci")] <-
          c(mean(res), sd(res), min(res))
      }
    }
  }
}

write.table(df, "GLM_GLM.tsv", sep="\t", quote=FALSE)
/analysis/sc2_ensembl/ensembl_GLM_GLM.R
no_license
yanistazi/DreamBeatAMLChallenge
R
false
false
1,612
r
# To launch from dream-beat/src
# .libPaths("/ifs/res/papaemme/users/eb2/dream-beat/repos")
#
# Grid search over a GLM/GLM ensemble with transfer learning from the MSK
# cell-line data; writes mean/sd/min cross-validated concordance indices
# for every hyper-parameter combination to GLM_GLM.tsv.
#
# Fixes vs. original: removed the stray empty argument ("seed=17,,") which
# passed a missing positional argument to the evaluation function; renamed
# the loop variable `c` (shadows base::c) to `w`; `<-` assignment and
# TRUE/FALSE literals.

source("../../src/evaluate_SC2.R")
source("../../src/predictors_SC2.R")
source("../../src/utils_SC2.R")
source("../../src/evaluateCV_SC2_ensemble_transfer_learning.R")

mc.cores <- 10

# Feature sets for the challenge and MSK (transfer) data, plus responses.
celllines_challenge <- combine_features(c("clinical_numerical","mol"), path="../../data/features_SC2/")
celllines_challenge_MSK <- combine_features("combined_features", path="../../data/features_SC2/")
response <- read.table("../../data/responses/prepared_data_response.csv")

# 1) GLM - GLM ensemble
mypredictor_challenge <- predictorGLM
mypredictor_challenge_MSK <- predictorGLM

# Full grids (kept for reference; reduced grids used below):
#alpha_challenge <- c(0,0.25,0.5,0.75,1)
#alpha_challenge_MSK <- c(0,0.25,0.5,0.75,1)
#weight_challenge <- c(0,0.25,0.5,0.75,1)
#use_MSK <- c(25,50,75,100)
alpha_challenge <- c(0, 0.25, 0.5, 0.75)
alpha_challenge_MSK <- c(0, 0.25, 0.5, 0.75)
weight_challenge <- c(0.25, 0.5, 0.75)
use_MSK <- c(100)

df <- data.frame()
for (a in alpha_challenge) {
  for (b in alpha_challenge_MSK) {
    for (w in weight_challenge) {
      for (d in use_MSK) {
        # Cross-validated concordance indices for this hyper-parameter combo.
        res <- evaluateCV_SC2_ensemble_transfer_learning(
          mypredictor_challenge, mypredictor_challenge_MSK,
          celllines_challenge, celllines_challenge_MSK, response,
          MSK.path = "../../data/features_SC2/",
          nfolds = 5, nrepeats = 5, seed = 17,
          weight_challenge = w,
          mc.cores = mc.cores, use_MSK = d,
          alpha_challenge = a, alpha_challenge_MSK = b)
        df[paste("GLM_GLM", a, b, w, d, sep="_"), c("mean_ci","sd_ci","min_ci")] <-
          c(mean(res), sd(res), min(res))
      }
    }
  }
}

write.table(df, "GLM_GLM.tsv", sep="\t", quote=FALSE)
# Created: June 26, 2013
# Purpose: Auxiliary Functions Used in VI Comparisons
# Author: Anthony D'Agostino (ald2187@columbia.edu)
# Last Edit: July 1, 2013

# EDIT HISTORY
#
# July 15 (ALD) - modified URL in evi.corr.regrid to better reflect the calculation
#                 of masked EVI values.

# NOTE(review): these functions rely on several globals defined by the calling
# script (base.path, scen, site, phase, satellite, years, badyear.thres,
# arc.years.early, arc.years.late, long.eq, lag.start, lag.end, out.path,
# month, site.data, MODIS.pixel.size, b.s, override, override.box,
# out.stdmap, benchmark.early, benchmark.late, corr.value) -- confirm they
# are set before sourcing this file.
#
# Fixes vs. original: evi.regrid recursively called itself (infinite
# recursion) instead of evi.corr.regrid; the fragile
# get(..., as.environment(-1)) / capture.output(cat(...)) URL construction
# was replaced by a plain paste0; dead pre-allocations and a no-op
# environment() call were removed; T/F replaced by TRUE/FALSE.

library(gtools)

vi.common <- function(product = c("EVI")){
  # Read VI data for the current scenario/site/phase/satellite from an
  # existing .csv and return only the rows for the relevant `years`.
  # Returns NA when the file does not exist (sites with incomplete
  # satellite data).
  #
  # product: a single product name such as "EVI" or "NDVI" (default "EVI").
  # `phase` ("Early"/"Late") and `satellite` ("Mod"/"Spo") must already be
  # defined in the calling environment.
  fn <- paste0(base.path, scen, site, "/met.data/", phase, product, satellite, "R.csv")
  if (file.exists(fn)){
    vi <- read.csv(fn, header = TRUE, row.names = 1)
    rownames(vi) <- substr(rownames(vi), 5, 9)  # strip month from rownames
    return(vi[years, ])
  } else {
    return(NA)
  }
}

vi.compare <- function(product){
  # Identify the worst VI years (at or below the `badyear.thres` quantile)
  # and return the fraction of overlap with the ARC bad years for the
  # current phase.  Returns NA when no VI file exists for this site.
  vi <- vi.common(product)  # read once (original read the file twice)
  if (any(!is.na(vi))){
    vi.bad.years <- years[which(vi <= quantile(vi, probs = badyear.thres))]
    if (phase == "Early"){
      ol <- intersect(arc.years.early, vi.bad.years)
      pct <- length(ol) / max(length(arc.years.early), length(vi.bad.years))
    }
    if (phase == "Late"){
      ol <- intersect(arc.years.late, vi.bad.years)
      pct <- length(ol) / max(length(arc.years.late), length(vi.bad.years))
    }
  } else {
    pct <- NA
  }
  return(pct)
}  # end of vi.compare function

vi.ecdf <- function(product){
  # Rank each year's VI value via the empirical CDF (rounded to 2 dp);
  # returns a vector of NAs of length(years) when no VI file exists.
  vi <- vi.common(product)  # read once (original read the file twice)
  if (any(!is.na(vi))){
    Fn1 <- ecdf(vi)
    res <- round(Fn1(vi), digits = 2)
  } else {
    res <- rep(NA, length(years))
  }
  return(res)
}

long.cor <- function(longitude, latitude){
  # Distance between longitude degrees at a given latitude, scaled by the
  # global `long.eq` (degree length at the equator).  The `longitude`
  # argument is currently unused; kept for interface compatibility.
  # Need to determine whether these corrections get included in final version
  return(cos(latitude / (180 / pi)) * long.eq)
}

make.coords2 <- function(Lat, Lon, Range, Points){
  # Build a grid of evenly-spaced lat/lon pairs centred on (Lat, Lon);
  # `Range` is converted to degrees via the global `long.eq`.
  lat.range <- seq(Lat - Range / (2 * long.eq), Lat + Range / (2 * long.eq),
                   by = (2 * Range / long.eq) / (Points - 1))
  lon.range <- seq(Lon - Range / (2 * long.eq), Lon + Range / (2 * long.eq),
                   by = (2 * Range / long.eq) / (Points - 1))
  coords.mat <- expand.grid(lat.range, lon.range)
  colnames(coords.mat) <- c("Latitude", "Longitude")
  return(coords.mat)
}

make.coords <- function(df, Buffer){
  # Bounding box (left/bottom/right/top) around the Latitude/Longitude
  # columns of `df`, padded by `Buffer` degrees, for use with ggmap.
  return(list(l = min(df$Longitude) - Buffer,
              b = min(df$Latitude) - Buffer,
              r = max(df$Longitude) + Buffer,
              t = max(df$Latitude) + Buffer))
}

coord.range <- function(Lat, Lon, Range){
  # Edge values of a lat-lon grid given a user-specified distance from the
  # site pixel.
  lat.range <- c(Lat - Range / (2 * long.eq), Lat + Range / (2 * long.eq))
  lon.range <- c(Lon - Range / (2 * long.eq), Lon + Range / (2 * long.eq))
  coords.out <- rbind(lat.range, lon.range)
  rownames(coords.out) <- c("Latitude", "Longitude")
  return(coords.out)
}

evi.corr.regrid <- function(Lat, Lon, Size, CorrThreshold, Month, RegridSize = 0.05, Lags = FALSE){
  # Download (via the IRI Data Library) a monthly time series of the X-Y
  # averaged, correlation-masked 1-mo lag of EVI for a specified month.
  #
  # Lat, Lon       : site coordinates.
  # Size           : half-width (degrees) of the bounding box.
  # CorrThreshold  : pixel-level EVI/ARC correlations below this value are
  #                  masked out as NA.
  # Month          : 3-letter month string selecting which months to keep.
  # RegridSize     : in degrees, scale the underlying MODIS data is
  #                  regridded to (currently unused; the regrid URL segment
  #                  ad7 is commented out).
  # Lags           : if TRUE, capture XY-average correlation values across
  #                  the pre-specified lag range (globals lag.start/lag.end).
  lagval <- paste0("T/", lag.start, "/1/", lag.end, "/shiftdatashort")
  # URL assembled in segments to keep future modifications flexible.
  ad1 <- "http://iridl.ldeo.columbia.edu/expert/SOURCES/.NOAA/.NCEP/.CPC/.FEWS/.Africa/.DAILY/.ARC2/.daily/.est_prcp"
  ad2 <- paste0("/X/", Lon, "/VALUE/Y/", Lat, "/VALUE/")
  ad3 <- "X/removeGRID/Y/removeGRID/T/1.0/monthlyAverage"
  ad4 <- "/SOURCES/.USGS/.LandDAAC/.MODIS/.version_005/.EAF/.EVI/"
  ad5 <- paste0("X/", Lon - Size, "/", Lon + Size, "/RANGEEDGES/")
  ad6 <- paste0("Y/", Lat - Size, "/", Lat + Size, "/RANGEEDGES/")
  # ad7 <- paste0("X/", RegridSize, "/", "0.9/boxAverage/Y/", RegridSize, "/", "0.9/boxAverage/")
  ad7 <- ""
  ad8 <- "T/0.9/monthlyAverage/"
  ad9 <- "dup/"  # have to duplicate before taking the lag
  ad10 <- paste0("T/1/1/1/shiftdatashort")  # stock lag value
  ad11 <- "/3/-1/roll"
  ad12 <- "/[T]/correlate/"
  ad13 <- paste0(CorrThreshold, "/maskle/")
  ad14 <- "dataflag/0/maskle/mul/"
  ad15 <- "[X/Y]average/"
  ad16 <- paste0("T/(", Month, ")RANGE/")
  ad17 <- "T+exch+table-+text+text+-table++.csv"
  # When capturing only lagged correlation values, several URL segments are
  # dropped or replaced.
  if (Lags == TRUE){
    ad7 <- ""
    ad9 <- lagval
    ad10 <- ""
    ad11 <- ""
    ad13 <- ""
    ad14 <- ""
    ad16 <- ""
    ad17 <- "T_lag+exch+table-+text+text+-table++.csv"
  }
  # lag URL will look something like this: "http://iridl.ldeo.columbia.edu/expert/SOURCES/.NOAA/.NCEP/.CPC/.FEWS/.Africa/.DAILY/.ARC2/.daily/.est_prcp/X/38.726/VALUE/Y/7.845/VALUE/X/removeGRID/Y/removeGRID/T/1.0/monthlyAverage/SOURCES/.USGS/.LandDAAC/.MODIS/.version_005/.EAF/.EVI/X/38.526/38.926/RANGEEDGES/Y/7.645/8.045/RANGEEDGES/T/1.0/monthlyAverage/T/0/1/0/shiftdatashort/[T]/correlate/[X/Y]average/ T_lag"
  # Concatenate the segments (the original used a fragile
  # get()/capture.output(cat()) construction; plain paste0 is equivalent).
  url.name <- paste0(ad1, ad2, ad3, ad4, ad5, ad6, ad7, ad8, ad9, ad10,
                     ad11, ad12, ad13, ad14, ad15, ad16, ad17)
  # specify output .csv filename and location
  fout <- paste0(out.path, "evicorr_temp.csv")
  download.file(url.name, fout, cacheOK = FALSE)
  return(read.csv(fout, header = TRUE))
}  # end evi.corr.regrid function

evi.regrid <- function(GridScale){
  # Convenience wrapper: pull EVI around the current site at `GridScale`
  # (degrees) using the script-level globals site.data, site, month and
  # MODIS.pixel.size.
  # Fixed: the original called evi.regrid() recursively (infinite
  # recursion); evi.corr.regrid() was clearly intended.
  out <- evi.corr.regrid(Lat = site.data[site, "Latitude"],
                         Lon = site.data[site, "Longitude"],
                         Month = month,
                         CorrThreshold = 0,
                         Size = GridScale,
                         RegridSize = MODIS.pixel.size)
  return(out)
}  # end evi.regrid

df.melt <- function(dframe.pre){
  # Melt a standard (not yet melted) data frame for gg use.  Done outside
  # arc.vi.vis to avoid an environment/scoping problem.
  return(melt(dframe.pre, id.vars = c("site", "Latitude", "Longitude")))
}

arc.vi.vis <- function(x.df){
  # Plot site rankings on a CloudMade basemap.  `x.df` must have Latitude
  # and Longitude columns; the buffer size (b.s) and the optional
  # override/override.box bounding box come from the main script.
  cm.key <- "3ba6f5c05bc142209d423981fcbacb4a"  # from Cloud Made API
  l <- min(x.df$Longitude) - b.s
  b <- min(x.df$Latitude) - b.s
  r <- max(x.df$Longitude) + b.s
  t <- max(x.df$Latitude) + b.s
  if (override){
    l <- override.box[1]; b <- override.box[2]
    r <- override.box[3]; t <- override.box[4]
  }
  # convert to appropriate form for ggplot2/ggmap use
  colnames(x.df)[5] <- "Ranking"
  Latitude <- x.df$Latitude
  Longitude <- x.df$Longitude
  theme_set(theme_bw(16))
  outmap <- get_cloudmademap(bbox = c(left = l, bottom = b, right = r, top = t), api_key = cm.key)
  # modify this portion if want to change gradient colors
  ggmap(outmap) +
    geom_point(aes_string(x = Longitude, y = Latitude, color = Ranking), data = x.df) +
    scale_color_gradient(low = "red", high = "green") +
    facet_wrap(~ variable)
}  # end arc.vi.vis

find.worst <- function(obj, colname){
  # Years of `obj` (which must have a "Year" column) whose ecdf rank of
  # obj[, colname] falls at or below the global `badyear.thres`.
  Fn <- ecdf(obj[, colname])
  ranks <- Fn(obj[, colname])
  return(obj$Year[which(ranks <= badyear.thres)])
}

bench.corr.compare <- function(corr_value, bound_size, reGrid_size){
  # Compare pre-generated correlation-threshold EVI agreement percentages
  # to the 'benchmark' model of a 10x10km EVI regrid, and write .csv and
  # .png outputs with the difference percentages.  Positive values denote
  # the correlation-threshold version performed better than the benchmark;
  # negative values denote worse performance.
  #
  # corr_value : correlation threshold used by an earlier ILO_Report.R run.
  # bound_size : bounding-box size used by the evi.regrid function.
  # reGrid_size: pixel size MODIS was regridded to (often gridded up given
  #              IRI-DL limitations with finer data).

  # filename containing run output from ILO_Report.R
  fn0 <- paste0("EVI_SpCorr_R", corr_value, "_bs", round(bound_size, digits = 3),
                "_rg", reGrid_size, "_", years[1], "-", years[length(years)])

  # window-specific data frames for holding comparison data
  df.both <- read.csv(paste0(out.path, fn0, ".csv"), header = TRUE)
  df.early <- df.both[which(df.both[, "Window"] == "Early"), ]
  df.late <- df.both[which(df.both[, "Window"] == "Late"), ]

  if (identical(df.early[, "site"], benchmark.early[, "site"])){
    print("Sites match, Early comparison success!")
    out.early <- data.frame(df.early, fin = (df.early[, "Agreement"] - benchmark.early[, "value"]))
  } else {
    print("Mismatch in site list - early window comparison failed")
  }
  if (identical(df.late[, "site"], benchmark.late[, "site"])){
    print("Sites match, Late comparison success!")
    out.late <- data.frame(df.late, fin = (df.late[, "Agreement"] - benchmark.late[, "value"]))
  } else {
    print("Mismatch in site list - late window comparison failed")
  }
  out <- rbind(out.early, out.late)

  # potential title for output plots, currently not included in code below
  # NOTE(review): ti.c uses the global `corr.value`, not the `corr_value`
  # argument -- presumably a typo; confirm before changing.
  ti.c <- paste0("Agreement Performance Gain [ ", corr.value, "Correlation Threshold - Benchmark]")

  # save as .png and .csv
  ggmap(out.stdmap) +
    geom_point(aes(x = Longitude, y = Latitude, color = fin), data = out) +
    scale_color_gradient(low = "red", high = "green") +
    scale_colour_gradient2(low = "red", high = "green", "Difference\nin agreement\npercent") +
    facet_wrap(~ Window)
  ggsave(file = paste0(out.path, "Diff", fn0, ".png"))
  write.csv(out, paste0(out.path, "Diff", fn0, ".csv"))
  return(out)
}  # end of bench.corr.compare function
/vi_functions.R
no_license
a8dx/ILO
R
false
false
12,831
r
# Created: June 26, 2013 # Purpose: Auxiliary Functions Used in VI Comparisons # Author: Anthony D'Agostino (ald2187@columbia.edu) # Last Edit: July 1, 2013 # EDIT HISTORY # # July 15 (ALD) - modified URL in evi.corr.regrid to better reflect the calculation # of masked EVI values. library(gtools) vi.common <- function(product = c("EVI")){ # Product has to be a single product, default is EVI unless otherwise specified # this code [will be] common to several functions in the vi_functions file and # read in VI data from existing .csv's # # product is "EVI", "NDVI", etc. # phase and satellite must already be defined: satellite is "Mod" or "Spo", phase is "early" or "late" fn <- paste0(base.path, scen,site,"/met.data/",phase,product,satellite,"R.csv") # need to create this if loop for sites which don't have complete satellite data if (file.exists(fn)){ vi <- read.csv(fn, header = T, row.names = 1) # read appropriate VI file # tryCatch(stop("you threw an error"), error = function(e) print(e$message)) rownames(vi) <- substr(rownames(vi), 5, 9) # removing month from rownames return(vi[years,]) # take only the relevant years } else {return(NA)} } vi.compare <- function(product){ # This function reads in existing VI data from a scenario folder, identifies # which years are the worst (according to a pre-defined "badyears.thres"), and # then returns the percentage of years in which these worst years correspond # to ARC data # product is "EVI", "NDVI", etc. 
# phase and satellite must already be defined: satellite is "Mod" or "Spo", phase is "early" or "late" if(any(!is.na(vi.common(product)))){ #returns NA if file does not exist vi <- vi.common(product) # Identify which years are bad according to pre-defined threshold vi.bad.years <- years[which(vi <= quantile(vi, probs = badyear.thres))] if (phase == "Early"){ # calculate overlap ol <- intersect(arc.years.early,vi.bad.years) pct <- length(ol) / max(length(arc.years.early), length(vi.bad.years)) } if (phase == "Late"){ ol <- intersect(arc.years.late,vi.bad.years) pct <- length(ol) / max(length(arc.years.late), length(vi.bad.years)) } } # if VI file does not exist else {pct = NA} return(pct) } #end of vi.compare function vi.ecdf <- function(product){ # Reads in VI data for single phase, single sat products # Calculates ranks using 'ecdf' # Output table is generated in main script # product is "EVI", "NDVI", etc. # `phase' and `satellite' must already be defined: `satellite' is "Mod" or "Spo", `phase' is "early" or "late" if(any(!is.na(vi.common(product)))){ #returns NA if file does not exist vi <- vi.common(product) Fn1 <- ecdf(vi) res <- round(Fn1(vi), digits = 2) } # if VI file does not exist else {res <- rep(NA,length(years))} return(res) } long.cor <- function(longitude,latitude){ # Calculates distance between longitude degrees # Need to determine whether these corrections get included in final version return(cos(latitude/(180/pi))*long.eq) } make.coords2 <- function(Lat,Lon,Range,Points){ #make.coords creates a matrix of all evenly-spaced lat/lon pairs, to create a grid of VI data values coords.mat <- matrix(data = NA, ncol = 2) lat.range <- seq(Lat - Range / (2*long.eq), Lat + Range / (2*long.eq), by = (2*Range/long.eq)/(Points-1)) #extent of lat range lon.range <- seq(Lon - Range / (2* long.eq), Lon + Range / (2*long.eq), by = (2*Range/long.eq)/(Points-1)) #extent of lon range coords.mat <- expand.grid(lat.range,lon.range) colnames(coords.mat) <- c("Latitude", 
"Longitude") return(coords.mat) } make.coords <- function(df,Buffer){ # Reads in the max/min lat/lon of a df with Latitude and Longitude columns # and generates a list of bounding coordinates to be read into a ggmap setting, # given a user-specified buffer, e.g., (0.1 degrees) return(list(l = min(df$Longitude)-Buffer, b = min(df$Latitude)-Buffer, r = max(df$Longitude)+Buffer, t = max(df$Latitude)+Buffer)) } coord.range <- function(Lat,Lon,Range){ #Provides the edge values of a lat-lon grid given user-specified distance value from site pixel coords.out <- matrix(data = NA, nrow = 2, ncol = 2) lat.range <- c(Lat - Range / (2*long.eq), Lat + Range / (2*long.eq)) lon.range <- c(Lon - Range / (2*long.eq), Lon + Range / (2*long.eq)) coords.out <- rbind(lat.range,lon.range) rownames(coords.out) <- c("Latitude","Longitude") return(coords.out) } evi.corr.regrid <- function(Lat,Lon,Size,CorrThreshold,Month,RegridSize=0.05,Lags = FALSE){ environment() # produces a time-series of monthly, X-Y averaged 1-mo lag of EVI # for a specified 3-letter month # Lat : user-specified latitude value # Lon : user-specified longitude value # Size : value in degrees to create the bounding box # RegridSize : in degrees, what scale is he underlying MODIS data regridded to # CorrThreshold : threshold value for which pixel-level correlations between EVI and ARC under this value are masked out as NA # Month : 3-letter string specifying which months to focus on in identifying worst years # Lags : enables capture of XY average correlation values across pre-specified lags # create IRI-DL url in stages given input values # keeping as small segments to improve flexibility for future functioning lagval <- paste0("T/",lag.start,"/1/",lag.end,"/shiftdatashort") ad1 <- "http://iridl.ldeo.columbia.edu/expert/SOURCES/.NOAA/.NCEP/.CPC/.FEWS/.Africa/.DAILY/.ARC2/.daily/.est_prcp" ad2 <- paste0("/X/",Lon,"/VALUE/Y/",Lat,"/VALUE/") ad3 <- "X/removeGRID/Y/removeGRID/T/1.0/monthlyAverage" ad4 <- 
"/SOURCES/.USGS/.LandDAAC/.MODIS/.version_005/.EAF/.EVI/" ad5 <- paste0("X/",Lon-Size,"/", Lon+Size, "/RANGEEDGES/") ad6 <- paste0("Y/", Lat-Size, "/", Lat+Size, "/RANGEEDGES/") # ad7 <- paste0("X/", RegridSize, "/", "0.9/boxAverage/Y/", RegridSize, "/", "0.9/boxAverage/") ad7 <- "" ad8 <- "T/0.9/monthlyAverage/" ad9 <- "dup/" # have to duplicate before taking the lag ad10 <- paste0("T/1/1/1/shiftdatashort") # stock lag value ad11 <- "/3/-1/roll" ad12 <- "/[T]/correlate/" ad13 <- paste0(CorrThreshold, "/maskle/") ad14 <- "dataflag/0/maskle/mul/" ad15 <- "[X/Y]average/" ad16 <- paste0("T/(",Month, ")RANGE/") ad17 <- "T+exch+table-+text+text+-table++.csv" # need to ensure get function calls variables from proper environment myfunc <- function(x){ return(get(x,envir = as.environment(-1))) } # modifying url to reflect capture only of correlation values if (Lags == TRUE){ ad7 <- "" ad9 <- lagval ad10 <- "" ad11 <- "" ad12 <- ad12 ad13 <- "" ad14 <- "" ad15 <- ad15 ad16 <- "" ad17 <- "T_lag+exch+table-+text+text+-table++.csv" } # lag URL will look something like this: "http://iridl.ldeo.columbia.edu/expert/SOURCES/.NOAA/.NCEP/.CPC/.FEWS/.Africa/.DAILY/.ARC2/.daily/.est_prcp/X/38.726/VALUE/Y/7.845/VALUE/X/removeGRID/Y/removeGRID/T/1.0/monthlyAverage/SOURCES/.USGS/.LandDAAC/.MODIS/.version_005/.EAF/.EVI/X/38.526/38.926/RANGEEDGES/Y/7.645/8.045/RANGEEDGES/T/1.0/monthlyAverage/T/0/1/0/shiftdatashort/[T]/correlate/[X/Y]average/ T_lag" # pieces together segments to generate a unified, usable URL vec <- paste0("ad",1:17) url.name <- capture.output(cat(unlist(lapply(vec,myfunc)), sep = "", collapse = "")) # specify output .csv filename and location fout <- paste0(out.path,"evicorr_temp.csv") download.file(url.name, fout, cacheOK = FALSE) return(read.csv(fout, header = T)) } # end evi.corr.regrid function # more work needs to be done on this. 
evi.regrid <- function(GridScale){ # gridscale is a value in degrees out <- eval(evi.regrid(Lat = site.data[site,"Latitude"], Lon = site.data[site,"Longitude"], Month = month, CorrThreshold = 0, Size = GridScale, RegridSize = MODIS.pixel.size)) return(out) } # end evi.regrid df.melt <- function(dframe.pre){ # dframe = a standard dataframe which has not yet been melted for gg use # because of scoping rules (i believe), have to melt the dataframe # outside of arc.vi.vis to avoid an environment problem # return(melt(dframe.pre, id.vars = c("site","Latitude", "Longitude"))) } arc.vi.vis <- function(x.df){ # # dataframe must have Latitude and Longitude columns, appropriately named # imports buffer size (b.s) variable from main script # cm.key <- "3ba6f5c05bc142209d423981fcbacb4a" # from Cloud Made API l <- min(x.df$Longitude) - b.s b <- min(x.df$Latitude) - b.s r <- max(x.df$Longitude) + b.s t <- max(x.df$Latitude) + b.s if(override){ l <- override.box[1]; b <- override.box[2]; r <- override.box[3]; t <- override.box[4] } # convert to appropriate form for ggplot2/ggmap use colnames(x.df)[5] <- "Ranking" Latitude <- x.df$Latitude Longitude <- x.df$Longitude theme_set(theme_bw(16)) outmap <- get_cloudmademap(bbox = c(left = l, bottom = b, right = r, top = t), api_key = cm.key) # modify this portion if want to change gradient colors ggmap(outmap) + geom_point(aes_string(x = Longitude, y = Latitude, color = Ranking), data = x.df) + scale_color_gradient(low = "red", high = "green") + facet_wrap(~ variable) } # end arc.vi.vis find.worst <- function(obj,colname){ # Returns the years of an obj with column "Year" falling below passed in 'badyear.thres' value # # Requires obj to also have "correlation" column # Could generalize and include an input parameter corresponding to column to be ranked Fn <- ecdf(obj[,colname]) ranks <- Fn(obj[,colname]) return(obj$Year[which(ranks <= badyear.thres)]) } bench.corr.compare <- function(corr_value, bound_size, reGrid_size){ # This function 
reads in pre-generated correlation threshold EVI agreement # percentages, compares them to the 'benchmark' model of a 10x10km EVI regrid # and produces .csv and .png outputs with the difference percentages. # Positive values denote the correlation threshold version performed better # than status quo, negative values denote worse performance. # # Input Arguments: # corr_value: correlation threshold used to produce output from an earlier # run of the ILO_Report.R file. # bound_size: size of bounding box constructed for the evi.regrid function # reGrid_size: pixel size that MODIS is regridded to - often gridding up # given IRI-DL limitations with finer data # filename containing run output from ILO_Report.R fn0 <- paste0("EVI_SpCorr_R",corr_value,"_bs", round(bound_size, digits = 3), "_rg", reGrid_size, "_", years[1], "-", years[length(years)]) # create window-specific data frames for holding comparison data df.both <- read.csv(paste0(out.path, fn0,".csv"), header = T) df.early <- df.both[which(df.both[,"Window"] == "Early"),] df.late <- df.both[which(df.both[,"Window"] == "Late"),] if (identical(df.early[,"site"],benchmark.early[,"site"])){ print("Sites match, Early comparison success!") out.early <- data.frame(df.early, fin = (df.early[,"Agreement"]-benchmark.early[,"value"])) }else{print("Mismatch in site list - early window comparison failed")} if (identical(df.late[,"site"],benchmark.late[,"site"])){print("Sites match, Late comparison success!") out.late <- data.frame(df.late, fin = (df.late[,"Agreement"]-benchmark.late[,"value"] )) }else{print("Mismatch in site list - late window comparison failed")} out <- rbind(out.early,out.late) # use common naming system for different outputs #fn <- paste0(out.path,"CorrThreshPerfDiff_",corr.value,"_.") # potential title for output plots, currently not included in code below ti.c <- paste0("Agreement Performance Gain [ ", corr.value, "Correlation Threshold - Benchmark]") # save as .png and .csv ggmap(out.stdmap) + 
geom_point(aes(x = Longitude, y = Latitude, color = fin), data = out) + scale_color_gradient(low = "red", high = "green") + scale_colour_gradient2(low = "red", high = "green", "Difference\nin agreement\npercent") + facet_wrap(~ Window) ggsave(file = paste0(out.path, "Diff",fn0,".png")) write.csv(out,paste0(out.path, "Diff",fn0,".csv")) return(out) } # end of bench.corr.compare function
library(gpclib) library(RColorBrewer) library(mapproj) library(maptools) library(rgdal) map <- suppressWarnings(readOGR(dsn="/home/aurelius/workspace/lits/shape/countries_shp", layer="lits_all")) library(car) map$NAME <- recode(map$NAME, "'United Kingdom'='Great Britain'") map$group_general <- recode(map$NAME, "c('France','Germany','Great Britain', 'Italy','Sweden')='Western Europe'; c('Czech Republic','Estonia','Hungary','Bulgaria', 'Latvia','Lithuania','Poland', 'Slovakia','Slovenia','Romania')='CEE'; c('Armenia','Azerbaijan','Belarus','Georgia','Kazakhstan', 'Kyrgyzstan','Moldova','Tajikistan', 'Ukraine','Uzbekistan','Russia')='CIS';else=NA") map$group_fine <- recode(map$NAME, "c('Armenia','Azerbaijan','Georgia') = 'Caucasus'; c('Russia','Ukraine','Moldova','Belarus')= 'CIS North'; c('Kyrgyzstan','Kazakhstan','Tajikistan','Uzbekistan')='Central Asia'; c('Estonia','Latvia','Lithuania')='Baltic States'; c('Poland','Czech Republic','Slovakia','Hungary','Slovenia', 'Romania','Bulgaria')='CEE EU'; c('Italy','Germany','Great Britain','Sweden','France')='West Europe'; else='other'") map$group_fine <- factor(map$group_fine, levels=c('West Europe','CEE EU', 'Baltic States','CIS North', 'Caucasus','Central Asia', 'other')) # shapefile into data.frame gpclibPermit() map@data$id <- rownames(map@data) library(ggplot2) map.points <- fortify(map, region="id") map.df <- merge(map.points, map@data, by="id") # only east of longitude -25 map.df <- subset(map.df, long >= -15 & long <= 80) # names for countries in analysis (excluding others) namedata <- map.df[!is.na(map.df$group_general),] cnames <- stats:::aggregate.formula(cbind(long, lat) ~ NAME, data=namedata, mean) cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "gray90", "#CC79A7") mapplot <- ggplot(map.df, aes(long,lat,group=group)) + geom_polygon(data = map.df, aes(long,lat), fill="gray90", color = "white") + geom_polygon(aes(fill = group_general)) + # or group_fine !! 
scale_fill_manual(values=cbPalette) + geom_polygon(data = map.df, aes(long,lat), fill=NA, color = "white") + geom_text(data=cnames, aes(long, lat, label = NAME, group=NAME), size=2, color="black") + coord_map(project="orthographic") + theme_minimal() + theme(legend.position="top") + theme(legend.title=element_blank()) + theme(legend.text=element_text(size=6)) + theme(legend.position="top") + theme(axis.title.y = element_blank()) + theme(axis.title.x = element_blank()) + theme(axis.text.y = element_blank()) + theme(axis.text.x = element_blank()) + theme(axis.ticks = element_blank()) + theme(strip.text = element_text(size=6)) + #guides(color = guide_legend(nrow = 2)) + theme(legend.key.size = unit(5, "mm")) ppi <- 300 png("../figure/mapplot.png", width=12/2.54*ppi, height=10/2.54*ppi, res=ppi) mapplot dev.off() ### Faceted showing population shares of explanations load("~/workspace/lits/attrib/attrib_year2013/data/lits.RData") library(survey) d.df <- svydesign(id = ~SerialID, weights = ~weight, data = df) t <- data.frame(prop.table(svytable(~group_general+cntry+q309_rec, d.df), 2)*100) t$Freq <- round(t$Freq, 1) t <- subset(t, Freq > 0) t#names(t2) <- c("group1","country","social.blame","individual.blame","individual.fate","dont.know","social.fate","not.stated") summary(map.df$NAME) summary(t$cntry) map.df.facet <- merge(map.df,t,by.x="NAME",by.y="cntry",all.y=TRUE) map.df.facet <- map.df.facet[order(map.df.facet$order),] # for blame explanations only map.df.facet <- subset(map.df.facet, q309_rec %in% c("Social Blame", "Individual Blame")) summary(map.df.facet$NAME) mapplotfacet <- ggplot(map.df.facet, aes(long,lat,group=group)) + geom_polygon(data = map.df, aes(long,lat), fill="gray90", color = "white") + geom_polygon(aes(fill = Freq)) + geom_polygon(data = map.df.facet, aes(long,lat), fill=NA, color = "white", size=0.1) + #geom_text(data=cnames, aes(long, lat, label = NAME, group=NAME), size=4, color="black") + coord_map(project="orthographic") + theme_minimal() + 
facet_wrap(~q309_rec)+ theme_minimal() + theme(legend.title=element_blank()) + theme(legend.text=element_text(size=6)) + theme(legend.position="top") + theme(axis.title.y = element_blank()) + theme(axis.title.x = element_blank()) + theme(axis.text.y = element_blank()) + theme(axis.text.x = element_blank()) + theme(axis.ticks = element_blank()) + theme(strip.text = element_text(size=6)) + #guides(color = guide_legend(nrow = 2)) + theme(legend.key.size = unit(5, "mm")) ppi <- 300 png("../figure/mapplotfacet.png", width=14/2.54*ppi, height=8/2.54*ppi, res=ppi) mapplotfacet dev.off()
/code/plot_map.R
permissive
muuankarski/attributions
R
false
false
5,386
r
library(gpclib) library(RColorBrewer) library(mapproj) library(maptools) library(rgdal) map <- suppressWarnings(readOGR(dsn="/home/aurelius/workspace/lits/shape/countries_shp", layer="lits_all")) library(car) map$NAME <- recode(map$NAME, "'United Kingdom'='Great Britain'") map$group_general <- recode(map$NAME, "c('France','Germany','Great Britain', 'Italy','Sweden')='Western Europe'; c('Czech Republic','Estonia','Hungary','Bulgaria', 'Latvia','Lithuania','Poland', 'Slovakia','Slovenia','Romania')='CEE'; c('Armenia','Azerbaijan','Belarus','Georgia','Kazakhstan', 'Kyrgyzstan','Moldova','Tajikistan', 'Ukraine','Uzbekistan','Russia')='CIS';else=NA") map$group_fine <- recode(map$NAME, "c('Armenia','Azerbaijan','Georgia') = 'Caucasus'; c('Russia','Ukraine','Moldova','Belarus')= 'CIS North'; c('Kyrgyzstan','Kazakhstan','Tajikistan','Uzbekistan')='Central Asia'; c('Estonia','Latvia','Lithuania')='Baltic States'; c('Poland','Czech Republic','Slovakia','Hungary','Slovenia', 'Romania','Bulgaria')='CEE EU'; c('Italy','Germany','Great Britain','Sweden','France')='West Europe'; else='other'") map$group_fine <- factor(map$group_fine, levels=c('West Europe','CEE EU', 'Baltic States','CIS North', 'Caucasus','Central Asia', 'other')) # shapefile into data.frame gpclibPermit() map@data$id <- rownames(map@data) library(ggplot2) map.points <- fortify(map, region="id") map.df <- merge(map.points, map@data, by="id") # only east of longitude -25 map.df <- subset(map.df, long >= -15 & long <= 80) # names for countries in analysis (excluding others) namedata <- map.df[!is.na(map.df$group_general),] cnames <- stats:::aggregate.formula(cbind(long, lat) ~ NAME, data=namedata, mean) cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "gray90", "#CC79A7") mapplot <- ggplot(map.df, aes(long,lat,group=group)) + geom_polygon(data = map.df, aes(long,lat), fill="gray90", color = "white") + geom_polygon(aes(fill = group_general)) + # or group_fine !! 
scale_fill_manual(values=cbPalette) + geom_polygon(data = map.df, aes(long,lat), fill=NA, color = "white") + geom_text(data=cnames, aes(long, lat, label = NAME, group=NAME), size=2, color="black") + coord_map(project="orthographic") + theme_minimal() + theme(legend.position="top") + theme(legend.title=element_blank()) + theme(legend.text=element_text(size=6)) + theme(legend.position="top") + theme(axis.title.y = element_blank()) + theme(axis.title.x = element_blank()) + theme(axis.text.y = element_blank()) + theme(axis.text.x = element_blank()) + theme(axis.ticks = element_blank()) + theme(strip.text = element_text(size=6)) + #guides(color = guide_legend(nrow = 2)) + theme(legend.key.size = unit(5, "mm")) ppi <- 300 png("../figure/mapplot.png", width=12/2.54*ppi, height=10/2.54*ppi, res=ppi) mapplot dev.off() ### Faceted showing population shares of explanations load("~/workspace/lits/attrib/attrib_year2013/data/lits.RData") library(survey) d.df <- svydesign(id = ~SerialID, weights = ~weight, data = df) t <- data.frame(prop.table(svytable(~group_general+cntry+q309_rec, d.df), 2)*100) t$Freq <- round(t$Freq, 1) t <- subset(t, Freq > 0) t#names(t2) <- c("group1","country","social.blame","individual.blame","individual.fate","dont.know","social.fate","not.stated") summary(map.df$NAME) summary(t$cntry) map.df.facet <- merge(map.df,t,by.x="NAME",by.y="cntry",all.y=TRUE) map.df.facet <- map.df.facet[order(map.df.facet$order),] # for blame explanations only map.df.facet <- subset(map.df.facet, q309_rec %in% c("Social Blame", "Individual Blame")) summary(map.df.facet$NAME) mapplotfacet <- ggplot(map.df.facet, aes(long,lat,group=group)) + geom_polygon(data = map.df, aes(long,lat), fill="gray90", color = "white") + geom_polygon(aes(fill = Freq)) + geom_polygon(data = map.df.facet, aes(long,lat), fill=NA, color = "white", size=0.1) + #geom_text(data=cnames, aes(long, lat, label = NAME, group=NAME), size=4, color="black") + coord_map(project="orthographic") + theme_minimal() + 
facet_wrap(~q309_rec)+ theme_minimal() + theme(legend.title=element_blank()) + theme(legend.text=element_text(size=6)) + theme(legend.position="top") + theme(axis.title.y = element_blank()) + theme(axis.title.x = element_blank()) + theme(axis.text.y = element_blank()) + theme(axis.text.x = element_blank()) + theme(axis.ticks = element_blank()) + theme(strip.text = element_text(size=6)) + #guides(color = guide_legend(nrow = 2)) + theme(legend.key.size = unit(5, "mm")) ppi <- 300 png("../figure/mapplotfacet.png", width=14/2.54*ppi, height=8/2.54*ppi, res=ppi) mapplotfacet dev.off()
# Script description ---- # Explores the real datasets in order to extract calibration metrics. These are: # media spending patterns, media budgets, etc. The real data set is compared to the # simulated data set and relevant metrics (s.a. correlation matrices) are computed. Also, # graphs for comparison illustrations are computed. ggcorrplot is really nice for # correlation plots! # # Requires: A real data set and the observed.data data frame. # ----------------------------------------------- # Libraries ---- library(tidyverse) library(readxl) library(corrplot) library(ggcorrplot) library(regclass) library(prophet) # Source mmm functions (Nepa's software property) source(paste0(getwd(),"/mmm_functions.R")) select <- dplyr::select slice <- dplyr::slice # ----------------------------------------------- # # Import data ---- # observed.data <- readRDS(paste0(getwd(), "/Data/observed.data.rds")) # ----------------------------------------------- # Data prep ---- data.real <- read_csv2(paste0(getwd(), "/Data/real data/data_real.csv"), col_names = T) summary.data.real <- summary(data.real) data.real$time.index <- 1:nrow(data.real) # ----------------------------------------------- # Decomposition ---- # Subsequently we leverage nepa"s function from the the mmm_functions script which in turn # decomposes the time series with help of the prophet function. # Recode YearWeek as Week data.real <- data.real %>% mutate(Week = substr(YearWeek, 5,6)) %>% select(-YearWeek) # # Seasontrend component for simulation: # # We need to standardize 0-1 before decomposition (recall from the simulation_1 script, # # that trend patterns have to be on that scale!) 
# # Range function # range01 <- function(x) {(x - min(x)) / (max(x) - min(x))} # data.elg.n <- range01(data.real %>% select(-Week)) %>% as_tibble() # data.real.n$Week <- data.real$Week # data.real.n$Year <- data.real$Year # # prophet.real.n <- SeasonTrendProphet(data.real.n, "StoreSales", holidays = NULL) # # prophet.real.n <- SeasonTrendProphet(data.real.n, "StoreSales", holidays = NULL) # data.real.n$seasontrend <- prophet.real.n$trend + prophet.real.n$yearly # # seasontrend.sim.real.n <- as.numeric(append(data.real.n$seasontrend[1:54], data.real.n$seasontrend)) # saveRDS(seasontrend.sim.real.n, paste0(getwd(), "/Data/seasontrend.sim.real.n.rds")) prophet.real <- SeasonTrendProphet(data.real, "StoreSales", holidays = NULL) # Add seasontrend as a column in model data data.real$seasontrend <- prophet.real$trend + prophet.real$yearly # Remove Week (and WeekNumber) data.real <- data.real %>% select(-c(Week, WeekNumber)) # ----------------------------------------------- # Exploration ---- # Variables and summary stats -- names(data.real) summary(data.real) # Correlation matrix befor decay transformation -- col <- colorRampPalette(c("red", "white", "blue"))(20) corrplot(cor(data.real), tl.cex = 0.6, col = col) corrplot(cor(data.real), tl.cex = 0.6, col = col, method = "number") # VIF values -- real.fit <- lm(StoreSales ~. 
, data = data.real %>% select(-time.index)) summary(real.fit) real.vif <- VIF(real.fit) real.vif # Select media variables of interest ---- vars.real <- c("Display", "DM", "DM_Insert", "Facebook", "OOH", "Print", "Search", "TVC", "Youtube") # ----------------------------------------------- # Plots ---- # Sales time-series -- ggplot(data.real, aes(x = time.index, y = StoreSales)) + geom_line() + geom_line(aes(x = time.index, y = seasontrend), col = "blue") + labs(x = "week", y = "sales") # Spending patterns -- data.real.long <- data.real %>% pivot_longer(cols = vars.real) ggplot(data.real.long %>% filter(name != "StoreSales"), aes(x = time.index, y = value, col = name)) + geom_line() # We might choose these three channels... ggplot(data.real %>% pivot_longer(cols = c(Display, Print)), aes(x = time.index, y = value, col = name)) + geom_line() # ----------------------------------------------- # Calibration ---- # In particular, we extract yearly budgets and spending patterns for 3 selected # media channels and make various comparisons. The level and general nature of the # simulated data is mainly driven by either the population size or the market.rate # (both in the simulation script) which is set to be equal to seasontrend (extracted # from real data with prophet - see above) # As the simulated data consists of 3 years of weekly data (156 obs) after burn-in # is droped, we need to adjust the length of the data we use in the simulation... # Recall that we simulate 4 years of weekly data (208 obs) where the first year is # later to be dropped. hence we simply duplicate the first year (of real data). 
# Choose 2 media channels ---- media.names <- c("Display", "Print") # ----------------------------------------------- # Yearly budget ---- # Define index i.1 <- 1:52 i.2 <- 53:105 i.3 <- 106:nrow(data.elg) budget.1 <- data.elg %>% filter(time.index %in% i.1) %>% select(media.names) %>% summarise_all(.funs = ~sum(.x)) budget.2 <- data.elg %>% filter(time.index %in% i.2) %>% select(media.names) %>% summarise_all(.funs = ~sum(.x)) yearly.budget <- bind_rows(budget.1, budget.2) # Duplicate first year (because the burn in is dropped) yearly.budget <- bind_rows(budget.1, yearly.budget) # # save # saveRDS(yearly.budget, paste0(getwd(), "/Data/yearly.budget.rds")) # ----------------------------------------------- # Spending functions ---- spending <- list() for (i in 1:length(media.names)) { spending[[i]] <- data.elg[, media.names[i]] %>% unlist() %>% as.numeric() spending.1 <- spending[[i]][i.1] / as.numeric(budget.1[1, media.names[i]]) spending.2 <- spending[[i]][i.2] / as.numeric(budget.2[1, media.names[i]]) spending[[i]] <- c(spending.1, spending.2) # Duplicate first year spending[[i]] <- append(spending[[i]][1:54], spending[[i]]) } # # Save # saveRDS(spending, paste0(getwd(), "/Data/spending.rds")) # ----------------------------------------------- # Real vs. 
simulated data ---- # Sales time-series -- # Compare real sales to simulated sales summary_real_sim <- summary(observed.data$revenue) / summary(data.elg$StoreSales) summary_real_sim data.elg$sales.sim <- observed.data$revenue[3:nrow(observed.data)] plot_real_sim <- ggplot(data.elg %>% pivot_longer(cols = c(StoreSales, sales.sim)) %>% mutate(name = ifelse(name == "sales.sim", "simulated", "real")), aes(x = time.index, y = value, col = name)) + geom_line() + labs(x = "time", y = "sales", col = "dataset") + theme_bw() plot_real_sim # Spending patterns -- data.elg$media.1 <- observed.data$media.1.spend[3:nrow(observed.data)] ggplot(data.elg %>% pivot_longer(cols = c(Display, media.1)), aes(x = time.index, y = value, col = name)) + geom_line() data.elg$media.2 <- observed.data$media.2.spend[3:nrow(observed.data)] ggplot(data.elg %>% pivot_longer(cols = c(Print, media.2)), aes(x = time.index, y = value, col = name)) + geom_line() # Make facet wrap for thesis plot_spending_patterns <- ggplot(data.elg %>% rename("media 1" = media.1, "media 2" = media.2) %>% pivot_longer(cols = c("media 1", "media 2")), aes(x = time.index, y = value, col = name)) + geom_line() + facet_grid(cols = vars(name)) + labs(x = "time", y = "media spend", col = "channel") + theme_bw() plot_spending_patterns # Corrplots corr <- round(cor(observed.data %>% select(revenue, market.rate, media.1.spend, media.2.spend) %>% rename(sales = revenue, "market rate" = market.rate, "media 1" = media.1.spend, "media.2" = media.2.spend)), 1) # ggcorrplot is really nice for correlation plots! plot_corr_sim <- ggcorrplot(corr, lab = T, col = c("red", "green", "blue")) + ggtitle("simulated data") plot_corr_sim corr <- round(cor(data.elg %>% select(StoreSales, seasontrend, Display, Print) %>% rename(sales = StoreSales, "market rate" = seasontrend, "media 1" = Display, "media 2" = Print)), 1) plot_corr_real <- ggcorrplot(corr, lab = T, col = c("red", "green", "blue")) + ggtitle("real data") plot_corr_real
/real_data_exploration_calibration.R
no_license
dheimgartner/master-thesis-mmm
R
false
false
8,203
r
# Script description ---- # Explores the real datasets in order to extract calibration metrics. These are: # media spending patterns, media budgets, etc. The real data set is compared to the # simulated data set and relevant metrics (s.a. correlation matrices) are computed. Also, # graphs for comparison illustrations are computed. ggcorrplot is really nice for # correlation plots! # # Requires: A real data set and the observed.data data frame. # ----------------------------------------------- # Libraries ---- library(tidyverse) library(readxl) library(corrplot) library(ggcorrplot) library(regclass) library(prophet) # Source mmm functions (Nepa's software property) source(paste0(getwd(),"/mmm_functions.R")) select <- dplyr::select slice <- dplyr::slice # ----------------------------------------------- # # Import data ---- # observed.data <- readRDS(paste0(getwd(), "/Data/observed.data.rds")) # ----------------------------------------------- # Data prep ---- data.real <- read_csv2(paste0(getwd(), "/Data/real data/data_real.csv"), col_names = T) summary.data.real <- summary(data.real) data.real$time.index <- 1:nrow(data.real) # ----------------------------------------------- # Decomposition ---- # Subsequently we leverage nepa"s function from the the mmm_functions script which in turn # decomposes the time series with help of the prophet function. # Recode YearWeek as Week data.real <- data.real %>% mutate(Week = substr(YearWeek, 5,6)) %>% select(-YearWeek) # # Seasontrend component for simulation: # # We need to standardize 0-1 before decomposition (recall from the simulation_1 script, # # that trend patterns have to be on that scale!) 
# # Range function # range01 <- function(x) {(x - min(x)) / (max(x) - min(x))} # data.elg.n <- range01(data.real %>% select(-Week)) %>% as_tibble() # data.real.n$Week <- data.real$Week # data.real.n$Year <- data.real$Year # # prophet.real.n <- SeasonTrendProphet(data.real.n, "StoreSales", holidays = NULL) # # prophet.real.n <- SeasonTrendProphet(data.real.n, "StoreSales", holidays = NULL) # data.real.n$seasontrend <- prophet.real.n$trend + prophet.real.n$yearly # # seasontrend.sim.real.n <- as.numeric(append(data.real.n$seasontrend[1:54], data.real.n$seasontrend)) # saveRDS(seasontrend.sim.real.n, paste0(getwd(), "/Data/seasontrend.sim.real.n.rds")) prophet.real <- SeasonTrendProphet(data.real, "StoreSales", holidays = NULL) # Add seasontrend as a column in model data data.real$seasontrend <- prophet.real$trend + prophet.real$yearly # Remove Week (and WeekNumber) data.real <- data.real %>% select(-c(Week, WeekNumber)) # ----------------------------------------------- # Exploration ---- # Variables and summary stats -- names(data.real) summary(data.real) # Correlation matrix befor decay transformation -- col <- colorRampPalette(c("red", "white", "blue"))(20) corrplot(cor(data.real), tl.cex = 0.6, col = col) corrplot(cor(data.real), tl.cex = 0.6, col = col, method = "number") # VIF values -- real.fit <- lm(StoreSales ~. 
, data = data.real %>% select(-time.index)) summary(real.fit) real.vif <- VIF(real.fit) real.vif # Select media variables of interest ---- vars.real <- c("Display", "DM", "DM_Insert", "Facebook", "OOH", "Print", "Search", "TVC", "Youtube") # ----------------------------------------------- # Plots ---- # Sales time-series -- ggplot(data.real, aes(x = time.index, y = StoreSales)) + geom_line() + geom_line(aes(x = time.index, y = seasontrend), col = "blue") + labs(x = "week", y = "sales") # Spending patterns -- data.real.long <- data.real %>% pivot_longer(cols = vars.real) ggplot(data.real.long %>% filter(name != "StoreSales"), aes(x = time.index, y = value, col = name)) + geom_line() # We might choose these three channels... ggplot(data.real %>% pivot_longer(cols = c(Display, Print)), aes(x = time.index, y = value, col = name)) + geom_line() # ----------------------------------------------- # Calibration ---- # In particular, we extract yearly budgets and spending patterns for 3 selected # media channels and make various comparisons. The level and general nature of the # simulated data is mainly driven by either the population size or the market.rate # (both in the simulation script) which is set to be equal to seasontrend (extracted # from real data with prophet - see above) # As the simulated data consists of 3 years of weekly data (156 obs) after burn-in # is droped, we need to adjust the length of the data we use in the simulation... # Recall that we simulate 4 years of weekly data (208 obs) where the first year is # later to be dropped. hence we simply duplicate the first year (of real data). 
# Choose 2 media channels ---- media.names <- c("Display", "Print") # ----------------------------------------------- # Yearly budget ---- # Define index i.1 <- 1:52 i.2 <- 53:105 i.3 <- 106:nrow(data.elg) budget.1 <- data.elg %>% filter(time.index %in% i.1) %>% select(media.names) %>% summarise_all(.funs = ~sum(.x)) budget.2 <- data.elg %>% filter(time.index %in% i.2) %>% select(media.names) %>% summarise_all(.funs = ~sum(.x)) yearly.budget <- bind_rows(budget.1, budget.2) # Duplicate first year (because the burn in is dropped) yearly.budget <- bind_rows(budget.1, yearly.budget) # # save # saveRDS(yearly.budget, paste0(getwd(), "/Data/yearly.budget.rds")) # ----------------------------------------------- # Spending functions ---- spending <- list() for (i in 1:length(media.names)) { spending[[i]] <- data.elg[, media.names[i]] %>% unlist() %>% as.numeric() spending.1 <- spending[[i]][i.1] / as.numeric(budget.1[1, media.names[i]]) spending.2 <- spending[[i]][i.2] / as.numeric(budget.2[1, media.names[i]]) spending[[i]] <- c(spending.1, spending.2) # Duplicate first year spending[[i]] <- append(spending[[i]][1:54], spending[[i]]) } # # Save # saveRDS(spending, paste0(getwd(), "/Data/spending.rds")) # ----------------------------------------------- # Real vs. 
simulated data ---- # Sales time-series -- # Compare real sales to simulated sales summary_real_sim <- summary(observed.data$revenue) / summary(data.elg$StoreSales) summary_real_sim data.elg$sales.sim <- observed.data$revenue[3:nrow(observed.data)] plot_real_sim <- ggplot(data.elg %>% pivot_longer(cols = c(StoreSales, sales.sim)) %>% mutate(name = ifelse(name == "sales.sim", "simulated", "real")), aes(x = time.index, y = value, col = name)) + geom_line() + labs(x = "time", y = "sales", col = "dataset") + theme_bw() plot_real_sim # Spending patterns -- data.elg$media.1 <- observed.data$media.1.spend[3:nrow(observed.data)] ggplot(data.elg %>% pivot_longer(cols = c(Display, media.1)), aes(x = time.index, y = value, col = name)) + geom_line() data.elg$media.2 <- observed.data$media.2.spend[3:nrow(observed.data)] ggplot(data.elg %>% pivot_longer(cols = c(Print, media.2)), aes(x = time.index, y = value, col = name)) + geom_line() # Make facet wrap for thesis plot_spending_patterns <- ggplot(data.elg %>% rename("media 1" = media.1, "media 2" = media.2) %>% pivot_longer(cols = c("media 1", "media 2")), aes(x = time.index, y = value, col = name)) + geom_line() + facet_grid(cols = vars(name)) + labs(x = "time", y = "media spend", col = "channel") + theme_bw() plot_spending_patterns # Corrplots corr <- round(cor(observed.data %>% select(revenue, market.rate, media.1.spend, media.2.spend) %>% rename(sales = revenue, "market rate" = market.rate, "media 1" = media.1.spend, "media.2" = media.2.spend)), 1) # ggcorrplot is really nice for correlation plots! plot_corr_sim <- ggcorrplot(corr, lab = T, col = c("red", "green", "blue")) + ggtitle("simulated data") plot_corr_sim corr <- round(cor(data.elg %>% select(StoreSales, seasontrend, Display, Print) %>% rename(sales = StoreSales, "market rate" = seasontrend, "media 1" = Display, "media 2" = Print)), 1) plot_corr_real <- ggcorrplot(corr, lab = T, col = c("red", "green", "blue")) + ggtitle("real data") plot_corr_real
#' chrFreqs #' #' Count features per chromosome #' #' @param gr A GenomicRanges object. #' @param chrs A character vector of chromosome names. #' #' @return A named character vector indicating the number of feature #' counts per chromosome in a GenomicRanges object. This is done by #' running GRanges' seqnames() function, converting the output to a #' table, and converting the table to a named character vector, according #' to the names passed to the function. This is useful to create a #' matrix or a data frame from a list of genomic ranges. #' #' @examples #' if(require(rtracklayer, quietly = TRUE)) { #' gr <- system.file("extdata", "BED12_A.bed", package="smallCAGEqc") %>% #' rtracklayer::import.bed #' chrFreqs(gr, c("chr1", "chr2", "chrX", "chrY", "chrZ")) #' } #' #' @importFrom GenomicRanges seqnames #' @importFrom BiocGenerics table #' @export chrFreqs chrFreqs <- function(gr, chrs) gr %>% GenomicRanges::seqnames() %>% BiocGenerics::table() %>% tableToNamedVector(chrs) tableToNamedVector <- function(tbl, nms) tbl %>% extract(nms) %>% as.vector %>% set_names(nms)
/R/chrFreqs.R
permissive
charles-plessy/smallCAGEqc
R
false
false
1,131
r
#' chrFreqs #' #' Count features per chromosome #' #' @param gr A GenomicRanges object. #' @param chrs A character vector of chromosome names. #' #' @return A named character vector indicating the number of feature #' counts per chromosome in a GenomicRanges object. This is done by #' running GRanges' seqnames() function, converting the output to a #' table, and converting the table to a named character vector, according #' to the names passed to the function. This is useful to create a #' matrix or a data frame from a list of genomic ranges. #' #' @examples #' if(require(rtracklayer, quietly = TRUE)) { #' gr <- system.file("extdata", "BED12_A.bed", package="smallCAGEqc") %>% #' rtracklayer::import.bed #' chrFreqs(gr, c("chr1", "chr2", "chrX", "chrY", "chrZ")) #' } #' #' @importFrom GenomicRanges seqnames #' @importFrom BiocGenerics table #' @export chrFreqs chrFreqs <- function(gr, chrs) gr %>% GenomicRanges::seqnames() %>% BiocGenerics::table() %>% tableToNamedVector(chrs) tableToNamedVector <- function(tbl, nms) tbl %>% extract(nms) %>% as.vector %>% set_names(nms)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc-sex-props.R, R/export-survey-indices.R \name{props_surv} \alias{props_surv} \alias{props_surv_data_summary} \alias{export_survey_indices} \title{Calculate the proportions female for the survey data} \usage{ props_surv( surv_series = 1:4, surv_series_names = c("qcsss", "hsmas", "hsss", "wcviss"), surv_sets = survey_sets, surv_samples = survey_samples, ... ) props_surv_data_summary( surv_samples, surv_series = c(1, 3, 4, 16), surv_series_names = c("QCS", "HS", "WCVI", "WCHG") ) export_survey_indices( survey_index, surv_series = c(2, 3, 4, 5, 17), surv_series_names = c("SYN QCS", "OTHER HS MSA", "SYN HS", "SYN WCVI", "SYN WCHG"), iphc = NULL, discard_cpue = NULL, stitched_syn = NULL, write_to_file = TRUE, append = FALSE, ... ) } \arguments{ \item{surv_series}{Values of \code{survey_series_id} which is a column of \code{surv_samples}} \item{surv_series_names}{Names to be associated to the values of \code{surv_series}} \item{surv_sets}{A data frame object returned from \code{\link[gfdata:get_data]{gfdata::get_survey_sets()}}} \item{surv_samples}{Output from \code{\link[gfdata:get_data]{gfdata::get_survey_samples()}}} \item{...}{Arguments passed to \code{\link[=props_comm]{props_comm()}}} \item{survey_index}{Survey index data frame as output by \code{\link[gfdata:get_data]{gfdata::get_survey_index()}}} \item{iphc}{The IPHC index as read in from iphc-survey-index.rds} \item{discard_cpue}{The discard CPUE index as read from cpue-predictions-arrowtooth-flounder-modern-3CD5ABCDE-discard-july-26-feb-fishing-year} \item{stitched_syn}{The stitched synoptic index as read in from stitched-syn-index.rds} \item{write_to_file}{If \code{TRUE}, write the output to the file. If \code{FALSE}, return the data frame} \item{append}{If \code{TRUE}, append the output to the file. 
If \code{FALSE}, overwrite the file} } \value{ A data frame containing proportions of females for the required surveys A data frame summarizing the data A list of survey indices for pasting into a iSCAM data file } \description{ Calculate the proportions female for the survey data Summarize the data used to make the proportions female table Extract the female survey index data for pasting into the iSCAM data file. The 'Discard CPUE' index must be multiplied by the proportions female from the commercial fishery so \code{comm_samples} is required } \details{ The CV can be calculated as CV = SD / MEAN or from the Standard error: CV = sqrt(exp(SE^2) - 1) } \examples{ \dontrun{ si <- gfdata::get_survey_index("arrowtooth flounder") extract_survey_indices (survey_index, start_year = 1996, end_year = 2019, species_category = 1) } }
/man/props_surv.Rd
no_license
pbs-assess/arrowtooth
R
false
true
2,753
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc-sex-props.R, R/export-survey-indices.R \name{props_surv} \alias{props_surv} \alias{props_surv_data_summary} \alias{export_survey_indices} \title{Calculate the proportions female for the survey data} \usage{ props_surv( surv_series = 1:4, surv_series_names = c("qcsss", "hsmas", "hsss", "wcviss"), surv_sets = survey_sets, surv_samples = survey_samples, ... ) props_surv_data_summary( surv_samples, surv_series = c(1, 3, 4, 16), surv_series_names = c("QCS", "HS", "WCVI", "WCHG") ) export_survey_indices( survey_index, surv_series = c(2, 3, 4, 5, 17), surv_series_names = c("SYN QCS", "OTHER HS MSA", "SYN HS", "SYN WCVI", "SYN WCHG"), iphc = NULL, discard_cpue = NULL, stitched_syn = NULL, write_to_file = TRUE, append = FALSE, ... ) } \arguments{ \item{surv_series}{Values of \code{survey_series_id} which is a column of \code{surv_samples}} \item{surv_series_names}{Names to be associated to the values of \code{surv_series}} \item{surv_sets}{A data frame object returned from \code{\link[gfdata:get_data]{gfdata::get_survey_sets()}}} \item{surv_samples}{Output from \code{\link[gfdata:get_data]{gfdata::get_survey_samples()}}} \item{...}{Arguments passed to \code{\link[=props_comm]{props_comm()}}} \item{survey_index}{Survey index data frame as output by \code{\link[gfdata:get_data]{gfdata::get_survey_index()}}} \item{iphc}{The IPHC index as read in from iphc-survey-index.rds} \item{discard_cpue}{The discard CPUE index as read from cpue-predictions-arrowtooth-flounder-modern-3CD5ABCDE-discard-july-26-feb-fishing-year} \item{stitched_syn}{The stitched synoptic index as read in from stitched-syn-index.rds} \item{write_to_file}{If \code{TRUE}, write the output to the file. If \code{FALSE}, return the data frame} \item{append}{If \code{TRUE}, append the output to the file. 
If \code{FALSE}, overwrite the file} } \value{ A data frame containing proportions of females for the required surveys A data frame summarizing the data A list of survey indices for pasting into a iSCAM data file } \description{ Calculate the proportions female for the survey data Summarize the data used to make the proportions female table Extract the female survey index data for pasting into the iSCAM data file. The 'Discard CPUE' index must be multiplied by the proportions female from the commercial fishery so \code{comm_samples} is required } \details{ The CV can be calculated as CV = SD / MEAN or from the Standard error: CV = sqrt(exp(SE^2) - 1) } \examples{ \dontrun{ si <- gfdata::get_survey_index("arrowtooth flounder") extract_survey_indices (survey_index, start_year = 1996, end_year = 2019, species_category = 1) } }
#include "AEConfig.h" #include "AE_EffectVers.h" #ifndef AE_OS_WIN #include <AE_General.r> #endif resource 'PiPL' (16000) { { /* array properties: 12 elements */ /* [1] */ Kind { AEEffect }, /* [2] */ Name { "MedianFilter" }, /* [3] */ Category { "Abramov plugins" }, #ifdef AE_OS_WIN #ifdef AE_PROC_INTELx64 CodeWin64X86 {"EffectMain"}, #endif #else #ifdef AE_OS_MAC CodeMacIntel64 {"EffectMain"}, #endif #endif /* [6] */ AE_PiPL_Version { 2, 0 }, /* [7] */ AE_Effect_Spec_Version { PF_PLUG_IN_VERSION, PF_PLUG_IN_SUBVERS }, /* [8] */ AE_Effect_Version { 524289 /* 1.0 */ }, /* [9] */ AE_Effect_Info_Flags { 0 }, /* [10] */ AE_Effect_Global_OutFlags { 0x02000000 //50332160 }, AE_Effect_Global_OutFlags_2 { 0x00000000 }, /* [11] */ AE_Effect_Match_Name { "ADBE MedianFilter" }, /* [12] */ AE_Reserved_Info { 0 } } };
/MedianFilterPiPL.r
no_license
karvozavr/AE-median-filter-plugin
R
false
false
935
r
#include "AEConfig.h" #include "AE_EffectVers.h" #ifndef AE_OS_WIN #include <AE_General.r> #endif resource 'PiPL' (16000) { { /* array properties: 12 elements */ /* [1] */ Kind { AEEffect }, /* [2] */ Name { "MedianFilter" }, /* [3] */ Category { "Abramov plugins" }, #ifdef AE_OS_WIN #ifdef AE_PROC_INTELx64 CodeWin64X86 {"EffectMain"}, #endif #else #ifdef AE_OS_MAC CodeMacIntel64 {"EffectMain"}, #endif #endif /* [6] */ AE_PiPL_Version { 2, 0 }, /* [7] */ AE_Effect_Spec_Version { PF_PLUG_IN_VERSION, PF_PLUG_IN_SUBVERS }, /* [8] */ AE_Effect_Version { 524289 /* 1.0 */ }, /* [9] */ AE_Effect_Info_Flags { 0 }, /* [10] */ AE_Effect_Global_OutFlags { 0x02000000 //50332160 }, AE_Effect_Global_OutFlags_2 { 0x00000000 }, /* [11] */ AE_Effect_Match_Name { "ADBE MedianFilter" }, /* [12] */ AE_Reserved_Info { 0 } } };
for (i in 1:nrow(key)) { comb <- i ## recover parameters from key pars <- key[comb, ] meDur <- pars[["meDur"]] reStart <- meStart + meDur reDur <- pars[["reDur"]] meInt <- pars[["meInt"]] mu_me <- lambda_bg - log(1 - meInt) / meDur reInt <- pars[["reInt"]] lambda_re <- (log(reInt) - (lambda_bg - mu_me) * meDur) / reDur + mu_bg lambda_null <- stepfun(c(reStart, reStart + reDur), c(lambda_bg, lambda_re, lambda_bg)) mu_null <- stepfun(c(meStart, meStart + meDur), c(mu_bg, mu_me, mu_bg)) expNt <- Vectorize(function(t) { exp(integrate(function(x) lambda_null(x) - mu_null(x), 0, t)$value) }) expTotalNt <- function(t) { integrate(function(x) lambda_null(x) * expNt(x), 0, t, stop.on.error = FALSE)$value + 1 } tMax <- uniroot(function(t) expTotalNt(t) - nExp, c(meStart + meDur + reDur, meStart + meDur + reDur + 10), extendInt = "yes")$root bmSigma2 <- pars[["bmSigma2"]] stQ01 <- pars[["stQ01"]] stQ10 <- pars[["stQSum"]] - stQ01 stQ <- matrix(c(0, stQ10, stQ01, 0), 2, 2) meanLambda <- function(l) { exponent <- Vectorize(function(t, l) { integrate(function(x) l - l/5* expected.trait(stQ01, stQ10, x), 0, t)$value }) integrate(function(t) t * (l - l/5 * expected.trait(stQ01, stQ10, t)) * exp(-exponent(t, l)), 0, Inf)$value } lambda_bg_0 <- uniroot(function(l) meanLambda(l) - 1/lambda_bg, interval = c(0.1, 1), extendInt = "yes")$root lambdaModCont <- pars[["lambdaModCont"]] lambda <- function(t, traits) { reStart <- meStart + meDur reEnd <- reStart + reDur ifelse((t < reStart) || (t > reEnd), # BG lambda_bg_0 - lambda_bg_0/5 * traits[2], # RE lambda_re + lambdaModCont * lambda_re * traits[1]) } meanMu <- function(m) { exponent <- Vectorize(function(t, m) { integrate(function(x) m - m/5* expected.trait(stQ01, stQ10, x), 0, t)$value }) integrate(function(t) t * (m - m/5 * expected.trait(stQ01, stQ10, t)) * exp(-exponent(t, m)), 0, Inf)$value } mu_me_0 <- uniroot(function(m) meanMu(m) - 1/mu_me, interval = c(0.1, 1), extendInt = "yes")$root muModCont <- pars[["muModCont"]] mu <- 
function(t, traits) { ifelse((t < meStart) || (t > meStart + meDur), # BG mu_bg + muModCont * mu_bg * traits[1], # ME mu_me_0 - mu_me_0/5 * traits[2]) } expNt1 <- Vectorize(function(t) { exp(integrate(Vectorize(function(x) lambda(x, c(0, expected.trait(stQ01, stQ10, x))) - mu(x, c(0, expected.trait(stQ01, stQ10, x)))), 0, t)$value) }) expTotalNt1 <- function(t) { integrate(Vectorize(function(x) lambda(x, c(0, expected.trait(stQ01, stQ10, x))) * expNt1(x)), 0, t, stop.on.error = FALSE)$value + 1 } print(paste0(tMax, " ", reStart + reDur, ", Diff = ", tMax - (reStart + reDur))) print(paste0("Comb: ", comb)) print(paste0("Null: ", expNt(tMax), ", Trait: ", expNt1(tMax))) print(paste0("Null: ", expTotalNt(tMax), ", Trait: ", expTotalNt1(tMax))) }
/Simulation/Tests_pipeline.R
no_license
brpetrucci/EvolProject2021
R
false
false
3,439
r
for (i in 1:nrow(key)) { comb <- i ## recover parameters from key pars <- key[comb, ] meDur <- pars[["meDur"]] reStart <- meStart + meDur reDur <- pars[["reDur"]] meInt <- pars[["meInt"]] mu_me <- lambda_bg - log(1 - meInt) / meDur reInt <- pars[["reInt"]] lambda_re <- (log(reInt) - (lambda_bg - mu_me) * meDur) / reDur + mu_bg lambda_null <- stepfun(c(reStart, reStart + reDur), c(lambda_bg, lambda_re, lambda_bg)) mu_null <- stepfun(c(meStart, meStart + meDur), c(mu_bg, mu_me, mu_bg)) expNt <- Vectorize(function(t) { exp(integrate(function(x) lambda_null(x) - mu_null(x), 0, t)$value) }) expTotalNt <- function(t) { integrate(function(x) lambda_null(x) * expNt(x), 0, t, stop.on.error = FALSE)$value + 1 } tMax <- uniroot(function(t) expTotalNt(t) - nExp, c(meStart + meDur + reDur, meStart + meDur + reDur + 10), extendInt = "yes")$root bmSigma2 <- pars[["bmSigma2"]] stQ01 <- pars[["stQ01"]] stQ10 <- pars[["stQSum"]] - stQ01 stQ <- matrix(c(0, stQ10, stQ01, 0), 2, 2) meanLambda <- function(l) { exponent <- Vectorize(function(t, l) { integrate(function(x) l - l/5* expected.trait(stQ01, stQ10, x), 0, t)$value }) integrate(function(t) t * (l - l/5 * expected.trait(stQ01, stQ10, t)) * exp(-exponent(t, l)), 0, Inf)$value } lambda_bg_0 <- uniroot(function(l) meanLambda(l) - 1/lambda_bg, interval = c(0.1, 1), extendInt = "yes")$root lambdaModCont <- pars[["lambdaModCont"]] lambda <- function(t, traits) { reStart <- meStart + meDur reEnd <- reStart + reDur ifelse((t < reStart) || (t > reEnd), # BG lambda_bg_0 - lambda_bg_0/5 * traits[2], # RE lambda_re + lambdaModCont * lambda_re * traits[1]) } meanMu <- function(m) { exponent <- Vectorize(function(t, m) { integrate(function(x) m - m/5* expected.trait(stQ01, stQ10, x), 0, t)$value }) integrate(function(t) t * (m - m/5 * expected.trait(stQ01, stQ10, t)) * exp(-exponent(t, m)), 0, Inf)$value } mu_me_0 <- uniroot(function(m) meanMu(m) - 1/mu_me, interval = c(0.1, 1), extendInt = "yes")$root muModCont <- pars[["muModCont"]] mu <- 
function(t, traits) { ifelse((t < meStart) || (t > meStart + meDur), # BG mu_bg + muModCont * mu_bg * traits[1], # ME mu_me_0 - mu_me_0/5 * traits[2]) } expNt1 <- Vectorize(function(t) { exp(integrate(Vectorize(function(x) lambda(x, c(0, expected.trait(stQ01, stQ10, x))) - mu(x, c(0, expected.trait(stQ01, stQ10, x)))), 0, t)$value) }) expTotalNt1 <- function(t) { integrate(Vectorize(function(x) lambda(x, c(0, expected.trait(stQ01, stQ10, x))) * expNt1(x)), 0, t, stop.on.error = FALSE)$value + 1 } print(paste0(tMax, " ", reStart + reDur, ", Diff = ", tMax - (reStart + reDur))) print(paste0("Comb: ", comb)) print(paste0("Null: ", expNt(tMax), ", Trait: ", expNt1(tMax))) print(paste0("Null: ", expTotalNt(tMax), ", Trait: ", expTotalNt1(tMax))) }
### PRELIMINARY AND UNCHECKED ### # Clear the console cat("\014") # Remove every object in the environment rm(list = ls()) #install and load packages lib <- c("dplyr", "stringdist", "tidyr") #sapply(lib, function(x) install.packages(x)) sapply(lib, function(x) require(x, character.only = TRUE)) # set working directory setwd("~/Google Drive/R/Service Providers/data/intermediate/") ### # load data services <- read.csv("~/Google Drive/R/Service Providers/data/mode/services_received_20160224.csv", as.is = TRUE) providers <- read.csv("~/Google Drive/R/Service Providers/data/mode/service_provider_categories_20160225.csv", as.is = TRUE) deluxe <- read.csv("~/Google Drive/R/Service Providers/data/mode/deluxe_districts_20160229.csv", as.is = TRUE) #line <- read.csv("~/Google Drive/R/Service Providers/data/mode/line_items_20160222.csv", as.is = TRUE) #neca <- read.csv("~/Google Drive/R/Service Providers/data/neca/neca_members_2016.csv", as.is = TRUE) # import manually matched NECA providers -- national providers are excluded neca_review <- read.csv("~/Google Drive/R/Service Providers/data/neca/neca_manual_review_20160309.csv", as.is = TRUE) # import NECA manual matches -- national providers are included possible_neca_review <- read.csv("~/Google Drive/R/Service Providers/data/neca/neca_manual_review_20160226.csv", as.is = TRUE) # take care of un-clean import / rename columns in NECA data neca_review <- neca_review[, c(2, 5)] names(neca_review) <- c("neca_name", "service_provider_name") possible_neca_review <- possible_neca_review[, c(2, 5)] names(possible_neca_review) <- c("neca_name", "possible_service_provider_name") # merge neca_review <- dplyr::left_join(neca_review, possible_neca_review, by = c("neca_name")) # change column types in deluxe table deluxe$ia_cost_per_mbps <- as.numeric(deluxe$ia_cost_per_mbps) deluxe$ia_bandwidth_per_student <- as.numeric(deluxe$ia_bandwidth_per_student) # join provider SPINS to services providers <- providers[!duplicated(providers$name), 
] providers<- dplyr::rename(providers, service_provider_name = name) services <- left_join(services, providers, by = c("service_provider_name")) # tagging NECA in three step # 1. service providers matched through manual review # 2. additional restriction for small town or rural locale services$neca <- ifelse(tolower(services$service_provider_name) %in% tolower(neca_review$service_provider_name), 1, 0) # & services$locale %in% c('Rural', 'Small Town'), 1, 0) services$possible_neca <- ifelse(tolower(services$service_provider_name) %in% tolower(neca_review$possible_service_provider_name), 1, 0) # & services$locale %in% c('Rural', 'Small Town'), 1, 0) # 2. large service providers who are under neca tariffs only in certain region # table this part of tagging for now because there is concern that we may be over-tagging # update per conversation with Jen O. on 2.29.2016 ##Armstrong ##Contains ‘Armstrong Tel’ in service_provider_name ##Allocated to a rural & small town locale in NY, PA, WV, MD armstrong <- which(grepl("Armstrong Cable", services$service_provider_name, ignore.case = TRUE) & services$locale %in% c("Rural", "Small Town") & services$postal_cd %in% c("NY", "PA", "WV", "MD")) services[armstrong, ]$possible_neca <- 1 rm(armstrong) ##Centurytel ##Contains ‘CenturyLink’ in service_provider_name ##Allocated to a rural & small town locale in LA, MI, AR, AL, IN, WI, CO, ID, MN, MO, MS, OH, OR, NV, WA, WY centurylink <- which(grepl("CenturyLink", services$service_provider_name, ignore.case = TRUE) & services$locale %in% c("Rural", "Small Town") & services$postal_cd %in% c("LA", "MI", "AR", "AL", "IN", "WI", "CO", "ID", "MN", "MO", "MS", "OH", "OR", "NV", "WA", "WY")) services[centurylink, ]$possible_neca <- 1 rm(centurylink) ##Citizens Telecommunications ##Contains ‘Citizens Tel’ in service_provider_name ##Allocated to a rural & small town locale in NY, NE, WV, CA, OR, TN, UT, ID, IL, MN, MT, NV citizenstel <- which(grepl("citizens tel", services$service_provider_name, 
ignore.case = TRUE) & services$locale %in% c("Rural", "Small Town") & services$postal_cd %in% c("NY", "NE", "WV", "CA", "OR", "TN", "UT", "ID", "IL", "MN", "MT", "NV")) services[citizenstel, ]$possible_neca <- 1 rm(citizenstel) ##Frontier Communications ##Contains ‘Frontier Comm’ in service_provider_name ##Allocated to a rural & small town district in TX, AL, GA, IL, IN, IA, MI, MN, MS, NY, PA, NC, SC, WI frontier <- which(grepl("Frontier Comm", services$service_provider_name, ignore.case = TRUE) & services$locale %in% c("Rural", "Small Town") & services$postal_cd %in% c("TX", "AL", "GA", "IL", "IN", "IA", "MI", "MN", "MS", "NY", "PA", "NC", "SC", "WI")) services[frontier, ]$possible_neca <- 1 rm(frontier) ##Verizon ##Contains ‘Verizon’ in service_provider_name ##Allocated to a rural & small town district in CA, AZ, DE, FL, MD, NJ, NY, PA, VA, DC, NC verizon <- which(grepl("Verizon", services$service_provider_name, ignore.case = TRUE) & services$locale %in% c("Rural", "Small Town") & services$postal_cd %in% c("CA", "AZ", "DE", "FL", "MD", "NJ", "NY", "PA", "VA", "DC", "NC")) services[verizon, ]$possible_neca <- 1 rm(verizon) ##Windstream ##Contains ‘Windstream’ in service_provider_name ##Allocated to a rural & small town district in AL, AR, FL, GA, IA, KY, MS, MO, NE, NY, NC, OH, OK, PA, SC windstream <- which(grepl("windstream", services$service_provider_name, ignore.case = TRUE) & services$locale %in% c("Rural", "Small Town") & services$postal_cd %in% c("AL", "AR", "FL", "GA", "IA", "KY", "MS", "MO", "NE", "NY", "NC", "OH", "OK", "PA", "SC")) services[windstream, ]$possible_neca <- 1 rm(windstream) # 3. TDS telecom which has many many subsidiaries # subsidiaries that appear in the NECA data # and their SPINs according to website http://www.tdsbusiness.com/e-rate/spin.aspx # Communication Corporation of Michigan 143001691 # Concord Telephone Exchange 143001627 # Continental Telephone Co. 143001658 # Decatur Telephone Co. 143002261 # Deposit Telephone Co. 
143001327 # Dickeyville Telephone, LLC 143001791 # EastCoast Telecom, Inc. 143001813 # Edwards Telephone Co., Inc. 143001329 # Kearsarge Telephone Co. 143001297 # Leslie County Telephone Co. 143001572 # Lewis River Telephone Co. 143002599 # Lewisport Telephone Co. 143001573 # Little Miami Communications Corporation 143001661 # Ludlow Telephone Company 143001307 # McClellanville Telephone Co. 143001523 # Mahanoy & Mahantango Tel. Co. 143001380 # McDaniel Telephone Company 143002600 # Merchants & FarmersTelephone Co. 143001744 # Merrimack County Telephone 143001299 # Mid-State Telephone Co. 143002119 # Mid-Plains Telephone, LLC 143001795 # Midway Telephone Co. 143001809 # Myrtle Telephone Co., Inc 143001621 # New Castle Telephone Co. 143001421 # New London Telephone Co. 143002354 # Norway Telephone Company 143001524 # Oakman Telephone Co. 143001555 # Oakwood Oakwood Telephone Company 143001672 # Oklahoma Communication Systems, Inc. 143002382 # Orchard Farm Telephone Co. 143002359 # Port Byron Telephone Co. 143001347 # Potlatch Telephone Co. 143002520 ID # Quincy Telephone Co. 143001447 # Riverside Telecom, LLC 143001831 # S & W Telephone Co. 143001755 # Salem Telephone Company 143001577 # Saluda Mountain Telephone Co. 143001498 # Scandinavia Telephone Co. 143001833 # Service Telephone Co. 143001499 # Shiawassee Telephone Co. 143001719 # Stockbridge & Sherwood Telephone Company 143001840 # The Stoutland Telephone Co. 143002365 # Strasburg Strasburg Telephone Co. 143002505 # Sugar Valley Telephone Co. 143001393 # Tenney Telephone Co. 143001843 # Tipton Telephone 143001762 # Tri-County Telephone Co. 143001763 # The Vanlue Telephone Company 143001681 # Virginia Telephone Co. 143001417 # Warren Telephone Co. 143001283 # Winsted Telephone Company 143002149 # Winterhaven Telephone Co. 143002654 # Wolverine Telephone Co. 
143001725 tds <- which(services$spin %in% c(143001691, 143001627, 143001658, 143002261, 143001327, 143001791, 143001813, 143001329, 143001297, 143001572, 143002599, 143001573, 143001661, 143001307, 143001523, 143001380, 143002600, 143001744, 143001299, 143002119, 143001795, 143001809, 143001621, 143001421, 143002354, 143001524, 143001555, 143001672, 143002382, 143002359,143001347 , 143002520, 143001447, 143001831, 143001755, 143001577, 143001498, 143001833, 143001499, 143001719, 143001840, 143002365, 143002505, 143001393, 143001843, 143001762, 143001763, 143001681, 143001417, 143001283, 143002149, 143002654, 143001725) & services$locale %in% c('Rural', 'Small Town')) services[tds, ]$neca <- 1 services[tds, ]$possible_neca <- 1 rm(tds) # NECA tags? #sum(services$possible_neca) write.csv(services, "01_services_tagged_neca.csv", row.names = FALSE) # version for the engineering team # no duplicates services_eng <- services[!duplicated(services$line_item_id), ] # both are 0 services_eng <- services[services$neca == 1 | services$possible_neca == 1, ] write.csv(services_eng, "01_services_tagged_neca_eng.csv", row.names = FALSE) ## end
/Old_Analysis/NECA Analysis/01_tag_neca_services_received.R
no_license
mtejas88/esh
R
false
false
9,497
r
### PRELIMINARY AND UNCHECKED ###
# Tags E-rate service records with NECA (rural-carrier tariff) membership
# flags and writes two CSVs into the working directory:
#   01_services_tagged_neca.csv     - all services with neca / possible_neca flags
#   01_services_tagged_neca_eng.csv - de-duplicated, NECA-flagged subset for engineering

# Clear the console
cat("\014")
# Remove every object in the environment
rm(list = ls())

# Load packages. library() errors immediately when a package is missing,
# unlike require(), which only returns FALSE and lets the script limp on.
# (Install first if needed: install.packages(c("dplyr", "stringdist", "tidyr")))
library(dplyr)
library(stringdist)
library(tidyr)

# set working directory (all output files land here)
setwd("~/Google Drive/R/Service Providers/data/intermediate/")

### ---- load data ------------------------------------------------------------
services <- read.csv("~/Google Drive/R/Service Providers/data/mode/services_received_20160224.csv", as.is = TRUE)
providers <- read.csv("~/Google Drive/R/Service Providers/data/mode/service_provider_categories_20160225.csv", as.is = TRUE)
deluxe <- read.csv("~/Google Drive/R/Service Providers/data/mode/deluxe_districts_20160229.csv", as.is = TRUE)

# import manually matched NECA providers -- national providers are excluded
neca_review <- read.csv("~/Google Drive/R/Service Providers/data/neca/neca_manual_review_20160309.csv", as.is = TRUE)
# import NECA manual matches -- national providers are included
possible_neca_review <- read.csv("~/Google Drive/R/Service Providers/data/neca/neca_manual_review_20160226.csv", as.is = TRUE)

# take care of un-clean import / rename columns in NECA data:
# keep only the NECA-name and matched-provider-name columns
neca_review <- neca_review[, c(2, 5)]
names(neca_review) <- c("neca_name", "service_provider_name")
possible_neca_review <- possible_neca_review[, c(2, 5)]
names(possible_neca_review) <- c("neca_name", "possible_service_provider_name")

# merge the confirmed and possible matches on the NECA name
neca_review <- dplyr::left_join(neca_review, possible_neca_review, by = c("neca_name"))

# change column types in deluxe table (read in as character)
deluxe$ia_cost_per_mbps <- as.numeric(deluxe$ia_cost_per_mbps)
deluxe$ia_bandwidth_per_student <- as.numeric(deluxe$ia_bandwidth_per_student)

# join provider SPINs to services (first occurrence per provider name wins)
providers <- providers[!duplicated(providers$name), ]
providers <- dplyr::rename(providers, service_provider_name = name)
services <- left_join(services, providers, by = c("service_provider_name"))

### ---- tag NECA in three steps ----------------------------------------------
# 1. service providers matched through manual review
#    (an additional locale restriction to Rural / Small Town was considered
#    but deliberately left off for this step)
services$neca <- ifelse(tolower(services$service_provider_name) %in%
                          tolower(neca_review$service_provider_name), 1, 0)
services$possible_neca <- ifelse(tolower(services$service_provider_name) %in%
                                   tolower(neca_review$possible_service_provider_name), 1, 0)

# 2. large service providers who are under NECA tariffs only in certain regions:
#    flag rural / small-town records in the states each carrier serves there.
#    (update per conversation with Jen O. on 2.29.2016)

# Sets possible_neca = 1 for rows whose provider name matches `pattern`
# (case-insensitive) in a Rural / Small Town locale within `states`,
# and returns the modified data frame.
tag_possible_neca <- function(df, pattern, states) {
  idx <- which(grepl(pattern, df$service_provider_name, ignore.case = TRUE) &
                 df$locale %in% c("Rural", "Small Town") &
                 df$postal_cd %in% states)
  df$possible_neca[idx] <- 1
  df
}

## Armstrong -- NY, PA, WV, MD
## NOTE(review): original notes said 'Armstrong Tel' but the code matched
## "Armstrong Cable"; the code's pattern is preserved here.
services <- tag_possible_neca(services, "Armstrong Cable",
                              c("NY", "PA", "WV", "MD"))
## CenturyLink (Centurytel)
services <- tag_possible_neca(services, "CenturyLink",
                              c("LA", "MI", "AR", "AL", "IN", "WI", "CO", "ID",
                                "MN", "MO", "MS", "OH", "OR", "NV", "WA", "WY"))
## Citizens Telecommunications
services <- tag_possible_neca(services, "citizens tel",
                              c("NY", "NE", "WV", "CA", "OR", "TN", "UT", "ID",
                                "IL", "MN", "MT", "NV"))
## Frontier Communications
services <- tag_possible_neca(services, "Frontier Comm",
                              c("TX", "AL", "GA", "IL", "IN", "IA", "MI", "MN",
                                "MS", "NY", "PA", "NC", "SC", "WI"))
## Verizon
services <- tag_possible_neca(services, "Verizon",
                              c("CA", "AZ", "DE", "FL", "MD", "NJ", "NY", "PA",
                                "VA", "DC", "NC"))
## Windstream
services <- tag_possible_neca(services, "windstream",
                              c("AL", "AR", "FL", "GA", "IA", "KY", "MS", "MO",
                                "NE", "NY", "NC", "OH", "OK", "PA", "SC"))

# 3. TDS Telecom, which has many subsidiaries, identified by SPIN.
#    SPINs per http://www.tdsbusiness.com/e-rate/spin.aspx; the full
#    subsidiary-name-to-SPIN mapping is documented in the NECA data.
tds_spins <- c(143001691, 143001627, 143001658, 143002261, 143001327,
               143001791, 143001813, 143001329, 143001297, 143001572,
               143002599, 143001573, 143001661, 143001307, 143001523,
               143001380, 143002600, 143001744, 143001299, 143002119,
               143001795, 143001809, 143001621, 143001421, 143002354,
               143001524, 143001555, 143001672, 143002382, 143002359,
               143001347, 143002520, 143001447, 143001831, 143001755,
               143001577, 143001498, 143001833, 143001499, 143001719,
               143001840, 143002365, 143002505, 143001393, 143001843,
               143001762, 143001763, 143001681, 143001417, 143001283,
               143002149, 143002654, 143001725)
tds <- which(services$spin %in% tds_spins &
               services$locale %in% c("Rural", "Small Town"))
services$neca[tds] <- 1
services$possible_neca[tds] <- 1
rm(tds)

# NECA tags?
#sum(services$possible_neca)

write.csv(services, "01_services_tagged_neca.csv", row.names = FALSE)

# version for the engineering team:
# de-duplicate line items, THEN keep only the NECA-flagged rows.
# (BUGFIX: the second step previously subset `services` instead of
# `services_eng`, silently discarding the de-duplication.)
services_eng <- services[!duplicated(services$line_item_id), ]
services_eng <- services_eng[services_eng$neca == 1 | services_eng$possible_neca == 1, ]
write.csv(services_eng, "01_services_tagged_neca_eng.csv", row.names = FALSE)

## end
# Detect outliers relative to a chosen center of the data.
#
# Centers `x` at a location estimate, runs hotspots() on the centered
# values, then shifts every returned quantity back to the original scale.
#
# x            numeric vector of observations (NAs are dropped when centering)
# p            probability used by hotspots() for the cutoff(s)
# tail         which tail(s) to flag: "positive", "negative", or "both"
# distribution distribution assumption passed through to hotspots()
# var.est      scale estimator passed through to hotspots() (e.g. "mad")
# center.est   name of the centering function, e.g. "mean" or "median"
#
# Returns the hotspots object augmented with the center `u`, the `tail`
# argument, re-shifted data/cutoffs, and the name of the input dataset.
outliers <- function(x, p = 0.99, tail = "positive", distribution = "t",
	var.est = "mad", center.est = "mean")
{
	# match.fun() looks the name up as a function directly -- safer and
	# clearer than the original eval(parse(text = center.est)).
	center_fun <- match.fun(center.est)
	u <- center_fun(x[!is.na(x)])
	z <- hotspots(x - u, p = p, tail = tail, distribution = distribution,
	var.est = var.est)
	z$u <- u
	z$tail <- tail
	# shift the centered quantities back onto the original scale
	z$x <- z$x + u
	z$data <- z$data + u
	# `tail` is a length-1 string, so scalar || is appropriate here
	if (tail == "positive" || tail == "both") z$positive.cut <- z$positive.cut + u
	if (tail == "negative" || tail == "both") z$negative.cut <- z$negative.cut + u
	# record the caller's expression for x (e.g. the dataset name) for printing
	z$dataset_name <- deparse(substitute(x))
	z
}
/R/outliers.R
no_license
cran/hotspots
R
false
false
535
r
# Detect outliers relative to a chosen center of the data.
#
# Centers `x` at a location estimate, runs hotspots() on the centered
# values, then shifts every returned quantity back to the original scale.
#
# x            numeric vector of observations (NAs are dropped when centering)
# p            probability used by hotspots() for the cutoff(s)
# tail         which tail(s) to flag: "positive", "negative", or "both"
# distribution distribution assumption passed through to hotspots()
# var.est      scale estimator passed through to hotspots() (e.g. "mad")
# center.est   name of the centering function, e.g. "mean" or "median"
#
# Returns the hotspots object augmented with the center `u`, the `tail`
# argument, re-shifted data/cutoffs, and the name of the input dataset.
outliers <- function(x, p = 0.99, tail = "positive", distribution = "t",
	var.est = "mad", center.est = "mean")
{
	# match.fun() looks the name up as a function directly -- safer and
	# clearer than the original eval(parse(text = center.est)).
	center_fun <- match.fun(center.est)
	u <- center_fun(x[!is.na(x)])
	z <- hotspots(x - u, p = p, tail = tail, distribution = distribution,
	var.est = var.est)
	z$u <- u
	z$tail <- tail
	# shift the centered quantities back onto the original scale
	z$x <- z$x + u
	z$data <- z$data + u
	# `tail` is a length-1 string, so scalar || is appropriate here
	if (tail == "positive" || tail == "both") z$positive.cut <- z$positive.cut + u
	if (tail == "negative" || tail == "both") z$negative.cut <- z$negative.cut + u
	# record the caller's expression for x (e.g. the dataset name) for printing
	z$dataset_name <- deparse(substitute(x))
	z
}
# Reads the household power-consumption file and keeps only the
# measurements recorded on 2007-02-01 and 2007-02-02 (inclusive),
# adding a combined POSIXlt `datetime` column built from Date + Time.
# Returns the filtered data frame.
get.data <- function() {
  src <- file.path("data", "household_power_consumption.txt")
  first.day <- as.Date("2007-02-01", "%Y-%m-%d")
  last.day <- as.Date("2007-02-02", "%Y-%m-%d")

  # read everything; "?" marks missing values in this file
  raw <- read.table(src, sep = ";", header = TRUE, na.strings = "?")

  # keep rows whose date falls inside [first.day, last.day];
  # which() drops NA comparisons, matching subset() semantics
  obs.dates <- as.Date(raw$Date, "%d/%m/%Y")
  selected <- raw[which(obs.dates >= first.day & obs.dates <= last.day), ]

  # combine the Date and Time columns into a single datetime column
  selected$datetime <- strptime(paste(selected$Date, selected$Time),
                                format = "%d/%m/%Y %H:%M:%S")
  selected
}
/getData.R
no_license
erikboertjes/ExData_Plotting1
R
false
false
691
r
# Reads the household power-consumption file and keeps only the
# measurements recorded on 2007-02-01 and 2007-02-02 (inclusive),
# adding a combined POSIXlt `datetime` column built from Date + Time.
# Returns the filtered data frame.
get.data <- function() {
  src <- file.path("data", "household_power_consumption.txt")
  first.day <- as.Date("2007-02-01", "%Y-%m-%d")
  last.day <- as.Date("2007-02-02", "%Y-%m-%d")

  # read everything; "?" marks missing values in this file
  raw <- read.table(src, sep = ";", header = TRUE, na.strings = "?")

  # keep rows whose date falls inside [first.day, last.day];
  # which() drops NA comparisons, matching subset() semantics
  obs.dates <- as.Date(raw$Date, "%d/%m/%Y")
  selected <- raw[which(obs.dates >= first.day & obs.dates <= last.day), ]

  # combine the Date and Time columns into a single datetime column
  selected$datetime <- strptime(paste(selected$Date, selected$Time),
                                format = "%d/%m/%Y %H:%M:%S")
  selected
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataset_creation_functions.R \name{read_data} \alias{read_data} \title{Scrape Events of Right-Wing Violence in 2015 from Online Source.} \usage{ read_data(page) } \arguments{ \item{page}{integer between 0 and n (which is the current maximum number of subpages in the list of events minus 1).} } \value{ A data frame of events as listed on the website, consisting of columns for date, location, bundesland, category, summary and source. } \description{ This function is built only for one specific purpose: Scraping the website \url{https://www.mut-gegen-rechte-gewalt.de/service/chronik-vorfaelle} for all events in 2015. It retrieves the date, location, bundesland, category, summary and source for each event and returns a data frame. }
/man/read_data.Rd
no_license
flinder/arvig
R
false
true
821
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dataset_creation_functions.R \name{read_data} \alias{read_data} \title{Scrape Events of Right-Wing Violence in 2015 from Online Source.} \usage{ read_data(page) } \arguments{ \item{page}{integer between 0 and n (which is the current maximum number of subpages in the list of events minus 1).} } \value{ A data frame of events as listed on the website, consisting of columns for date, location, bundesland, category, summary and source. } \description{ This function is built only for one specific purpose: Scraping the website \url{https://www.mut-gegen-rechte-gewalt.de/service/chronik-vorfaelle} for all events in 2015. It retrieves the date, location, bundesland, category, summary and source for each event and returns a data frame. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.r \docType{data} \name{hair1958} \alias{hair1958} \title{Timber products: Production, imports, exports, and new supply of lumber in the United States, specified years, 1799-1958} \format{A data frame with 58 observations on 23 variables: \describe{ \item{Years}{Years} \item{Prod.Tot}{Production Total} \item{Prod.SW}{Production of Softwoods} \item{Prod.HW}{Production of Hardwoods} \item{Imports.Tot}{Total Imports} \item{Imports.SW}{Imports for Softwood} \item{Imports.HW}{Imports for Hardwoods} \item{Imports.Mixed}{Imports for Mixed Woods} \item{Imports.Mixed.PercOfTot}{Mixed Woods Percent of Total Imports} \item{Imports.SW.PercOfTot}{Softwoods Percent of Total Imports} \item{Imports.HW.PercOfTot}{Hardwoods Percent of Total Imports} \item{Imports.Estimated.SW}{Estimated Softwood Imports} \item{Imports.Estimated.HW}{Estimated Hardwood Imports} \item{Exports.Tot}{Total Exports} \item{Exports.SW}{Softwood Exports} \item{Exports.HW}{Hardwood Exports} \item{Exports.Mixed}{Mixed Woods Exports} \item{Exports.Mixed.PercOfTot}{Mixed Woods Percent of Total Exports} \item{Exports.SW.PercOfTot}{Softwoods Percent of Total Exports} \item{Exports.HW.PercOfTot}{Hardwoods Percent of Total Exports} \item{Exports.Estimated.SW}{Estimated Softwood Exports} \item{Exports.Estimated.HW}{Estimated Hardwood Exports} \item{NewSupply}{New Supply} \item{PerCapita}{Per Capita Use, in Board ft.} }} \source{ USFS Estimations and Bureau of Census Data } \usage{ hair1958 } \description{ Dataset containing statistics for timber products. All units are in Million Board Ft, unless otherwise specified. All vectors are numeric. } \keyword{hair1958}
/man/hair1958.Rd
no_license
benjones2/WOODCARB3R
R
false
true
1,720
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.r \docType{data} \name{hair1958} \alias{hair1958} \title{Timber products: Production, imports, exports, and new supply of lumber in the United States, specified years, 1799-1958} \format{A data frame with 58 observations on 23 variables: \describe{ \item{Years}{Years} \item{Prod.Tot}{Production Total} \item{Prod.SW}{Production of Softwoods} \item{Prod.HW}{Production of Hardwoods} \item{Imports.Tot}{Total Imports} \item{Imports.SW}{Imports for Softwood} \item{Imports.HW}{Imports for Hardwoods} \item{Imports.Mixed}{Imports for Mixed Woods} \item{Imports.Mixed.PercOfTot}{Mixed Woods Percent of Total Imports} \item{Imports.SW.PercOfTot}{Softwoods Percent of Total Imports} \item{Imports.HW.PercOfTot}{Hardwoods Percent of Total Imports} \item{Imports.Estimated.SW}{Estimated Softwood Imports} \item{Imports.Estimated.HW}{Estimated Hardwood Imports} \item{Exports.Tot}{Total Exports} \item{Exports.SW}{Softwood Exports} \item{Exports.HW}{Hardwood Exports} \item{Exports.Mixed}{Mixed Woods Exports} \item{Exports.Mixed.PercOfTot}{Mixed Woods Percent of Total Exports} \item{Exports.SW.PercOfTot}{Softwoods Percent of Total Exports} \item{Exports.HW.PercOfTot}{Hardwoods Percent of Total Exports} \item{Exports.Estimated.SW}{Estimated Softwood Exports} \item{Exports.Estimated.HW}{Estimated Hardwood Exports} \item{NewSupply}{New Supply} \item{PerCapita}{Per Capita Use, in Board ft.} }} \source{ USFS Estimations and Bureau of Census Data } \usage{ hair1958 } \description{ Dataset containing statistics for timber products. All units are in Million Board Ft, unless otherwise specified. All vectors are numeric. } \keyword{hair1958}
#------------------------------------------------------------
# Load data and packages
#------------------------------------------------------------
library('tidyverse')
library('lubridate')

# A/B test data: one row per impression with user_id, timestamp,
# control/treatment group label, and a 0/1 `converted` flag.
AB <- read.csv(here::here("datasets","ab_data.csv"))
head(AB)

#---------------------------------------------------------------
# clean data file to prepare for analysis
#---------------------------------------------------------------
# group by user_id, arrange by timestamp, and remove
# duplicate IDs
# create a day date variable from the raw
# timestamp information
# NOTE: arrange() sorts globally (it ignores grouping by default), so
# filter(!duplicated(user_id)) within each user's group keeps that
# user's earliest row.
AB_clean <- AB %>%
  group_by(user_id) %>%
  arrange(timestamp) %>%
  filter(!duplicated(user_id)) %>%
  mutate(day = as.Date(timestamp))

# and group by date and control/treatment indicator
# (daily conversion rate per arm)
AB_sum <- AB_clean %>%
  group_by(day, group) %>%
  summarize(conv_rate = mean(converted))

print(AB_sum)

#---------------------------------------------------------------
# Plot difference between treated and control
#---------------------------------------------------------------
ggplot(AB_sum, aes(x = day, y = conv_rate, color = group, group = group)) +
  geom_point() +
  geom_line() +
  theme_minimal()

#---------------------------------------------------------------
# Estimate model to identify treatment effect
#---------------------------------------------------------------
# Make "control" the factor baseline so the treatment coefficient is
# the (log-odds) effect of being in the treatment arm.
AB_clean <- AB_clean %>%
  mutate(treated = factor(group, levels = c("control","treatment")))

logit_mod <- glm(converted ~ treated, data = AB_clean, family = "binomial")
summary(logit_mod)

#---------------------------------------------------------------
# Cluster standard errors around day
# and control for covariates (day effect)
#---------------------------------------------------------------
library('miceadds')
library('sandwich')

# glm.cluster fits the logit with day fixed effects and reports
# standard errors clustered on `day`.
logit_cluster <- glm.cluster(converted ~ treated + factor(day), cluster = "day", data = AB_clean)
summary(logit_cluster)

#---------------------------------------------------------------
# Read in Cookie Cats Data
#---------------------------------------------------------------
cats <- read.csv(here::here("datasets","cookie_cats.csv"))
head(cats)

# userid - unique player ID
# version - whether the player was put in the control group
# (gate_30 - a gate at level 30)
# or the test group (gate_40 - a gate at level 40).
# sum_game rounds - the number of game rounds played by the player
# during the first week after installation
# retention_1 - did the player come back and play 1 day after installing?
# retention_7 - did the player come back and play 7 days after installing?

# 1. Arrange by userid and remove any duplicate IDs that exist
# Use the mutate function to create a "treated" variable = 1
# if player was in the treated category where the gate was set to level 40
# NOTE(review): `retention_1 == "TRUE"` relies on logical-to-character
# coercion when read.csv parses the column as logical -- TODO confirm
# the column type; comparing to TRUE directly would be more robust.
cats_clean <- cats %>%
  arrange(userid) %>%
  filter(!duplicated(userid)) %>%
  mutate(treated = if_else(version == "gate_40",1,0),
         ret_1 = if_else(retention_1 == "TRUE",1,0),
         ret_7 = if_else(retention_7 == "TRUE",1,0))

# 2. Estimate the treatment impact of the gate 40
# intervention on day 1 retention

# 3. Estimate the treatment impact of the gate 40
# intervention on day 7 retention

# 4. Estimate the treatment impact of the gate 40
# intervention on game rounds played

# 5. What do you conclude? Should they adopt the treatment?
# Why or why not?
/labs/lab_class_9_experiments.R
no_license
cesarruy/BUS696
R
false
false
3,520
r
#------------------------------------------------------------
# Load data and packages
#------------------------------------------------------------
library('tidyverse')
library('lubridate')

# A/B test data: one row per impression with user_id, timestamp,
# control/treatment group label, and a 0/1 `converted` flag.
AB <- read.csv(here::here("datasets","ab_data.csv"))
head(AB)

#---------------------------------------------------------------
# clean data file to prepare for analysis
#---------------------------------------------------------------
# group by user_id, arrange by timestamp, and remove
# duplicate IDs
# create a day date variable from the raw
# timestamp information
# NOTE: arrange() sorts globally (it ignores grouping by default), so
# filter(!duplicated(user_id)) within each user's group keeps that
# user's earliest row.
AB_clean <- AB %>%
  group_by(user_id) %>%
  arrange(timestamp) %>%
  filter(!duplicated(user_id)) %>%
  mutate(day = as.Date(timestamp))

# and group by date and control/treatment indicator
# (daily conversion rate per arm)
AB_sum <- AB_clean %>%
  group_by(day, group) %>%
  summarize(conv_rate = mean(converted))

print(AB_sum)

#---------------------------------------------------------------
# Plot difference between treated and control
#---------------------------------------------------------------
ggplot(AB_sum, aes(x = day, y = conv_rate, color = group, group = group)) +
  geom_point() +
  geom_line() +
  theme_minimal()

#---------------------------------------------------------------
# Estimate model to identify treatment effect
#---------------------------------------------------------------
# Make "control" the factor baseline so the treatment coefficient is
# the (log-odds) effect of being in the treatment arm.
AB_clean <- AB_clean %>%
  mutate(treated = factor(group, levels = c("control","treatment")))

logit_mod <- glm(converted ~ treated, data = AB_clean, family = "binomial")
summary(logit_mod)

#---------------------------------------------------------------
# Cluster standard errors around day
# and control for covariates (day effect)
#---------------------------------------------------------------
library('miceadds')
library('sandwich')

# glm.cluster fits the logit with day fixed effects and reports
# standard errors clustered on `day`.
logit_cluster <- glm.cluster(converted ~ treated + factor(day), cluster = "day", data = AB_clean)
summary(logit_cluster)

#---------------------------------------------------------------
# Read in Cookie Cats Data
#---------------------------------------------------------------
cats <- read.csv(here::here("datasets","cookie_cats.csv"))
head(cats)

# userid - unique player ID
# version - whether the player was put in the control group
# (gate_30 - a gate at level 30)
# or the test group (gate_40 - a gate at level 40).
# sum_game rounds - the number of game rounds played by the player
# during the first week after installation
# retention_1 - did the player come back and play 1 day after installing?
# retention_7 - did the player come back and play 7 days after installing?

# 1. Arrange by userid and remove any duplicate IDs that exist
# Use the mutate function to create a "treated" variable = 1
# if player was in the treated category where the gate was set to level 40
# NOTE(review): `retention_1 == "TRUE"` relies on logical-to-character
# coercion when read.csv parses the column as logical -- TODO confirm
# the column type; comparing to TRUE directly would be more robust.
cats_clean <- cats %>%
  arrange(userid) %>%
  filter(!duplicated(userid)) %>%
  mutate(treated = if_else(version == "gate_40",1,0),
         ret_1 = if_else(retention_1 == "TRUE",1,0),
         ret_7 = if_else(retention_7 == "TRUE",1,0))

# 2. Estimate the treatment impact of the gate 40
# intervention on day 1 retention

# 3. Estimate the treatment impact of the gate 40
# intervention on day 7 retention

# 4. Estimate the treatment impact of the gate 40
# intervention on game rounds played

# 5. What do you conclude? Should they adopt the treatment?
# Why or why not?
/R/11.1.R
no_license
njamali/CodeBase
R
false
false
26,426
r
## plot4.R -- draws a 2x2 panel of power-consumption plots for
## 2007-02-01..2007-02-02 and saves it to plot4.png.

# Download and unpack the data file if it is not already present.
filename <- 'household_power_consumption.zip'
if (!file.exists(filename)) {
  file1 <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(file1, filename, method = "curl")
}
if (!file.exists("household_power_consumption.txt")) {
  unzip(filename)
}

## Getting full dataset; "?" marks missing values.
new_hpc <- read.table('household_power_consumption.txt', sep = ';', header = TRUE,
                      colClasses = c('character', 'character', 'numeric', 'numeric',
                                     'numeric', 'numeric', 'numeric', 'numeric',
                                     'numeric'),
                      na.strings = '?')

## Combine Date and Time into a single datetime column.
new_hpc$Date_Time <- strptime(paste(new_hpc$Date, new_hpc$Time), "%d/%m/%Y %H:%M:%S")

## Keep only the two days of interest.
new_hpc <- subset(new_hpc,
                  as.Date(Date_Time) >= as.Date("2007-02-01") &
                    as.Date(Date_Time) <= as.Date("2007-02-02"))

# BUGFIX: open the PNG device first, THEN set the 2x2 layout.
# par() applies only to the currently active device; png() opens a fresh
# device with default settings, so calling par(mfrow = ...) before png()
# had no effect and the saved file contained only the last plot.
png("plot4.png", height = 480, width = 480)
par(mfrow = c(2, 2))

# Top-left: global active power over time.
plot(new_hpc$Date_Time, new_hpc$Global_active_power, pch = NA,
     xlab = "", ylab = "Global Active Power (kilowatts)")
lines(new_hpc$Date_Time, new_hpc$Global_active_power)

# Top-right: voltage over time.
plot(new_hpc$Date_Time, new_hpc$Voltage, ylab = "Voltage", xlab = "datetime", pch = NA)
lines(new_hpc$Date_Time, new_hpc$Voltage)

# Bottom-left: the three energy sub-metering series.
plot(new_hpc$Date_Time, new_hpc$Sub_metering_1, pch = NA,
     xlab = "", ylab = "Energy sub metering")
lines(new_hpc$Date_Time, new_hpc$Sub_metering_1)
lines(new_hpc$Date_Time, new_hpc$Sub_metering_2, col = 'red')
lines(new_hpc$Date_Time, new_hpc$Sub_metering_3, col = 'blue')
legend('topright', c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c('black', 'red', 'blue'), bty = 'n')

# Bottom-right: global reactive power.
with(new_hpc, plot(Date_Time, Global_reactive_power, xlab = 'datetime', pch = NA))
with(new_hpc, lines(Date_Time, Global_reactive_power))

dev.off()
/plot4.R
no_license
A-Wei/ExData_Plotting1
R
false
false
1,921
r
## plot4.R -- draws a 2x2 panel of power-consumption plots for
## 2007-02-01..2007-02-02 and saves it to plot4.png.

# Download and unpack the data file if it is not already present.
filename <- 'household_power_consumption.zip'
if (!file.exists(filename)) {
  file1 <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(file1, filename, method = "curl")
}
if (!file.exists("household_power_consumption.txt")) {
  unzip(filename)
}

## Getting full dataset; "?" marks missing values.
new_hpc <- read.table('household_power_consumption.txt', sep = ';', header = TRUE,
                      colClasses = c('character', 'character', 'numeric', 'numeric',
                                     'numeric', 'numeric', 'numeric', 'numeric',
                                     'numeric'),
                      na.strings = '?')

## Combine Date and Time into a single datetime column.
new_hpc$Date_Time <- strptime(paste(new_hpc$Date, new_hpc$Time), "%d/%m/%Y %H:%M:%S")

## Keep only the two days of interest.
new_hpc <- subset(new_hpc,
                  as.Date(Date_Time) >= as.Date("2007-02-01") &
                    as.Date(Date_Time) <= as.Date("2007-02-02"))

# BUGFIX: open the PNG device first, THEN set the 2x2 layout.
# par() applies only to the currently active device; png() opens a fresh
# device with default settings, so calling par(mfrow = ...) before png()
# had no effect and the saved file contained only the last plot.
png("plot4.png", height = 480, width = 480)
par(mfrow = c(2, 2))

# Top-left: global active power over time.
plot(new_hpc$Date_Time, new_hpc$Global_active_power, pch = NA,
     xlab = "", ylab = "Global Active Power (kilowatts)")
lines(new_hpc$Date_Time, new_hpc$Global_active_power)

# Top-right: voltage over time.
plot(new_hpc$Date_Time, new_hpc$Voltage, ylab = "Voltage", xlab = "datetime", pch = NA)
lines(new_hpc$Date_Time, new_hpc$Voltage)

# Bottom-left: the three energy sub-metering series.
plot(new_hpc$Date_Time, new_hpc$Sub_metering_1, pch = NA,
     xlab = "", ylab = "Energy sub metering")
lines(new_hpc$Date_Time, new_hpc$Sub_metering_1)
lines(new_hpc$Date_Time, new_hpc$Sub_metering_2, col = 'red')
lines(new_hpc$Date_Time, new_hpc$Sub_metering_3, col = 'blue')
legend('topright', c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c('black', 'red', 'blue'), bty = 'n')

# Bottom-right: global reactive power.
with(new_hpc, plot(Date_Time, Global_reactive_power, xlab = 'datetime', pch = NA))
with(new_hpc, lines(Date_Time, Global_reactive_power))

dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/opsworkscm_service.R \name{opsworkscm} \alias{opsworkscm} \title{AWS OpsWorks CM} \usage{ opsworkscm(config = list()) } \arguments{ \item{config}{Optional configuration of credentials, endpoint, and/or region.} } \value{ A client for the service. You can call the service's operations using syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned to the client. The available operations are listed in the Operations section. } \description{ AWS OpsWorks for configuration management (CM) is a service that runs and manages configuration management servers. You can use AWS OpsWorks CM to create and manage AWS OpsWorks for Chef Automate and AWS OpsWorks for Puppet Enterprise servers, and add or remove nodes for the servers to manage. \strong{Glossary of terms} \itemize{ \item \strong{Server}: A configuration management server that can be highly-available. The configuration management server runs on an Amazon Elastic Compute Cloud (EC2) instance, and may use various other AWS services, such as Amazon Relational Database Service (RDS) and Elastic Load Balancing. A server is a generic abstraction over the configuration manager that you want to use, much like Amazon RDS. In AWS OpsWorks CM, you do not start or stop servers. After you create servers, they continue to run until they are deleted. \item \strong{Engine}: The engine is the specific configuration manager that you want to use. Valid values in this release include \code{ChefAutomate} and \code{Puppet}. \item \strong{Backup}: This is an application-level backup of the data that the configuration manager stores. AWS OpsWorks CM creates an S3 bucket for backups when you launch the first server. A backup maintains a snapshot of a server's configuration-related attributes at the time the backup starts. \item \strong{Events}: Events are always related to a server. 
Events are written during server creation, when health checks run, when backups are created, when system maintenance is performed, etc. When you delete a server, the server's events are also deleted. \item \strong{Account attributes}: Every account has attributes that are assigned in the AWS OpsWorks CM database. These attributes store information about configuration limits (servers, backups, etc.) and your customer account. } \strong{Endpoints} AWS OpsWorks CM supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Your servers can only be accessed or managed within the endpoint in which they are created. \itemize{ \item opsworks-cm.us-east-1.amazonaws.com \item opsworks-cm.us-east-2.amazonaws.com \item opsworks-cm.us-west-1.amazonaws.com \item opsworks-cm.us-west-2.amazonaws.com \item opsworks-cm.ap-northeast-1.amazonaws.com \item opsworks-cm.ap-southeast-1.amazonaws.com \item opsworks-cm.ap-southeast-2.amazonaws.com \item opsworks-cm.eu-central-1.amazonaws.com \item opsworks-cm.eu-west-1.amazonaws.com } For more information, see \href{https://docs.aws.amazon.com/general/latest/gr/opsworks-service.html}{AWS OpsWorks endpoints and quotas} in the AWS General Reference. \strong{Throttling limits} All API operations allow for five requests per second with a burst of 10 requests per second. 
} \section{Service syntax}{ \preformatted{svc <- opsworkscm( config = list( credentials = list( creds = list( access_key_id = "string", secret_access_key = "string", session_token = "string" ), profile = "string" ), endpoint = "string", region = "string" ) ) } } \section{Operations}{ \tabular{ll}{ \link[=opsworkscm_associate_node]{associate_node} \tab Associates a new node with the server\cr \link[=opsworkscm_create_backup]{create_backup} \tab Creates an application-level backup of a server\cr \link[=opsworkscm_create_server]{create_server} \tab Creates and immedately starts a new server\cr \link[=opsworkscm_delete_backup]{delete_backup} \tab Deletes a backup\cr \link[=opsworkscm_delete_server]{delete_server} \tab Deletes the server and the underlying AWS CloudFormation stacks (including the server's EC2 instance)\cr \link[=opsworkscm_describe_account_attributes]{describe_account_attributes} \tab Describes your OpsWorks-CM account attributes\cr \link[=opsworkscm_describe_backups]{describe_backups} \tab Describes backups\cr \link[=opsworkscm_describe_events]{describe_events} \tab Describes events for a specified server\cr \link[=opsworkscm_describe_node_association_status]{describe_node_association_status} \tab Returns the current status of an existing association or disassociation request\cr \link[=opsworkscm_describe_servers]{describe_servers} \tab Lists all configuration management servers that are identified with your account\cr \link[=opsworkscm_disassociate_node]{disassociate_node} \tab Disassociates a node from an AWS OpsWorks CM server, and removes the node from the server's managed nodes\cr \link[=opsworkscm_export_server_engine_attribute]{export_server_engine_attribute} \tab Exports a specified server engine attribute as a base64-encoded string\cr \link[=opsworkscm_list_tags_for_resource]{list_tags_for_resource} \tab Returns a list of tags that are applied to the specified AWS OpsWorks for Chef Automate or AWS OpsWorks for Puppet Enterprise servers or 
backups\cr \link[=opsworkscm_restore_server]{restore_server} \tab Restores a backup to a server that is in a CONNECTION_LOST, HEALTHY, RUNNING, UNHEALTHY, or TERMINATED state\cr \link[=opsworkscm_start_maintenance]{start_maintenance} \tab Manually starts server maintenance\cr \link[=opsworkscm_tag_resource]{tag_resource} \tab Applies tags to an AWS OpsWorks for Chef Automate or AWS OpsWorks for Puppet Enterprise server, or to server backups\cr \link[=opsworkscm_untag_resource]{untag_resource} \tab Removes specified tags from an AWS OpsWorks-CM server or backup\cr \link[=opsworkscm_update_server]{update_server} \tab Updates settings for a server\cr \link[=opsworkscm_update_server_engine_attributes]{update_server_engine_attributes} \tab Updates engine-specific attributes on a specified server } } \examples{ \dontrun{ svc <- opsworkscm() svc$associate_node( Foo = 123 ) } }
/cran/paws.management/man/opsworkscm.Rd
permissive
williazo/paws
R
false
true
6,244
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/opsworkscm_service.R \name{opsworkscm} \alias{opsworkscm} \title{AWS OpsWorks CM} \usage{ opsworkscm(config = list()) } \arguments{ \item{config}{Optional configuration of credentials, endpoint, and/or region.} } \value{ A client for the service. You can call the service's operations using syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned to the client. The available operations are listed in the Operations section. } \description{ AWS OpsWorks for configuration management (CM) is a service that runs and manages configuration management servers. You can use AWS OpsWorks CM to create and manage AWS OpsWorks for Chef Automate and AWS OpsWorks for Puppet Enterprise servers, and add or remove nodes for the servers to manage. \strong{Glossary of terms} \itemize{ \item \strong{Server}: A configuration management server that can be highly-available. The configuration management server runs on an Amazon Elastic Compute Cloud (EC2) instance, and may use various other AWS services, such as Amazon Relational Database Service (RDS) and Elastic Load Balancing. A server is a generic abstraction over the configuration manager that you want to use, much like Amazon RDS. In AWS OpsWorks CM, you do not start or stop servers. After you create servers, they continue to run until they are deleted. \item \strong{Engine}: The engine is the specific configuration manager that you want to use. Valid values in this release include \code{ChefAutomate} and \code{Puppet}. \item \strong{Backup}: This is an application-level backup of the data that the configuration manager stores. AWS OpsWorks CM creates an S3 bucket for backups when you launch the first server. A backup maintains a snapshot of a server's configuration-related attributes at the time the backup starts. \item \strong{Events}: Events are always related to a server. 
Events are written during server creation, when health checks run, when backups are created, when system maintenance is performed, etc. When you delete a server, the server's events are also deleted. \item \strong{Account attributes}: Every account has attributes that are assigned in the AWS OpsWorks CM database. These attributes store information about configuration limits (servers, backups, etc.) and your customer account. } \strong{Endpoints} AWS OpsWorks CM supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Your servers can only be accessed or managed within the endpoint in which they are created. \itemize{ \item opsworks-cm.us-east-1.amazonaws.com \item opsworks-cm.us-east-2.amazonaws.com \item opsworks-cm.us-west-1.amazonaws.com \item opsworks-cm.us-west-2.amazonaws.com \item opsworks-cm.ap-northeast-1.amazonaws.com \item opsworks-cm.ap-southeast-1.amazonaws.com \item opsworks-cm.ap-southeast-2.amazonaws.com \item opsworks-cm.eu-central-1.amazonaws.com \item opsworks-cm.eu-west-1.amazonaws.com } For more information, see \href{https://docs.aws.amazon.com/general/latest/gr/opsworks-service.html}{AWS OpsWorks endpoints and quotas} in the AWS General Reference. \strong{Throttling limits} All API operations allow for five requests per second with a burst of 10 requests per second. 
} \section{Service syntax}{ \preformatted{svc <- opsworkscm( config = list( credentials = list( creds = list( access_key_id = "string", secret_access_key = "string", session_token = "string" ), profile = "string" ), endpoint = "string", region = "string" ) ) } } \section{Operations}{ \tabular{ll}{ \link[=opsworkscm_associate_node]{associate_node} \tab Associates a new node with the server\cr \link[=opsworkscm_create_backup]{create_backup} \tab Creates an application-level backup of a server\cr \link[=opsworkscm_create_server]{create_server} \tab Creates and immedately starts a new server\cr \link[=opsworkscm_delete_backup]{delete_backup} \tab Deletes a backup\cr \link[=opsworkscm_delete_server]{delete_server} \tab Deletes the server and the underlying AWS CloudFormation stacks (including the server's EC2 instance)\cr \link[=opsworkscm_describe_account_attributes]{describe_account_attributes} \tab Describes your OpsWorks-CM account attributes\cr \link[=opsworkscm_describe_backups]{describe_backups} \tab Describes backups\cr \link[=opsworkscm_describe_events]{describe_events} \tab Describes events for a specified server\cr \link[=opsworkscm_describe_node_association_status]{describe_node_association_status} \tab Returns the current status of an existing association or disassociation request\cr \link[=opsworkscm_describe_servers]{describe_servers} \tab Lists all configuration management servers that are identified with your account\cr \link[=opsworkscm_disassociate_node]{disassociate_node} \tab Disassociates a node from an AWS OpsWorks CM server, and removes the node from the server's managed nodes\cr \link[=opsworkscm_export_server_engine_attribute]{export_server_engine_attribute} \tab Exports a specified server engine attribute as a base64-encoded string\cr \link[=opsworkscm_list_tags_for_resource]{list_tags_for_resource} \tab Returns a list of tags that are applied to the specified AWS OpsWorks for Chef Automate or AWS OpsWorks for Puppet Enterprise servers or 
backups\cr \link[=opsworkscm_restore_server]{restore_server} \tab Restores a backup to a server that is in a CONNECTION_LOST, HEALTHY, RUNNING, UNHEALTHY, or TERMINATED state\cr \link[=opsworkscm_start_maintenance]{start_maintenance} \tab Manually starts server maintenance\cr \link[=opsworkscm_tag_resource]{tag_resource} \tab Applies tags to an AWS OpsWorks for Chef Automate or AWS OpsWorks for Puppet Enterprise server, or to server backups\cr \link[=opsworkscm_untag_resource]{untag_resource} \tab Removes specified tags from an AWS OpsWorks-CM server or backup\cr \link[=opsworkscm_update_server]{update_server} \tab Updates settings for a server\cr \link[=opsworkscm_update_server_engine_attributes]{update_server_engine_attributes} \tab Updates engine-specific attributes on a specified server } } \examples{ \dontrun{ svc <- opsworkscm() svc$associate_node( Foo = 123 ) } }
#' @title Retrieve SB item #' #' @export #' @template manipulate_item #' @return An object of class \code{sbitem} #' #' @description #' Retrieves an item and its metadata from ScienceBase based on its #' unique ID. Errors if the requested item ID does not exist or #' access is restricted due to permissions. #' #' #' @examples #' \donttest{ #' # Get an item #' item_get("4f4e4b24e4b07f02db6aea14") #' #' # Search for item IDs, then pass to item_get #' library("httr") #' res <- query_items(list(s = "Search", q = "water", format = "json")) #' #' if(res$status != 404) { #' ids <- vapply(httr::content(res)$items, "[[", "", "id") #' lapply(ids[1:3], item_get) #' } #' #' } item_get <- function(sb_id, ..., session=current_session()) { get_item(as.sbitem(sb_id)$id, ..., session = session) } get_item <- function(id, ..., session=current_session()) { res <- sbtools_GET(url = paste0(pkg.env$url_item, id), ..., query = list(type = 'json'), session = session) if(is(res, "list")) { if(res$status == 404) return(NULL) } return(as.sbitem(content(res))) }
/R/item_get.R
permissive
PatrickEslick/sbtools
R
false
false
1,089
r
#' @title Retrieve SB item #' #' @export #' @template manipulate_item #' @return An object of class \code{sbitem} #' #' @description #' Retrieves an item and its metadata from ScienceBase based on its #' unique ID. Errors if the requested item ID does not exist or #' access is restricted due to permissions. #' #' #' @examples #' \donttest{ #' # Get an item #' item_get("4f4e4b24e4b07f02db6aea14") #' #' # Search for item IDs, then pass to item_get #' library("httr") #' res <- query_items(list(s = "Search", q = "water", format = "json")) #' #' if(res$status != 404) { #' ids <- vapply(httr::content(res)$items, "[[", "", "id") #' lapply(ids[1:3], item_get) #' } #' #' } item_get <- function(sb_id, ..., session=current_session()) { get_item(as.sbitem(sb_id)$id, ..., session = session) } get_item <- function(id, ..., session=current_session()) { res <- sbtools_GET(url = paste0(pkg.env$url_item, id), ..., query = list(type = 'json'), session = session) if(is(res, "list")) { if(res$status == 404) return(NULL) } return(as.sbitem(content(res))) }
context("Palm") library(flipStandardCharts) library(flipChartBasics) set.seed(12345) dat1 <- matrix(1:12, 3, 4) dat2 <- matrix(1:60, 12, 5) dat2[3, 3] <- NA colnames(dat2) <- c("Bayswater", "Queensway", "Hammersmith", "Holland Park", "Notting Hill") rownames(dat2) <- LETTERS[1:12] dat3 <- matrix(runif(100), 5, 20) rownames(dat3) <- c("Mon", "Tue", "Wed", "Thu", "Fri") tourist <- structure(c(0.52, 0.09, 0.06, 0.36, 0.04, 0.37, 0.51, 0.07, 0.06, 0.39, 0.05, 0.45, 0.79, 0.15, 0.1, 0.78, 0.11, 0.46, 0.09, 0.6, 0.51, 0.44, 0.57, 0.52, 0.23, 0.07, 0.11, 0.28, 0.06, 0.3, 0.17, 0.43, 0.05, 0.48, 0.03, 0.6, 0.11, 0.4, 0.08, 0.34, 0.06, 0.25, 0.03, 0.05, 0.05, 0.04, 0.01, 0.05), statistic = "%", .Dim = c(6L, 8L), .Dimnames = list(c("Mexico", "France", "Great Britain", "Egypt", "Australia", "China"), c("Cleanliness", "Health", "Safety", "Cost", "Food", "Not being understood", "Friendliness of the people", "Boredom"))) q5cc <- structure(c(0.252327048028994, 0.312881504763389, 0.309835063713764, 0.175546469946982, 0.332850525773139, 0.213918060868462, 0.148891326905278, 0.99999999992966, 0.321856718881157, 0.390384865558457, 0.387008828633533, 0.23043980297302, 0.412332045095876, 0.276781998955081, 0.197445739902877, 0.999999999949984, 0.193901751154243, 0.245030129463578, 0.2424112651755, 0.131766253337998, 0.26232192530017, 0.162453846830681, 0.110864828737829, 0.999999999901313, 0.223980226347181, 0.280282076203562, 0.277424891486241, 0.154048099182854, 0.299076106591684, 0.188796593933267, 0.130142006255211, 0.999999999917754, 0.256383779654847, 0.317498370032718, 0.314427546535753, 0.17866375634708, 0.337617270166789, 0.21753694692716, 0.151622330335653, 0.999999999931149, 0.253986416353672, 0.314771442945425, 0.311714948165763, 0.17682032200925, 0.334802311923131, 0.215397609802415, 0.150006948800344, 0.999999999930275, 0.186950249396378, 0.236784065423265, 0.23422599150159, 0.126692243563911, 0.253689477903583, 0.156411012525024, 0.106496959686249, 0.999999999896761, 
0.251773988570791, 0.312251150086532, 0.309208084668817, 0.175122283403448, 0.332199390423663, 0.21342515643933, 0.148519946407419, 0.999999999929453, 0.383662387414414, 0.456491679510049, 0.452968741467223, 0.281990522244833, 0.479233721374194, 0.334197641302466, 0.243955306686821, 0.999999999961865), name = "q5 - column comparisons", questions = c("q5", "SUMMARY"), statistic = "Expected %", .Dim = 8:9, .Dimnames = list( c("Coke", "Diet Coke", "Coke Zero", "Pepsi", "Diet Pepsi", "Pepsi Max", "None of these", "NET"), c("Feminine", "Health-conscious", "Innocent", "Older", "Open to new experiences", "Rebellious", "Sleepy", "Traditional", "Weight-conscious"))) opts <- c('fonts' = 'x.title = "MY X-AXIS IS MINE", x.title.font.family = "Century Gothic", x.title.font.size = 18, legend.font.family = "Arial Black", legend.font.size = 18, y.tick.font.family = "Georgia", y.tick.font.size = 18, y.title = "MY Y-AXIS IS MINE", y.title.font.family = "Open Sans", y.title.font.size = 18', 'percent' = 'y.tick.format = ".3%", y.tick.prefix = "PRE", y.tick.suffix = "SUF"', 'colorsnums' = 'colors = ChartColors(10, "Blues"), y.tick.format = ".1f"', 'titlenoaxis' = 'y.axis.show = FALSE, x.title = "something here", y.title = "something else"') dat.list <- c("dat1", "dat2", "dat3", "tourist", "q5cc") for (dat in dat.list) { for (ii in 1:length(opts)) { # Create name which will appear in the error message if test fails # Filestem should be prefixed by test file name to avoid name conflicts filestem <- paste0("palm-", dat, "-", names(opts)[ii]) test_that(filestem, { # Create command that will create widget cmd <- paste0("pp <- Palm(", dat, ",", opts[ii], ")") # Run command and check outputs expect_error(suppressWarnings(eval(parse(text=cmd))), NA) #print(pp) #readline(prompt=paste0(filestem, ": press [enter] to continue: ")) }) } } test_that("No data", { expect_error(Palm(matrix(NA, 0, 0)), "There is not enough data to create a plot") expect_error(Palm(c()), "The data is not in an 
appropriate format") }) test_that("Strip alpha values", { expect_warning(Palm(tourist, global.font.color = "#0000FF80"), "Alpha values for colors in Palm trees are ignored") })
/tests/testthat/test-palm.R
no_license
Displayr/flipStandardCharts
R
false
false
5,834
r
context("Palm") library(flipStandardCharts) library(flipChartBasics) set.seed(12345) dat1 <- matrix(1:12, 3, 4) dat2 <- matrix(1:60, 12, 5) dat2[3, 3] <- NA colnames(dat2) <- c("Bayswater", "Queensway", "Hammersmith", "Holland Park", "Notting Hill") rownames(dat2) <- LETTERS[1:12] dat3 <- matrix(runif(100), 5, 20) rownames(dat3) <- c("Mon", "Tue", "Wed", "Thu", "Fri") tourist <- structure(c(0.52, 0.09, 0.06, 0.36, 0.04, 0.37, 0.51, 0.07, 0.06, 0.39, 0.05, 0.45, 0.79, 0.15, 0.1, 0.78, 0.11, 0.46, 0.09, 0.6, 0.51, 0.44, 0.57, 0.52, 0.23, 0.07, 0.11, 0.28, 0.06, 0.3, 0.17, 0.43, 0.05, 0.48, 0.03, 0.6, 0.11, 0.4, 0.08, 0.34, 0.06, 0.25, 0.03, 0.05, 0.05, 0.04, 0.01, 0.05), statistic = "%", .Dim = c(6L, 8L), .Dimnames = list(c("Mexico", "France", "Great Britain", "Egypt", "Australia", "China"), c("Cleanliness", "Health", "Safety", "Cost", "Food", "Not being understood", "Friendliness of the people", "Boredom"))) q5cc <- structure(c(0.252327048028994, 0.312881504763389, 0.309835063713764, 0.175546469946982, 0.332850525773139, 0.213918060868462, 0.148891326905278, 0.99999999992966, 0.321856718881157, 0.390384865558457, 0.387008828633533, 0.23043980297302, 0.412332045095876, 0.276781998955081, 0.197445739902877, 0.999999999949984, 0.193901751154243, 0.245030129463578, 0.2424112651755, 0.131766253337998, 0.26232192530017, 0.162453846830681, 0.110864828737829, 0.999999999901313, 0.223980226347181, 0.280282076203562, 0.277424891486241, 0.154048099182854, 0.299076106591684, 0.188796593933267, 0.130142006255211, 0.999999999917754, 0.256383779654847, 0.317498370032718, 0.314427546535753, 0.17866375634708, 0.337617270166789, 0.21753694692716, 0.151622330335653, 0.999999999931149, 0.253986416353672, 0.314771442945425, 0.311714948165763, 0.17682032200925, 0.334802311923131, 0.215397609802415, 0.150006948800344, 0.999999999930275, 0.186950249396378, 0.236784065423265, 0.23422599150159, 0.126692243563911, 0.253689477903583, 0.156411012525024, 0.106496959686249, 0.999999999896761, 
0.251773988570791, 0.312251150086532, 0.309208084668817, 0.175122283403448, 0.332199390423663, 0.21342515643933, 0.148519946407419, 0.999999999929453, 0.383662387414414, 0.456491679510049, 0.452968741467223, 0.281990522244833, 0.479233721374194, 0.334197641302466, 0.243955306686821, 0.999999999961865), name = "q5 - column comparisons", questions = c("q5", "SUMMARY"), statistic = "Expected %", .Dim = 8:9, .Dimnames = list( c("Coke", "Diet Coke", "Coke Zero", "Pepsi", "Diet Pepsi", "Pepsi Max", "None of these", "NET"), c("Feminine", "Health-conscious", "Innocent", "Older", "Open to new experiences", "Rebellious", "Sleepy", "Traditional", "Weight-conscious"))) opts <- c('fonts' = 'x.title = "MY X-AXIS IS MINE", x.title.font.family = "Century Gothic", x.title.font.size = 18, legend.font.family = "Arial Black", legend.font.size = 18, y.tick.font.family = "Georgia", y.tick.font.size = 18, y.title = "MY Y-AXIS IS MINE", y.title.font.family = "Open Sans", y.title.font.size = 18', 'percent' = 'y.tick.format = ".3%", y.tick.prefix = "PRE", y.tick.suffix = "SUF"', 'colorsnums' = 'colors = ChartColors(10, "Blues"), y.tick.format = ".1f"', 'titlenoaxis' = 'y.axis.show = FALSE, x.title = "something here", y.title = "something else"') dat.list <- c("dat1", "dat2", "dat3", "tourist", "q5cc") for (dat in dat.list) { for (ii in 1:length(opts)) { # Create name which will appear in the error message if test fails # Filestem should be prefixed by test file name to avoid name conflicts filestem <- paste0("palm-", dat, "-", names(opts)[ii]) test_that(filestem, { # Create command that will create widget cmd <- paste0("pp <- Palm(", dat, ",", opts[ii], ")") # Run command and check outputs expect_error(suppressWarnings(eval(parse(text=cmd))), NA) #print(pp) #readline(prompt=paste0(filestem, ": press [enter] to continue: ")) }) } } test_that("No data", { expect_error(Palm(matrix(NA, 0, 0)), "There is not enough data to create a plot") expect_error(Palm(c()), "The data is not in an 
appropriate format") }) test_that("Strip alpha values", { expect_warning(Palm(tourist, global.font.color = "#0000FF80"), "Alpha values for colors in Palm trees are ignored") })
#Plot1 # Created by Shailesh Nair, 07 November 2015 # Coursera- Exploratory Data Analysis Course- Assignment1 # This r-Script reads in data from Local Working Directory and genrates a histogram of "Global Active Power" over a time span of 2 days # from First Feb 2007 to 02 Feb 2007. # Data Source : https://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption # Dataset: Electric power consumption [20Mb] # Description: Measurements of electric power consumption in one household with a one-minute sampling rate over a period of almost 4 years. Different electrical quantities and some sub-metering values are available. # Read data and assign to m, missing data denoted by "?" m<-read.csv("/home/shailesh/Coursera_R/household_power_consumption.txt",header=T,sep=";",na.strings = "?") # Combine data and time data newdate<-paste(m$Date,m$Time) # convert to date time format datetime<-strptime(newdate,"%d/%m/%Y %H:%M:%S") # Commbine with original data set n<-cbind(m,datetime) # Create a subset of dataset for dates from 2007-02-01 to 2007-02-02 o<-subset(n,as.POSIXct(n$datetime)>=as.POSIXct("2007-02-01") & as.POSIXct(n$datetime)<as.POSIXct("2007-02-03")) # activate graphics file device png of resolution 480x480 png(file="plot1.png", width=480, height=480) #plot Histogram of "Global Active Power" hist(o$Global_active_power,col='red',xlab='Global Active Power (kilowatts', main='Global Active Power', ylim=c(0,1200)) # close graphics device dev.off()
/plot1.R
no_license
Shailf117/ExData_Plotting1
R
false
false
1,494
r
#Plot1 # Created by Shailesh Nair, 07 November 2015 # Coursera- Exploratory Data Analysis Course- Assignment1 # This r-Script reads in data from Local Working Directory and genrates a histogram of "Global Active Power" over a time span of 2 days # from First Feb 2007 to 02 Feb 2007. # Data Source : https://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption # Dataset: Electric power consumption [20Mb] # Description: Measurements of electric power consumption in one household with a one-minute sampling rate over a period of almost 4 years. Different electrical quantities and some sub-metering values are available. # Read data and assign to m, missing data denoted by "?" m<-read.csv("/home/shailesh/Coursera_R/household_power_consumption.txt",header=T,sep=";",na.strings = "?") # Combine data and time data newdate<-paste(m$Date,m$Time) # convert to date time format datetime<-strptime(newdate,"%d/%m/%Y %H:%M:%S") # Commbine with original data set n<-cbind(m,datetime) # Create a subset of dataset for dates from 2007-02-01 to 2007-02-02 o<-subset(n,as.POSIXct(n$datetime)>=as.POSIXct("2007-02-01") & as.POSIXct(n$datetime)<as.POSIXct("2007-02-03")) # activate graphics file device png of resolution 480x480 png(file="plot1.png", width=480, height=480) #plot Histogram of "Global Active Power" hist(o$Global_active_power,col='red',xlab='Global Active Power (kilowatts', main='Global Active Power', ylim=c(0,1200)) # close graphics device dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/creating.trait.R \name{creating.trait} \alias{creating.trait} \title{Generation of genomic traits} \usage{ creating.trait( population, real.bv.add = NULL, real.bv.mult = NULL, real.bv.dice = NULL, bv.total = 0, polygenic.variance = 100, bve.mult.factor = NULL, bve.poly.factor = NULL, base.bv = NULL, new.phenotype.correlation = NULL, new.residual.correlation = NULL, new.breeding.correlation = NULL, n.additive = 0, n.equal.additive = 0, n.dominant = 0, n.equal.dominant = 0, n.qualitative = 0, n.quantitative = 0, dominant.only.positive = FALSE, var.additive.l = NULL, var.dominant.l = NULL, var.qualitative.l = NULL, var.quantitative.l = NULL, effect.size.equal.add = 1, effect.size.equal.dom = 1, exclude.snps = NULL, randomSeed = NULL, shuffle.traits = NULL, shuffle.cor = NULL, replace.traits = FALSE, trait.name = NULL, remove.invalid.qtl = TRUE, bv.standard = FALSE, mean.target = NULL, var.target = NULL, verbose = TRUE, is.maternal = NULL, is.paternal = NULL ) } \arguments{ \item{population}{Population list} \item{real.bv.add}{Single Marker effects} \item{real.bv.mult}{Two Marker effects} \item{real.bv.dice}{Multi-marker effects} \item{bv.total}{Number of traits (If more than traits via real.bv.X use traits with no directly underlying QTL)} \item{polygenic.variance}{Genetic variance of traits with no underlying QTL} \item{bve.mult.factor}{Multiplicate trait value times this} \item{bve.poly.factor}{Potency trait value over this} \item{base.bv}{Average genetic value of a trait} \item{new.phenotype.correlation}{(OLD! - use new.residual.correlation) Correlation of the simulated enviromental variance} \item{new.residual.correlation}{Correlation of the simulated enviromental variance} \item{new.breeding.correlation}{Correlation of the simulated genetic variance (child share! 
heritage is not influenced!} \item{n.additive}{Number of additive QTL with effect size drawn from a gaussian distribution} \item{n.equal.additive}{Number of additive QTL with equal effect size (effect.size)} \item{n.dominant}{Number of dominant QTL with effect size drawn from a gaussian distribution} \item{n.equal.dominant}{Number of n.equal.dominant QTL with equal effect size} \item{n.qualitative}{Number of qualitative epistatic QTL} \item{n.quantitative}{Number of quantitative epistatic QTL} \item{dominant.only.positive}{Set to TRUE to always asign the heterozygous variant with the higher of the two homozygous effects (e.g. hybrid breeding); default: FALSE} \item{var.additive.l}{Variance of additive QTL} \item{var.dominant.l}{Variance of dominante QTL} \item{var.qualitative.l}{Variance of qualitative epistatic QTL} \item{var.quantitative.l}{Variance of quantitative epistatic QTL} \item{effect.size.equal.add}{Effect size of the QTLs in n.equal.additive} \item{effect.size.equal.dom}{Effect size of the QTLs in n.equal.dominant} \item{exclude.snps}{Marker were no QTL are simulated on} \item{randomSeed}{Set random seed of the process} \item{shuffle.traits}{Combine different traits into a joined trait} \item{shuffle.cor}{Target Correlation between shuffeled traits} \item{replace.traits}{If TRUE delete the simulated traits added before} \item{trait.name}{Name of the trait generated} \item{remove.invalid.qtl}{Set to FALSE to deactive the automatic removal of QTLs on markers that do not exist} \item{bv.standard}{Set TRUE to standardize trait mean and variance via bv.standardization()} \item{mean.target}{Target mean} \item{var.target}{Target variance} \item{verbose}{Set to FALSE to not display any prints} \item{is.maternal}{Vector coding if a trait is caused by a maternal effect (Default: all FALSE)} \item{is.paternal}{Vector coding if a trait is caused by a paternal effect (Default: all FALSE)} } \value{ Population-list with one or more additional new traits } 
\description{ Generation of the trait in a starting population } \examples{ population <- creating.diploid(nsnp=1000, nindi=100) population <- creating.trait(population, n.additive=100) }
/man/creating.trait.Rd
no_license
cran/MoBPS
R
false
true
4,297
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/creating.trait.R \name{creating.trait} \alias{creating.trait} \title{Generation of genomic traits} \usage{ creating.trait( population, real.bv.add = NULL, real.bv.mult = NULL, real.bv.dice = NULL, bv.total = 0, polygenic.variance = 100, bve.mult.factor = NULL, bve.poly.factor = NULL, base.bv = NULL, new.phenotype.correlation = NULL, new.residual.correlation = NULL, new.breeding.correlation = NULL, n.additive = 0, n.equal.additive = 0, n.dominant = 0, n.equal.dominant = 0, n.qualitative = 0, n.quantitative = 0, dominant.only.positive = FALSE, var.additive.l = NULL, var.dominant.l = NULL, var.qualitative.l = NULL, var.quantitative.l = NULL, effect.size.equal.add = 1, effect.size.equal.dom = 1, exclude.snps = NULL, randomSeed = NULL, shuffle.traits = NULL, shuffle.cor = NULL, replace.traits = FALSE, trait.name = NULL, remove.invalid.qtl = TRUE, bv.standard = FALSE, mean.target = NULL, var.target = NULL, verbose = TRUE, is.maternal = NULL, is.paternal = NULL ) } \arguments{ \item{population}{Population list} \item{real.bv.add}{Single Marker effects} \item{real.bv.mult}{Two Marker effects} \item{real.bv.dice}{Multi-marker effects} \item{bv.total}{Number of traits (If more than traits via real.bv.X use traits with no directly underlying QTL)} \item{polygenic.variance}{Genetic variance of traits with no underlying QTL} \item{bve.mult.factor}{Multiplicate trait value times this} \item{bve.poly.factor}{Potency trait value over this} \item{base.bv}{Average genetic value of a trait} \item{new.phenotype.correlation}{(OLD! - use new.residual.correlation) Correlation of the simulated enviromental variance} \item{new.residual.correlation}{Correlation of the simulated enviromental variance} \item{new.breeding.correlation}{Correlation of the simulated genetic variance (child share! 
heritage is not influenced!} \item{n.additive}{Number of additive QTL with effect size drawn from a gaussian distribution} \item{n.equal.additive}{Number of additive QTL with equal effect size (effect.size)} \item{n.dominant}{Number of dominant QTL with effect size drawn from a gaussian distribution} \item{n.equal.dominant}{Number of n.equal.dominant QTL with equal effect size} \item{n.qualitative}{Number of qualitative epistatic QTL} \item{n.quantitative}{Number of quantitative epistatic QTL} \item{dominant.only.positive}{Set to TRUE to always asign the heterozygous variant with the higher of the two homozygous effects (e.g. hybrid breeding); default: FALSE} \item{var.additive.l}{Variance of additive QTL} \item{var.dominant.l}{Variance of dominante QTL} \item{var.qualitative.l}{Variance of qualitative epistatic QTL} \item{var.quantitative.l}{Variance of quantitative epistatic QTL} \item{effect.size.equal.add}{Effect size of the QTLs in n.equal.additive} \item{effect.size.equal.dom}{Effect size of the QTLs in n.equal.dominant} \item{exclude.snps}{Marker were no QTL are simulated on} \item{randomSeed}{Set random seed of the process} \item{shuffle.traits}{Combine different traits into a joined trait} \item{shuffle.cor}{Target Correlation between shuffeled traits} \item{replace.traits}{If TRUE delete the simulated traits added before} \item{trait.name}{Name of the trait generated} \item{remove.invalid.qtl}{Set to FALSE to deactive the automatic removal of QTLs on markers that do not exist} \item{bv.standard}{Set TRUE to standardize trait mean and variance via bv.standardization()} \item{mean.target}{Target mean} \item{var.target}{Target variance} \item{verbose}{Set to FALSE to not display any prints} \item{is.maternal}{Vector coding if a trait is caused by a maternal effect (Default: all FALSE)} \item{is.paternal}{Vector coding if a trait is caused by a paternal effect (Default: all FALSE)} } \value{ Population-list with one or more additional new traits } 
\description{ Generation of the trait in a starting population } \examples{ population <- creating.diploid(nsnp=1000, nindi=100) population <- creating.trait(population, n.additive=100) }
source("lnviD2.R") twostate_D <- function(dname){ dat <- get(load(paste0(dname,".rda"))) dat <- dat/10e10 lowerV <- c(-2,-2,.8,.8,0.001,-5) upperV <- c(2,2,.999,.999,5,5) const_mat <- matrix(0,length(lowerV),length(lowerV)) diag(const_mat) <- 1 const_mat <- rbind(const_mat,-const_mat) const_mat <- cbind(const_mat,c(lowerV,-upperV)) inits <- c(1.2, 1.7, 0.9, 0.9, 0.01, 0.1) processdat <- function(ser){ datrange <- range(which(!is.na(ser))) ser <- ser[datrange[1]:datrange[2]] if(any(is.na(ser))){ x <- zoo(ser) ser <- na.approx(x,1:length(ser))} ser } ress <- lapply(1:ncol(dat), function(i) {cat(paste0(i,"\n")) tryCatch(constrOptim(inits, function(p) -lnviD2(p,processdat(dat[,i])), NULL, ui = const_mat[,-7], const_mat[,7]),error=function(e) list(NA,NA))}) save(ress, file = paste0("output/",dname,"_2stateD_ress.rda")) estms <- sapply(ress,function(res) c(res$par,res$value,res$convergence)) colnames(estms) <- colnames(dat) estms <- t(estms) colnames(estms) <- c("d_1","d_2","P_11","P_22","sigma","mu","Neg_lklihood","Convergence") write.csv(estms, paste0("output/",dname,"2_stateD_ress.csv")) }
/twostate_D.R
permissive
fuadcan/dmarkovCAD
R
false
false
1,202
r
source("lnviD2.R") twostate_D <- function(dname){ dat <- get(load(paste0(dname,".rda"))) dat <- dat/10e10 lowerV <- c(-2,-2,.8,.8,0.001,-5) upperV <- c(2,2,.999,.999,5,5) const_mat <- matrix(0,length(lowerV),length(lowerV)) diag(const_mat) <- 1 const_mat <- rbind(const_mat,-const_mat) const_mat <- cbind(const_mat,c(lowerV,-upperV)) inits <- c(1.2, 1.7, 0.9, 0.9, 0.01, 0.1) processdat <- function(ser){ datrange <- range(which(!is.na(ser))) ser <- ser[datrange[1]:datrange[2]] if(any(is.na(ser))){ x <- zoo(ser) ser <- na.approx(x,1:length(ser))} ser } ress <- lapply(1:ncol(dat), function(i) {cat(paste0(i,"\n")) tryCatch(constrOptim(inits, function(p) -lnviD2(p,processdat(dat[,i])), NULL, ui = const_mat[,-7], const_mat[,7]),error=function(e) list(NA,NA))}) save(ress, file = paste0("output/",dname,"_2stateD_ress.rda")) estms <- sapply(ress,function(res) c(res$par,res$value,res$convergence)) colnames(estms) <- colnames(dat) estms <- t(estms) colnames(estms) <- c("d_1","d_2","P_11","P_22","sigma","mu","Neg_lklihood","Convergence") write.csv(estms, paste0("output/",dname,"2_stateD_ress.csv")) }