content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
# dist.default <- dist
# dist <- function(x,...) UseMethod("dist")

# Distance between spectra in a Speclib object.
#
# x      : object of class 'Speclib'.
# method : "sam" for the spectral angle mapper (Fortran backend in hsdar),
#          otherwise any method accepted by stats::dist().
# ...    : further arguments passed to stats::dist() for non-"sam" methods.
# Returns an object of class "dist".
dist.speclib <- function(
  x,
  method = "sam",
  ...
)
{
  # inherits() instead of class(x) != "...": class() may return a vector.
  if (!inherits(x, "Speclib"))
    stop("x must be of class 'Speclib'")

  if (method == "sam") {
    distance <- sam_distance(x)
    distance <- as.dist(distance)
  } else {
    # Fill masked wavelength regions before computing distances.
    if (attr(x, "setmask"))
      x <- interpolate.mask(x)
    spec <- spectra(x)
    distance <- dist(spec, method = method, ...)
  }
  return(distance)
}

# Spectral angle mapper between two Speclibs.
# Returns an (nspec x nref) matrix of spectral angles, rows named after
# the spectra in x and columns after the spectra in ref.
sam <- function(
  x,
  ref
)
{
  if (!inherits(x, "Speclib"))
    stop("x must be of class 'Speclib'")
  if (!inherits(ref, "Speclib"))
    stop("ref must be of class 'Speclib'")

  spec    <- spectra(x)
  wlx     <- x@wavelength
  specref <- spectra(ref)
  wlref   <- ref@wavelength

  # NOTE(review): only the number of bands is compared; identical wavelength
  # positions are assumed but not verified.
  if (length(wlref) != length(wlx)) {
    stop("Wavelength between speclibs differ")
  }

  spec    <- as.matrix(spec)
  specref <- as.matrix(specref)

  # Rescale percent reflectances to [0, 1]; both matrices are scaled by the
  # same decision (based on spec) so they stay comparable.
  if (max(spec, na.rm = TRUE) > 1) {
    spec    <- spec / 100
    specref <- specref / 100
  }
  if (max(spec, na.rm = TRUE) > 1)
    stop("Spectra in x must be in range [0,1]")
  if (max(specref, na.rm = TRUE) > 1)
    stop("Spectra in ref must be in range [0,1]")

  nspec   <- nrow(spec)
  nref    <- nrow(specref)
  nbands  <- ncol(spec)
  specang <- array(0, dim = c(nspec, nref))

  # Coerce to the storage modes expected by the Fortran routine.
  storage.mode(nspec)   <- "integer"
  storage.mode(nref)    <- "integer"
  storage.mode(nbands)  <- "integer"
  storage.mode(spec)    <- "double"
  storage.mode(specref) <- "double"
  storage.mode(specang) <- "double"

  distance <- .Fortran("sam",
                       nspec   = nspec,
                       nref    = nref,
                       nbands  = nbands,
                       spec    = spec,
                       specref = specref,
                       specang = specang,
                       PACKAGE = "hsdar"
                      )$specang
  distance <- as.matrix(distance)
  colnames(distance) <- rownames(specref)
  rownames(distance) <- rownames(spec)
  return(distance)
}

# Pairwise spectral angles within a single Speclib (symmetric matrix with
# a zero diagonal); used by dist.speclib(method = "sam").
sam_distance <- function (x)
{
  if (!inherits(x, "Speclib"))
    stop("x must be of class 'Speclib'")

  # Bug fix: interpolate masked regions BEFORE extracting the spectra.
  # The original extracted spectra first and then interpolated, so the
  # interpolated object was never used.
  if (attr(x, "setmask"))
    x <- interpolate.mask(x)
  spec <- as.matrix(spectra(x))

  nspec   <- nrow(spec)
  nbands  <- ncol(spec)
  specang <- array(0, dim = c(nspec, nspec))

  # na.rm = TRUE for consistency with sam(); otherwise a single NA makes
  # the condition itself NA and the if() errors out.
  if (max(spec, na.rm = TRUE) > 1)
    spec <- spec / 100

  storage.mode(nspec)   <- "integer"
  storage.mode(nbands)  <- "integer"
  storage.mode(spec)    <- "double"
  storage.mode(specang) <- "double"

  # Reuse the two-library Fortran kernel with x as both inputs.
  distance <- .Fortran("sam",
                       nspec   = nspec,
                       nref    = nspec,
                       nbands  = nbands,
                       spec    = spec,
                       specref = spec,
                       specang = specang,
                       PACKAGE = "hsdar"
                      )$specang
  distance <- as.matrix(distance)
  colnames(distance) <- rownames(spec)
  rownames(distance) <- rownames(spec)
  # Numerical noise can leave tiny non-zero self-angles; force exact zeros.
  diag(distance) <- 0
  return(distance)
}
/R/distance.R
no_license
arturochian/hsdar
R
false
false
3,148
r
# dist.default <- dist # dist <- function(x,...) UseMethod("dist") dist.speclib <- function( x, method="sam", ... ) { if (class(x)!="Speclib") stop("x must be of class 'Speclib'") if (method=="sam") { distance <- sam_distance(x) distance <- as.dist(distance) } else { if (attr(x, "setmask")) x <- interpolate.mask(x) spec <- spectra(x) distance <- dist(spec, method = method, ...) } return(distance) } sam <- function( x, ref ) { if (class(x)!="Speclib") stop("x must be of class 'Speclib'") if (class(ref)!="Speclib") stop("ref must be of class 'Speclib'") spec <- spectra(x) wlx <- x@wavelength specref <- spectra(ref) wlref <- ref@wavelength if (length(wlref) != length(wlx)) { stop("Wavelength between speclibs differ") } spec <- as.matrix(spec) specref <- as.matrix(specref) if (max(spec, na.rm = TRUE)>1) { spec <- spec/100 specref <- specref/100 } if (max(spec, na.rm = TRUE)>1) stop("Spectra in x must be in range [0,1]") if (max(specref, na.rm = TRUE)>1) stop("Spectra in ref must be in range [0,1]") nspec <- nrow(spec) nref <- nrow(specref) nbands <- ncol(spec) specang <- array(0, dim = c(nspec,nref)) storage.mode(nspec) <- "integer" storage.mode(nref) <- "integer" storage.mode(nbands) <- "integer" storage.mode(spec) <- "double" storage.mode(specref) <- "double" storage.mode(specang) <- "double" distance <- .Fortran("sam", nspec=nspec, nref=nref, nbands=nbands, spec=spec, specref=specref, specang=specang, PACKAGE="hsdar" )$specang distance <- as.matrix(distance) colnames(distance) <- rownames(specref) rownames(distance) <- rownames(spec) return(distance) } sam_distance <- function (x) { if (class(x)!="Speclib") stop("x must be of class 'Speclib'") spec <- spectra(x) if (attr(x, "setmask")) x <- interpolate.mask(x) spec <- as.matrix(spec) nspec <- nrow(spec) nbands <- ncol(spec) specang <- array(0, dim = c(nspec,nspec)) if (max(spec)>1) spec <- spec/100 storage.mode(nspec) <- "integer" storage.mode(nbands) <- "integer" storage.mode(spec) <- "double" storage.mode(specang) 
<- "double" distance <- .Fortran("sam", nspec=nspec, nref=nspec, nbands=nbands, spec=spec, specref=spec, specang=specang, PACKAGE="hsdar" )$specang distance <- as.matrix(distance) colnames(distance) <- rownames(spec) rownames(distance) <- rownames(spec) diag(distance) <- 0 return(distance) }
#Import data from csv file hpc <- read.csv("D:/Coursera/Data Science Specialization/Exploratory Data Analysis/household_power_consumption.txt", sep=";") #Convert Date from Factor to Date hpc$Date <- format(hpc$Date) hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y") #Create the subset with data for both dates hpcsubsettemp <- subset(hpc, hpc$Date == "2007-2-1") hpcsubset <- rbind(hpcsubsettemp, subset(hpc, hpc$Date == "2007-2-2")) #plot 2 hpcsubset$DateTime <- strptime(paste(hpcsubset$Date, hpcsubset$Time), "%Y-%m-%d %H:%M:%S") png("plot2.png", width=480, height=480) plot(hpcsubset$DateTime, hpcsubset$Global_active_power, type='l',ylab="Global Active Power (Kilowatts)", xlab="") dev.off()
/plot2.R
no_license
ragsmen/ExData_Plotting1
R
false
false
708
r
#Import data from csv file hpc <- read.csv("D:/Coursera/Data Science Specialization/Exploratory Data Analysis/household_power_consumption.txt", sep=";") #Convert Date from Factor to Date hpc$Date <- format(hpc$Date) hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y") #Create the subset with data for both dates hpcsubsettemp <- subset(hpc, hpc$Date == "2007-2-1") hpcsubset <- rbind(hpcsubsettemp, subset(hpc, hpc$Date == "2007-2-2")) #plot 2 hpcsubset$DateTime <- strptime(paste(hpcsubset$Date, hpcsubset$Time), "%Y-%m-%d %H:%M:%S") png("plot2.png", width=480, height=480) plot(hpcsubset$DateTime, hpcsubset$Global_active_power, type='l',ylab="Global Active Power (Kilowatts)", xlab="") dev.off()
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/randtest.R \name{plotCorr.randtest} \alias{plotCorr.randtest} \title{Correlation plot for randomization test results} \usage{ \method{plotCorr}{randtest}(obj, comp = NULL, main = NULL, xlab = expression(r^2), ylab = "Test statistic", ...) } \arguments{ \item{obj}{results of randomization test (object of class `randtest`)} \item{comp}{number of component to make the plot for} \item{main}{main title for the plot} \item{xlab}{label for x axis} \item{ylab}{label for y axis} \item{...}{other optional arguments} } \description{ Makes a plot with statistic values vs. coefficient of determination between permuted and reference y-values. } \details{ See examples in help for \code{\link{randtest}} function. }
/man/plotCorr.randtest.Rd
no_license
zeehio/mdatools
R
false
false
803
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/randtest.R \name{plotCorr.randtest} \alias{plotCorr.randtest} \title{Correlation plot for randomization test results} \usage{ \method{plotCorr}{randtest}(obj, comp = NULL, main = NULL, xlab = expression(r^2), ylab = "Test statistic", ...) } \arguments{ \item{obj}{results of randomization test (object of class `randtest`)} \item{comp}{number of component to make the plot for} \item{main}{main title for the plot} \item{xlab}{label for x axis} \item{ylab}{label for y axis} \item{...}{other optional arguments} } \description{ Makes a plot with statistic values vs. coefficient of determination between permuted and reference y-values. } \details{ See examples in help for \code{\link{randtest}} function. }
#' Rao-Blackwell Ledoit-Wolf Estimator
#'
#' Authors propose to estimate covariance matrix by minimizing mean squared error with the following formula,
#' \deqn{\hat{\Sigma} = \rho \hat{F} + (1-\rho) \hat{S}}
#' where \eqn{\rho \in (0,1)} a control parameter/weight, \eqn{\hat{S}} an empirical covariance matrix, and \eqn{\hat{F}} a \emph{target} matrix.
#' It is proposed to use a structured estimate \eqn{\hat{F} = \textrm{Tr} (\hat{S}/p) \cdot I_{p\times p}} where \eqn{I_{p\times p}} is an identity matrix of dimension \eqn{p}.
#'
#' @param X an \eqn{(n\times p)} matrix where each row is an observation.
#'
#' @return a named list containing: \describe{
#' \item{S}{a \eqn{(p\times p)} covariance matrix estimate.}
#' \item{rho}{an estimate for convex combination weight.}
#' }
#'
#' @examples
#' ## CRAN-purpose small computation
#' # set a seed for reproducibility
#' set.seed(11)
#'
#' # small data with identity covariance
#' pdim      <- 10
#' dat.small <- matrix(rnorm(5*pdim), ncol=pdim)
#'
#' # run the code
#' out.small <- CovEst.2010RBLW(dat.small)
#'
#' # visualize
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,3), pty="s")
#' image(diag(pdim)[,pdim:1],       main="true cov")
#' image(cov(dat.small)[,pdim:1],   main="sample cov")
#' image(out.small$S[,pdim:1],      main="estimated cov")
#' par(opar)
#'
#' \dontrun{
#' ## want to see how delta is determined according to
#' #  the number of observations we have.
#' nsamples = seq(from=5, to=200, by=5)
#' nnsample = length(nsamples)
#'
#' # we will record two values; rho and norm difference
#' vec.rho   = rep(0, nnsample)
#' vec.normd = rep(0, nnsample)
#' for (i in 1:nnsample){
#'   dat.norun <- matrix(rnorm(nsamples[i]*pdim), ncol=pdim) # sample in R^pdim
#'   out.norun <- CovEst.2010RBLW(dat.norun)                 # run with default
#'
#'   vec.rho[i]   = out.norun$rho
#'   # compare against the true identity covariance of dimension pdim
#'   # (fixed: the original used diag(5) although pdim = 10 above)
#'   vec.normd[i] = norm(out.norun$S - diag(pdim),"f")       # Frobenius norm
#' }
#'
#' # let's visualize the results
#' opar <- par(mfrow=c(1,2))
#' plot(nsamples, vec.rho,   lwd=2, type="b", col="red",  main="estimated rhos")
#' plot(nsamples, vec.normd, lwd=2, type="b", col="blue", main="Frobenius error")
#' par(opar)
#' }
#'
#' @references
#' \insertRef{chen_shrinkage_2010}{CovTools}
#'
#' @export
CovEst.2010RBLW <- function(X){
  #-----------------------------------------------------
  ## PREPROCESSING
  fname    = "CovEst.2010RBLW"
  checker1 = invisible_datamatrix(X, fname)
  n = nrow(X) # number of observations
  p = ncol(X) # dimension

  #-----------------------------------------------------
  ## COMPUTATION
  # 1. MLE for Sigma: rescale the unbiased cov() to the 1/n estimator
  #    (reuse n instead of calling nrow(X) twice, as in the original)
  Shat = stats::cov(X)*(n-1)/n
  # and some related values
  trS  = aux_trace(Shat)
  # tr(S %*% S) for a symmetric S equals the sum of its squared entries;
  # computed in O(p^2) instead of forming the O(p^3) matrix product.
  trS2 = sum(Shat^2)

  # 2. structured (scaled-identity) target F-hat = (tr(S)/p) * I
  Fhat = (trS/p)*diag(p)

  # 3. Rao-Blackwellized shrinkage weight (Chen, Wiesel & Hero, 2010)
  term1  = ((n-2)/n)*trS2 + (trS^2)
  term2  = (n+2)*(trS2 - ((trS^2)/p))
  rhohat = term1/term2
  rhohat = max(min(rhohat, 1), 0) # clamp to [0,1], as in the LW case

  #-----------------------------------------------------
  ## RETURN THE OUTPUT
  output     = list()
  output$S   = (1-rhohat)*Shat + rhohat*Fhat
  output$rho = rhohat
  return(output)
}

# auxiliary ---------------------------------------------------------------
#' @keywords internal
#' @noRd
aux_trace <- function(X){
  # trace of a square matrix: sum of the diagonal entries
  return(sum(diag(X)))
}
/R/CovEst.2010RBLW.R
no_license
vishalbelsare/CovTools
R
false
false
3,296
r
#' Rao-Blackwell Ledoit-Wolf Estimator #' #' Authors propose to estimate covariance matrix by minimizing mean squared error with the following formula, #' \deqn{\hat{\Sigma} = \rho \hat{F} + (1-\rho) \hat{S}} #' where \eqn{\rho \in (0,1)} a control parameter/weight, \eqn{\hat{S}} an empirical covariance matrix, and \eqn{\hat{F}} a \emph{target} matrix. #' It is proposed to use a structured estimate \eqn{\hat{F} = \textrm{Tr} (\hat{S}/p) \cdot I_{p\times p}} where \eqn{I_{p\times p}} is an identity matrix of dimension \eqn{p}. #' #' @param X an \eqn{(n\times p)} matrix where each row is an observation. #' #' @return a named list containing: \describe{ #' \item{S}{a \eqn{(p\times p)} covariance matrix estimate.} #' \item{rho}{an estimate for convex combination weight.} #' } #' #' @examples #' ## CRAN-purpose small computation #' # set a seed for reproducibility #' set.seed(11) #' #' # small data with identity covariance #' pdim <- 10 #' dat.small <- matrix(rnorm(5*pdim), ncol=pdim) #' #' # run the code #' out.small <- CovEst.2010RBLW(dat.small) #' #' # visualize #' opar <- par(no.readonly=TRUE) #' par(mfrow=c(1,3), pty="s") #' image(diag(pdim)[,pdim:1], main="true cov") #' image(cov(dat.small)[,pdim:1], main="sample cov") #' image(out.small$S[,pdim:1], main="estimated cov") #' par(opar) #' #' \dontrun{ #' ## want to see how delta is determined according to #' # the number of observations we have. 
#' nsamples = seq(from=5, to=200, by=5) #' nnsample = length(nsamples) #' #' # we will record two values; rho and norm difference #' vec.rho = rep(0, nnsample) #' vec.normd = rep(0, nnsample) #' for (i in 1:nnsample){ #' dat.norun <- matrix(rnorm(nsamples[i]*pdim), ncol=pdim) # sample in R^5 #' out.norun <- CovEst.2010RBLW(dat.norun) # run with default #' #' vec.rho[i] = out.norun$rho #' vec.normd[i] = norm(out.norun$S - diag(5),"f") # Frobenius norm #' } #' #' # let's visualize the results #' opar <- par(mfrow=c(1,2)) #' plot(nsamples, vec.rho, lwd=2, type="b", col="red", main="estimated rhos") #' plot(nsamples, vec.normd, lwd=2, type="b", col="blue",main="Frobenius error") #' par(opar) #' } #' #' @references #' \insertRef{chen_shrinkage_2010}{CovTools} #' #' @export CovEst.2010RBLW <- function(X){ #----------------------------------------------------- ## PREPROCESSING fname = "CovEst.2010RBLW" checker1 = invisible_datamatrix(X, fname) n = nrow(X) # number of observations p = ncol(X) #----------------------------------------------------- ## COMPUTATION # 1. MLE for Sigma Shat = stats::cov(X)*(nrow(X)-1)/(nrow(X)) # and some related values trS = aux_trace(Shat) trS2 = aux_trace(Shat%*%Shat) # 2. structured estimate Fhat = (trS/p)*diag(p) # 3. rho.. wow.. term1 = ((n-2)/n)*trS2 + (trS^2) term2 = (n+2)*(trS2 - ((trS^2)/p)) rhohat = term1/term2 rhohat = max(min(rhohat, 1),0) # adjust as in LW case #----------------------------------------------------- ## RETURN THE OUTPUT output = list() output$S = (1-rhohat)*Shat + rhohat*Fhat output$rho = rhohat return(output) } # auxiliary --------------------------------------------------------------- #' @keywords internal #' @noRd aux_trace <- function(X){ return(sum(diag(X))) }
# DESCRIPTION: # This file defines some helper functions related to the Capital Bike Sharing # dataset, such as plotting "cnt" graphs and compute error measures. source("bike-imputation.R") # Define some helper functions to calculate errors # Compute RMSE compute.rmse <- function(pred, actual) { sqrt(mean((pred-actual)^2, na.rm=TRUE)) } # Compute RMSLE compute.rmsle <- function(pred, actual) { sqrt(mean((log(pred+1)-log(actual+1))^2, na.rm=TRUE)) } # Plot "cnt" for a range of days bike.plot <- function(index.begin, index.end, data, feature="cnt", add=FALSE, ...) { begin <- index.begin end <- index.end indices <- begin:end if (isTRUE(add)) points(indices, data[indices, feature], type="o", ...) else { plot(indices, data[indices, feature], type="o", xaxt="n", main=paste0("Plotting ", data[begin,"date"], " (", data[begin,"weekday"], ") to ", data[end,"date"], " (", data[end, "weekday"], ")"), ...) axis(1, at=seq(begin, end, 24), labels=FALSE) axis(1, at=seq(begin+12, end, 24), tick=FALSE, labels=paste0(data[seq(begin,end,24), "weekday"], " h=", data[seq(begin,end,24), "holiday"], "\n", data[seq(begin,end,24),"date"])) } } bike.plot.dates <- function(date.begin, date.end, data, feature="cnt", add=FALSE, ...) { begin <- which(data$date == as.POSIXct(date.begin, tz="UTC"))[[1]] end <- which(data$date == as.POSIXct(date.end, tz="UTC"))[[1]] end <- end + 23 bike.plot(begin, end, data=data, feature=feature, add=add, ...) }
/src/bike-helper.R
permissive
philparadis/final-report-bike-sharing
R
false
false
1,592
r
# DESCRIPTION: # This file defines some helper functions related to the Capital Bike Sharing # dataset, such as plotting "cnt" graphs and compute error measures. source("bike-imputation.R") # Define some helper functions to calculate errors # Compute RMSE compute.rmse <- function(pred, actual) { sqrt(mean((pred-actual)^2, na.rm=TRUE)) } # Compute RMSLE compute.rmsle <- function(pred, actual) { sqrt(mean((log(pred+1)-log(actual+1))^2, na.rm=TRUE)) } # Plot "cnt" for a range of days bike.plot <- function(index.begin, index.end, data, feature="cnt", add=FALSE, ...) { begin <- index.begin end <- index.end indices <- begin:end if (isTRUE(add)) points(indices, data[indices, feature], type="o", ...) else { plot(indices, data[indices, feature], type="o", xaxt="n", main=paste0("Plotting ", data[begin,"date"], " (", data[begin,"weekday"], ") to ", data[end,"date"], " (", data[end, "weekday"], ")"), ...) axis(1, at=seq(begin, end, 24), labels=FALSE) axis(1, at=seq(begin+12, end, 24), tick=FALSE, labels=paste0(data[seq(begin,end,24), "weekday"], " h=", data[seq(begin,end,24), "holiday"], "\n", data[seq(begin,end,24),"date"])) } } bike.plot.dates <- function(date.begin, date.end, data, feature="cnt", add=FALSE, ...) { begin <- which(data$date == as.POSIXct(date.begin, tz="UTC"))[[1]] end <- which(data$date == as.POSIXct(date.end, tz="UTC"))[[1]] end <- end + 23 bike.plot(begin, end, data=data, feature=feature, add=add, ...) }
# Imputes missing fathers' ages in Malawi DHS data by bootstrap matching,
# then estimates male fertility rates per 5-year age band for the 2015,
# 2010 and 2004 surveys, and saves the result as an RDS file.
library(dplyr)
library(ggplot2)

set.seed(10)

# Functions -------------------------------------------------------------------
# Defines age categories that group corresponds to
labels = c("(15-20]", "(20-25]", "(25-30]", "(30-35]", "(35-40]", "(40-45]",
           "(45-50]", "(50-55]", "(55-60]", "(60-65]", "(65-70]", "(70-75]",
           "(75-80]", "80+")

# Age groups: maps an age in years to the index (1-14) of its 5-year band
# in `labels`.
# NOTE(review): ages below 15 fall through to the final else branch and are
# coded as group 14 ("80+") -- confirm this is intended for young fathers.
grouping <- function(age){
  if (age >= 15 & age < 20){
    return (1)
  } else if (age >= 20 & age < 25){
    return (2)
  } else if (age >=25 & age < 30){
    return (3)
  } else if (age >= 30 & age < 35){
    return (4)
  } else if (age >= 35 & age < 40){
    return (5)
  } else if (age >= 40 & age < 45){
    return (6)
  } else if (age >= 45 & age < 50){
    return (7)
  } else if (age >= 50 & age < 55){
    return (8)
  } else if (age >= 55 & age < 60){
    return (9)
  } else if (age >= 60 & age < 65){
    return (10)
  } else if (age >= 65 & age < 70){
    return (11)
  } else if (age >= 70 & age < 75){
    return (12)
  } else if (age >= 75 & age < 80){
    return(13)
  } else {
    return (14)
  }
}

# For one man (a row of the unique_men data frame, passed by apply() as a
# character vector), splits his last 60 months of exposure across the
# 15 age categories (category 1 = "under 15" ... 15 = "80+"; thresholds are
# in months: 180 = 15y, 240 = 20y, ..., 960 = 80y).
# row[2] is assumed to be the man's id and row[9] his age in months -- these
# are positional lookups; verify against the column order of unique_men.
# Returns c(id, months spent in each of the 15 categories).
age_spent <- function(row, y){
  id <- as.numeric(row[2])
  age_month <- as.numeric(row[9])
  # Shift the age back to the start of the 5-year window for survey y.
  age <- age_month - 5*(y-1)
  time_age_cat <- vector(mode = "numeric", length = 15)
  if (age > 960){
    if (age > 960 + 60){
      # Entire 60-month window spent in the top ("80+") category.
      time_age_cat[15] = 60
    } else {
      # Window straddles the 80-year boundary: split across categories 14/15.
      age_in_category <- age - 960
      time_age_cat[15] <- age_in_category
      time_age_cat[14] <- 5*12 - age_in_category
    }
  } else if (age > 900){
    age_in_category <- age - 900
    time_age_cat[14] <- age_in_category
    time_age_cat[13] <- 5*12 - age_in_category
  } else if (age > 840){
    age_in_category <- age - 840
    time_age_cat[13] <- age_in_category
    time_age_cat[12] <- 5*12 - age_in_category
  } else if (age > 780){
    age_in_category <- age - 780
    time_age_cat[12] <- age_in_category
    time_age_cat[11] <- 5*12 - age_in_category
  } else if (age > 720){
    age_in_category <- age - 720
    time_age_cat[11] <- age_in_category
    time_age_cat[10] <- 5*12 - age_in_category
  } else if (age > 660){
    age_in_category <- age - 660
    time_age_cat[10] <- age_in_category
    time_age_cat[9] <- 5*12 - age_in_category
  } else if (age > 600){
    age_in_category <- age - 600
    time_age_cat[9] <- age_in_category
    time_age_cat[8] <- 5*12 - age_in_category
  } else if (age > 540){
    age_in_category <- age - 540
    time_age_cat[8] <- age_in_category
    time_age_cat[7] <- 5*12 - age_in_category
  } else if (age > 480){
    age_in_category <- age - 480
    time_age_cat[7] <- age_in_category
    time_age_cat[6] <- 5*12 - age_in_category
  } else if (age > 420){
    age_in_category <- age - 420
    time_age_cat[6] <- age_in_category
    time_age_cat[5] <- 5*12 - age_in_category
  } else if (age > 360){
    age_in_category <- age - 360
    time_age_cat[5] <- age_in_category
    time_age_cat[4] <- 5*12 - age_in_category
  } else if (age > 300){
    age_in_category <- age - 300
    time_age_cat[4] <- age_in_category
    time_age_cat[3] <- 5*12 - age_in_category
  } else if (age > 240){
    age_in_category <- age - 240
    time_age_cat[3] <- age_in_category
    time_age_cat[2] <- 5*12 - age_in_category
  } else if (age > 180){
    age_in_category <- age - 180
    time_age_cat[2] <- age_in_category
    time_age_cat[1] <- 5*12 - age_in_category
  } else {
    time_age_cat[1] <- 60
  }
  # Sanity check: the 15 categories must account for exactly 60 months.
  if(sum(time_age_cat) != 60){
    print(id)
    print((time_age_cat))
    stop()
  }
  return (c(id, time_age_cat))
}

#-----------------------------------------------------------------------------
# Main loop: y = 1/2/3 corresponds to the 2015/2010/2004 DHS surveys.
all_fertility <- NULL
for (y in 1:3){
  # Read in DHS data
  if (y == 1){
    all_data <- readRDS("data/DHS/mw_fathers_15.RDS")
  } else if (y == 2){
    all_data <- readRDS("data/DHS/mw_fathers_10.RDS")
  } else {
    all_data <- readRDS("data/DHS/mw_fathers_04.RDS")
  }
  # Drop covariates that are not needed for the fertility calculation.
  all_data <- select(all_data, - father_wi, -father_bmi, -father_smoking,
                     -household_smoking, -father_anemia)
  # Survey year
  year <- floor(all_data$survey_date[1])
  # Add one to year to make labelling easier later
  year_tmp <- year + 1
  # Separate out men who are fathers
  father_data <- all_data[which(!is.na(all_data$age_child_now)),]
  # Separate out full data: rows with known father age, known mother age and
  # a living father -- used as the matching pool below.
  full <- father_data[which(father_data$age_father_now > 0 &
                            father_data$age_mother_now >0 &
                            father_data$father_status == 1),]
  not_nec_mothers <- father_data[which(father_data$age_father_now > 0),]
  # Makes a null fertility vector
  fertility <- NULL
  # Repeat bootstrapping 10 times
  for (j in 1:10){
    # Assigns fathers to missing children
    for (i in 1:length(father_data$id_household)){
      # Only adjust those with missing father data
      if (is.na(father_data$age_father_now[i])){
        # What is age of this child
        age_child <- father_data$age_child_now[i]
        # What is age of mother
        age_mother <- father_data$age_mother_now[i]
        # If father status is missing or unknown - work out if father should be alive
        if (father_data$father_status[i] > 1){
          other_children <- father_data[which(father_data$age_child_now == age_child),]
          if (!is.na(age_mother)){
            if (age_mother != 0){
              other_children <- other_children[which(other_children$age_mother_now == age_mother),]
            }
          }
          # Impute alive/dead with the empirical survival proportion among
          # comparable children.
          prop_live = sum(other_children$father_status == 1) / length(other_children$father_status)
          p <- runif(1, 0, 1)
          if (p < prop_live){
            father_data$father_status[i] = 1
          } else {
            father_data$father_status[i] = 0
          }
        }
        # Find other children with data that matches
        if (age_mother == 0 | is.na(age_mother)){
          others <- not_nec_mothers[which(not_nec_mothers$age_child_now == age_child),]
        } else {
          # NOTE(review): `& full$age_mother_now` uses the numeric column as
          # a logical filter (non-zero -> TRUE) rather than matching it to
          # age_mother -- `== age_mother` looks intended; confirm.
          others <- full[which(full$age_child_now == age_child & full$age_mother_now),]
        }
        # Won't mater if can't match up older children since subset last 5 years
        if(length(others$id_household) > 0){
          # Randomly select a child from the others list
          new_father <- others[sample(1:length(others$id_household), 1),]
          father_data$age_father_now[i] <- new_father$age_father_now
          father_data$new_id[i] <- new_father$id
        }
      }
    }
    # Removes children with dead father
    father_data <- father_data[which(father_data$father_status == 1),]
    # Data where every child has age of father
    father_data$age_father_birth <- father_data$age_father_now - father_data$age_child_now
    # select children born in last 5 year
    if (y == 1 | y == 3){
      selected_children <- father_data[which(father_data$age_child_now < 5),]
    } else {
      selected_children <- father_data[which(father_data$age_child_now >= 5*(y-1) &
                                             father_data$age_child_now < 5*y),]
    }
    # Summarise number of children in each age
    summ <- selected_children %>% group_by(age_father_birth) %>% summarize(count = n())
    # Plots a histogram
    #p <- ggplot(summ, aes(age_father_birth, count)) + geom_col() + theme_bw()
    #print(p)
    # Calculate number of children in each age category
    summ$group <- sapply(summ$age_father_birth, grouping)
    band_children <- summ %>% group_by(group) %>% summarise("births" = sum(count))
    band_children$label <- labels[1:length(band_children$group)]
    # Work out exposure
    # Removes all lines where father doesn't have an age as we match them to other fathers
    exposure <- all_data[!is.na(all_data$age_father_now),]
    # Subset so only have one line per man
    unique_men <- exposure %>% distinct(id_man, .keep_all= TRUE)
    # Randomly assign each man a birth month
    unique_men$man_age_month <- sample(0:11, length(unique_men$id_man), replace = TRUE)
    # Work out mens age in months
    unique_men$age_months <- unique_men$age_father_now*12 + unique_men$man_age_month
    # Remove men (boys) under the age of 5*years
    unique_men <- unique_men[which(unique_men$age_father_now > 5*y),]
    # Calculate number of years spent in each age category
    exposure_months <- t(apply(unique_men, 1, age_spent, y=y))
    # Column 1 of exposure_months is the id; columns 2:16 are the months.
    exposure_years <- data.frame("exposure_years" = colSums( exposure_months[,2:16])/12)
    exposure_years$label <- c("under 15", labels)
    # Calculate fertility = births per person-year of exposure, per band.
    data <- left_join(exposure_years, band_children)
    data$fertility <- data$births/data$exposure_years
    fertility <- cbind(fertility, data$fertility)
  }
  # Average the 10 bootstrap replicates.
  fertility_rate = rowMeans(fertility)
  # Replicate the rate across calendar years so the 16 output columns
  # (2015 back to 2000) are covered: 5 (2015 survey) + 6 (2010) + 5 (2004).
  if (y == 2){
    for (i in 1:6){
      all_fertility <- cbind(all_fertility, fertility_rate)
    }
  } else {
    for (i in 1:5){
      all_fertility <- cbind(all_fertility, fertility_rate)
    }
  }
  #p <- ggplot(all_fertility %>% filter(!is.na(fertility_rate))) +
  #  geom_point(aes(age, fertility_rate)) + theme_bw() +
  #  ylab("Male fertility rate") + xlab("Age category")
  #print(p)
}
df_fertility <- data.frame(all_fertility)
# Column names y2015 ... y2000 (16 columns, matching the cbind counts above).
names(df_fertility) <- paste0("y", 2015 - 0:15)
# Relies on exposure_years surviving from the final loop iteration.
df_fertility$ages <- factor(exposure_years$label, levels = c("under 15", labels))
saveRDS(df_fertility, file = "data/Malawi/male_fertility.RDS")
# Diagnostic plot of the three survey-year fertility curves.
p <- ggplot(df_fertility) +
  geom_point(aes(ages, y2015, col = "2015")) +
  geom_point(aes(ages, y2010, col = "2010")) +
  geom_point(aes(ages, y2004, col = "2004")) +
  scale_color_manual(values = c("2015" = "black", "2010" = "blue", "2004" = "red")) +
  theme_bw()
print(p)
/TheLancet_global_minimum_estimates_2021/R/DHS/mw_impute_fathers.R
permissive
ImperialCollegeLondon/covid19_orphans
R
false
false
9,525
r
library(dplyr) library(ggplot2) set.seed(10) # Functions ------------------------------------------------------------------- # Defines age categories that group corresponds to labels = c("(15-20]", "(20-25]", "(25-30]", "(30-35]", "(35-40]", "(40-45]", "(45-50]", "(50-55]", "(55-60]", "(60-65]", "(65-70]", "(70-75]", "(75-80]", "80+") # Age groups grouping <- function(age){ if (age >= 15 & age < 20){ return (1) } else if (age >= 20 & age < 25){ return (2) } else if (age >=25 & age < 30){ return (3) } else if (age >= 30 & age < 35){ return (4) } else if (age >= 35 & age < 40){ return (5) } else if (age >= 40 & age < 45){ return (6) } else if (age >= 45 & age < 50){ return (7) } else if (age >= 50 & age < 55){ return (8) } else if (age >= 55 & age < 60){ return (9) } else if (age >= 60 & age < 65){ return (10) } else if (age >= 65 & age < 70){ return (11) } else if (age >= 70 & age < 75){ return (12) } else if (age >= 75 & age < 80){ return(13) } else { return (14) } } age_spent <- function(row, y){ id <- as.numeric(row[2]) age_month <- as.numeric(row[9]) age <- age_month - 5*(y-1) time_age_cat <- vector(mode = "numeric", length = 15) if (age > 960){ if (age > 960 + 60){ time_age_cat[15] = 60 } else { age_in_category <- age - 960 time_age_cat[15] <- age_in_category time_age_cat[14] <- 5*12 - age_in_category } } else if (age > 900){ age_in_category <- age - 900 time_age_cat[14] <- age_in_category time_age_cat[13] <- 5*12 - age_in_category } else if (age > 840){ age_in_category <- age - 840 time_age_cat[13] <- age_in_category time_age_cat[12] <- 5*12 - age_in_category } else if (age > 780){ age_in_category <- age - 780 time_age_cat[12] <- age_in_category time_age_cat[11] <- 5*12 - age_in_category } else if (age > 720){ age_in_category <- age - 720 time_age_cat[11] <- age_in_category time_age_cat[10] <- 5*12 - age_in_category } else if (age > 660){ age_in_category <- age - 660 time_age_cat[10] <- age_in_category time_age_cat[9] <- 5*12 - age_in_category } else if (age > 
600){ age_in_category <- age - 600 time_age_cat[9] <- age_in_category time_age_cat[8] <- 5*12 - age_in_category } else if (age > 540){ age_in_category <- age - 540 time_age_cat[8] <- age_in_category time_age_cat[7] <- 5*12 - age_in_category } else if (age > 480){ age_in_category <- age - 480 time_age_cat[7] <- age_in_category time_age_cat[6] <- 5*12 - age_in_category } else if (age > 420){ age_in_category <- age - 420 time_age_cat[6] <- age_in_category time_age_cat[5] <- 5*12 - age_in_category } else if (age > 360){ age_in_category <- age - 360 time_age_cat[5] <- age_in_category time_age_cat[4] <- 5*12 - age_in_category } else if (age > 300){ age_in_category <- age - 300 time_age_cat[4] <- age_in_category time_age_cat[3] <- 5*12 - age_in_category } else if (age > 240){ age_in_category <- age - 240 time_age_cat[3] <- age_in_category time_age_cat[2] <- 5*12 - age_in_category } else if (age > 180){ age_in_category <- age - 180 time_age_cat[2] <- age_in_category time_age_cat[1] <- 5*12 - age_in_category } else { time_age_cat[1] <- 60 } if(sum(time_age_cat) != 60){ print(id) print((time_age_cat)) stop() } return (c(id, time_age_cat)) } #----------------------------------------------------------------------------- all_fertility <- NULL for (y in 1:3){ # Read in DHS data if (y == 1){ all_data <- readRDS("data/DHS/mw_fathers_15.RDS") } else if (y == 2){ all_data <- readRDS("data/DHS/mw_fathers_10.RDS") } else { all_data <- readRDS("data/DHS/mw_fathers_04.RDS") } all_data <- select(all_data, - father_wi, -father_bmi, -father_smoking, -household_smoking, -father_anemia) # Survey year year <- floor(all_data$survey_date[1]) # Add one to year to make labelling easier later year_tmp <- year + 1 # Separate out men who are fathers father_data <- all_data[which(!is.na(all_data$age_child_now)),] # Separate out full data full <- father_data[which(father_data$age_father_now > 0 & father_data$age_mother_now >0 & father_data$father_status == 1),] not_nec_mothers <- 
father_data[which(father_data$age_father_now > 0),] # Makes a null fertility vector fertility <- NULL # Repeat bootstrapping 10 times for (j in 1:10){ # Assigns fathers to missing children for (i in 1:length(father_data$id_household)){ # Only adjust those with missing father data if (is.na(father_data$age_father_now[i])){ # What is age of this child age_child <- father_data$age_child_now[i] # What is age of mother age_mother <- father_data$age_mother_now[i] # If father status is missing or unknown - work out if father should be alive if (father_data$father_status[i] > 1){ other_children <- father_data[which(father_data$age_child_now == age_child),] if (!is.na(age_mother)){ if (age_mother != 0){ other_children <- other_children[which(other_children$age_mother_now == age_mother),] } } prop_live = sum(other_children$father_status == 1) / length(other_children$father_status) p <- runif(1, 0, 1) if (p < prop_live){ father_data$father_status[i] = 1 } else { father_data$father_status[i] = 0 } } # Find other children with data that matches if (age_mother == 0 | is.na(age_mother)){ others <- not_nec_mothers[which(not_nec_mothers$age_child_now == age_child),] } else { others <- full[which(full$age_child_now == age_child & full$age_mother_now),] } # Won't mater if can't match up older children since subset last 5 years if(length(others$id_household) > 0){ # Randomly select a child from the others list new_father <- others[sample(1:length(others$id_household), 1),] father_data$age_father_now[i] <- new_father$age_father_now father_data$new_id[i] <- new_father$id } } } # Removes children with dead father father_data <- father_data[which(father_data$father_status == 1),] # Data where every child has age of father father_data$age_father_birth <- father_data$age_father_now - father_data$age_child_now # select children born in last 5 year if (y == 1 | y == 3){ selected_children <- father_data[which(father_data$age_child_now < 5),] } else { selected_children <- 
father_data[which(father_data$age_child_now >= 5*(y-1) & father_data$age_child_now < 5*y),] } # Summarise number of children in each age summ <- selected_children %>% group_by(age_father_birth) %>% summarize(count = n()) # Plots a histogram #p <- ggplot(summ, aes(age_father_birth, count)) + geom_col() + theme_bw() #print(p) # Calculate number of children in each age category summ$group <- sapply(summ$age_father_birth, grouping) band_children <- summ %>% group_by(group) %>% summarise("births" = sum(count)) band_children$label <- labels[1:length(band_children$group)] # Work out exposure # Removes all lines where father doesn't have an age as we match them to other fathers exposure <- all_data[!is.na(all_data$age_father_now),] # Subset so only have one line per man unique_men <- exposure %>% distinct(id_man, .keep_all= TRUE) # Randomly assign each man a birth month unique_men$man_age_month <- sample(0:11, length(unique_men$id_man), replace = TRUE) # Work out mens age in months unique_men$age_months <- unique_men$age_father_now*12 + unique_men$man_age_month # Remove men (boys) under the age of 5*years unique_men <- unique_men[which(unique_men$age_father_now > 5*y),] # Calculate number of years spent in each age category exposure_months <- t(apply(unique_men, 1, age_spent, y=y)) exposure_years <- data.frame("exposure_years" = colSums( exposure_months[,2:16])/12) exposure_years$label <- c("under 15", labels) # Calculate fertility data <- left_join(exposure_years, band_children) data$fertility <- data$births/data$exposure_years fertility <- cbind(fertility, data$fertility) } fertility_rate = rowMeans(fertility) if (y == 2){ for (i in 1:6){ all_fertility <- cbind(all_fertility, fertility_rate) } } else { for (i in 1:5){ all_fertility <- cbind(all_fertility, fertility_rate) } } #p <- ggplot(all_fertility %>% filter(!is.na(fertility_rate))) + # geom_point(aes(age, fertility_rate)) + theme_bw() + # ylab("Male fertility rate") + xlab("Age category") #print(p) } df_fertility <- 
data.frame(all_fertility) names(df_fertility) <- paste0("y", 2015 - 0:15) df_fertility$ages <- factor(exposure_years$label, levels = c("under 15", labels)) saveRDS(df_fertility, file = "data/Malawi/male_fertility.RDS") p <- ggplot(df_fertility) + geom_point(aes(ages, y2015, col = "2015")) + geom_point(aes(ages, y2010, col = "2010")) + geom_point(aes(ages, y2004, col = "2004")) + scale_color_manual(values = c("2015" = "black", "2010" = "blue", "2004" = "red")) + theme_bw() print(p)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/filtersels.R \name{filtersels} \alias{filtersels} \title{Subset selection data frames based on manually filtered image files} \usage{ filtersels(X, path = NULL, lspec = FALSE, img.suffix = NULL, it = "jpeg", incl.wav = TRUE, missing = FALSE, index = FALSE) } \arguments{ \item{X}{'selection.table' object or data frame with the following columns: 1) "sound.files": name of the .wav files, 2) "sel": number of the selections. The ouptut of \code{\link{manualoc}} or \code{\link{autodetec}} can be used as the input data frame.} \item{path}{Character string containing the directory path where the sound files are located. If \code{NULL} (default) then the current working directory is used.} \item{lspec}{A logical argument indicating if the image files to be use for filtering were produced by the function \code{\link{lspec}}. All the image files that correspond to a sound file must be deleted in order to be filtered out.} \item{img.suffix}{A character vector of length 1 with the suffix (label) at the end of the names of the image files. Default is \code{NULL} (i.e. no suffix as in the images produced by \code{\link{specreator}}). Ignored if \code{lspec = TRUE}.} \item{it}{A character vector of length 1 giving the image type ("tiff", "jpeg" or "pdf") Default is "jpeg". Note that pdf files can only be generated by \code{\link{lspec2pdf}}.} \item{incl.wav}{Logical. To indicate if sound files extensions (".wav") are included ( \code{TRUE}, default) or not in the image file names.} \item{missing}{Logical. Controls whether the output data frame (or row index if is \code{index = TRUE}) contains the selections with images in the working directory (Default, \code{missing = FALSE}) or the ones with no image.} \item{index}{Logical. If \code{TRUE} and \code{missing = FALSE} the row index for the selections with images in the working directory is returned. 
If \code{missing = TRUE}) then the row index of the ones with no image is returned instead. Default is \code{FALSE}.} } \value{ If all .wav files are ok, returns message "All files are ok!". Otherwise returns "These file(s) cannot be read" message with names of the corrupted .wav files. } \description{ \code{filtersels} subsets selection data frames based on image files that have been manually filtered. } \details{ This function subsets selections (or sound files if \code{lspec} is \code{TRUE}) listed in a data frame based on the image files from spectrogram-creating functions (e.g. \code{\link{specreator}}) in the working directory. Only the selections/sound files with and image in the working directory will remain. This is useful for excluding selections from undesired signals. Note that the image files should be in the working directory (or the directory provided in 'path'). } \examples{ { # First set temporary folder setwd(tempdir()) # save wav file examples data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "selec.table")) writeWave(Phae.long1,"Phae.long1.wav") writeWave(Phae.long2,"Phae.long2.wav") writeWave(Phae.long3,"Phae.long3.wav") specreator(selec.table, flim = c(0, 11), inner.mar = c(4,4.5,2,1), outer.mar = c(4,2,2,1), picsize = 2, res = 300, cexlab = 2, mar = 0.05, wl = 300) #go to the working directory and delete some images #filter selection data frame fmloc <- filtersels(X = selec.table) #this data frame does not have the selections corresponding to the images that were deleted fmloc #now using lspec images lspec(sxrow = 2, rows = 8, pal = reverse.heat.colors, wl = 300, ovlp = 10) #go to the working directory and delete lspec images (the ones with several rows of spectrograms) #filter selection data frame } } \author{ Marcelo Araya-Salas (\email{araya-salas@cornell.edu}) }
/man/filtersels.Rd
no_license
TheoreticalEcology/warbleR
R
false
true
3,807
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/filtersels.R \name{filtersels} \alias{filtersels} \title{Subset selection data frames based on manually filtered image files} \usage{ filtersels(X, path = NULL, lspec = FALSE, img.suffix = NULL, it = "jpeg", incl.wav = TRUE, missing = FALSE, index = FALSE) } \arguments{ \item{X}{'selection.table' object or data frame with the following columns: 1) "sound.files": name of the .wav files, 2) "sel": number of the selections. The ouptut of \code{\link{manualoc}} or \code{\link{autodetec}} can be used as the input data frame.} \item{path}{Character string containing the directory path where the sound files are located. If \code{NULL} (default) then the current working directory is used.} \item{lspec}{A logical argument indicating if the image files to be use for filtering were produced by the function \code{\link{lspec}}. All the image files that correspond to a sound file must be deleted in order to be filtered out.} \item{img.suffix}{A character vector of length 1 with the suffix (label) at the end of the names of the image files. Default is \code{NULL} (i.e. no suffix as in the images produced by \code{\link{specreator}}). Ignored if \code{lspec = TRUE}.} \item{it}{A character vector of length 1 giving the image type ("tiff", "jpeg" or "pdf") Default is "jpeg". Note that pdf files can only be generated by \code{\link{lspec2pdf}}.} \item{incl.wav}{Logical. To indicate if sound files extensions (".wav") are included ( \code{TRUE}, default) or not in the image file names.} \item{missing}{Logical. Controls whether the output data frame (or row index if is \code{index = TRUE}) contains the selections with images in the working directory (Default, \code{missing = FALSE}) or the ones with no image.} \item{index}{Logical. If \code{TRUE} and \code{missing = FALSE} the row index for the selections with images in the working directory is returned. 
If \code{missing = TRUE}) then the row index of the ones with no image is returned instead. Default is \code{FALSE}.} } \value{ If all .wav files are ok, returns message "All files are ok!". Otherwise returns "These file(s) cannot be read" message with names of the corrupted .wav files. } \description{ \code{filtersels} subsets selection data frames based on image files that have been manually filtered. } \details{ This function subsets selections (or sound files if \code{lspec} is \code{TRUE}) listed in a data frame based on the image files from spectrogram-creating functions (e.g. \code{\link{specreator}}) in the working directory. Only the selections/sound files with and image in the working directory will remain. This is useful for excluding selections from undesired signals. Note that the image files should be in the working directory (or the directory provided in 'path'). } \examples{ { # First set temporary folder setwd(tempdir()) # save wav file examples data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "selec.table")) writeWave(Phae.long1,"Phae.long1.wav") writeWave(Phae.long2,"Phae.long2.wav") writeWave(Phae.long3,"Phae.long3.wav") specreator(selec.table, flim = c(0, 11), inner.mar = c(4,4.5,2,1), outer.mar = c(4,2,2,1), picsize = 2, res = 300, cexlab = 2, mar = 0.05, wl = 300) #go to the working directory and delete some images #filter selection data frame fmloc <- filtersels(X = selec.table) #this data frame does not have the selections corresponding to the images that were deleted fmloc #now using lspec images lspec(sxrow = 2, rows = 8, pal = reverse.heat.colors, wl = 300, ovlp = 10) #go to the working directory and delete lspec images (the ones with several rows of spectrograms) #filter selection data frame } } \author{ Marcelo Araya-Salas (\email{araya-salas@cornell.edu}) }
#Libraries ------------------------------------------------------------------------------------------------------------------------------------------------------- library(data.table) library(ggplot2) library(scales) library(plyr) library(outliers) library(mvoutlier) #Input ------------------------------------------------------------------------------------------------------------------------------------------------------- #Integrated Sales Data (Integrated Sales) Input <- fread("F:/Dynamic Pricing/Dynamic Pricing/Data/Output_BOGMADRT.csv", header = T, sep = ";") #Subsets ------------------------------------------------------------------------------------------------------------------------------------------------------- # Test <- subset(Input, OriginDestination == "BOGMAD" & CabinCode == "Y" & HSE == "Dia Normal" & AdvancePurchase == 1) #Graficas ------------------------------------------------------------------------------------------------------------------------------------------------------- #Scatterplot - AveragePrice vs Demand ggplot(Test, aes(x = AveragePrice, y = Demand)) + geom_point(alpha = I(0.5)) + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$AveragePrice), by = 200),200), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 2),2), labels = comma) + labs(x = "Average Price", y = "Demand") ggplot(Test, aes(x = AveragePrice, y = Demand)) + geom_point(alpha = I(0.5)) + geom_smooth(method = "lm") + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$AveragePrice), by = 20000),20000), 
labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") #Scatterplot - SeatAvailability vs Demand ggplot(Test, aes(x = SeatAvailability, y = Demand)) + geom_point(alpha = I(0.5)) + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$SeatAvailability), by = 50),50), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 1),1), labels = comma) + labs(x = "Seat Availability", y = "Demand") #Outliers ---------------------------------------------------------------------------------------------------------------------------------------------------- # MAD Test$OutliersMAD <- scores(Test$SeatAvailability, type = "mad", prob = 0.95) ggplot(Test, aes(x = AveragePrice, y = Demand, colour = OutliersMAD)) + geom_point() + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$AveragePrice), by = 20000),20000), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") TestWMAD <- subset(Test, OutliersMAD == FALSE) ggplot(TestWMAD, aes(x = AveragePrice, y = Demand)) + geom_point() + geom_smooth(method = "lm") + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(TestWMAD$AveragePrice), by = 20000),20000), labels = comma) + 
scale_y_continuous(breaks = round_any(seq(0, max(TestWMAD$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") #Adjusted Quantile Plot LogOutlierVariables <- data.frame(AveragePrice = log(Test$AveragePrice), SeatAvailability = log(Test$SeatAvailability)) LogOutlierVariables$Outlier <- aq.plot(LogOutlierVariables)$outliers Test$OutliersAQ <-LogOutlierVariables$Outlier ggplot(Test, aes(x = AveragePrice, y = Demand, colour = OutliersAQ)) + geom_point() + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$AveragePrice), by = 20000),20000), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") TestWAQ <- subset(Test, OutliersAQ == FALSE) ggplot(TestWAQ, aes(x = AveragePrice, y = Demand)) + geom_point() + geom_smooth(method = "lm") + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(TestWAQ$AveragePrice), by = 20000),20000), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(TestWAQ$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") #Multivariate Gaussian Distribution LogOutlierVariables <- data.frame(AveragePrice = log(Test$AveragePrice), SeatAvailability = log(Test$SeatAvailability)) miu <- matrix(c(mean(LogOutlierVariables$AveragePrice), mean(LogOutlierVariables$SeatAvailability))) cov <- cov(LogOutlierVariables) Matrix <- as.matrix(LogOutlierVariables-miu)%*%cov^(-1)%*%(t(as.matrix(LogOutlierVariables-miu))) Test$OutliersMG <- 
(1/(sqrt(det(cov)*(2*pi)^2)))*(exp((-1/2)*diag(Matrix))) > 0.99 ggplot(Test, aes(x = AveragePrice, y = Demand, colour = OutliersMG)) + geom_point() + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$AveragePrice), by = 30000),30000),labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") TestWMG <- subset(Test, OutliersMG == FALSE) ggplot(TestWMG, aes(x = AveragePrice, y = Demand)) + geom_point() + geom_smooth(method = "lm") + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(TestWMG$AveragePrice), by = 30000),30000), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(TestWMG$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") #Tests ------------------------------------------------------------------------------------------------------------------------------------------------------- #Test Normalidad - Average Price shapiro.test(Test$AveragePrice) shapiro.test(log(Test$AveragePrice)) #Test Normalidad - Seat Availability shapiro.test(Test$SeatAvailability) shapiro.test(log(Test$SeatAvailability)) #Test Normalidad - Demanda shapiro.test(Test$Demand) shapiro.test(log(Test$Demand))
/2 Stage Least Squares/DynamicPricing_Graphs&Tests_AdvancePurchase_Code.R
no_license
cdavalos970/DynamicPricing
R
false
false
8,258
r
#Libraries ------------------------------------------------------------------------------------------------------------------------------------------------------- library(data.table) library(ggplot2) library(scales) library(plyr) library(outliers) library(mvoutlier) #Input ------------------------------------------------------------------------------------------------------------------------------------------------------- #Integrated Sales Data (Integrated Sales) Input <- fread("F:/Dynamic Pricing/Dynamic Pricing/Data/Output_BOGMADRT.csv", header = T, sep = ";") #Subsets ------------------------------------------------------------------------------------------------------------------------------------------------------- # Test <- subset(Input, OriginDestination == "BOGMAD" & CabinCode == "Y" & HSE == "Dia Normal" & AdvancePurchase == 1) #Graficas ------------------------------------------------------------------------------------------------------------------------------------------------------- #Scatterplot - AveragePrice vs Demand ggplot(Test, aes(x = AveragePrice, y = Demand)) + geom_point(alpha = I(0.5)) + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$AveragePrice), by = 200),200), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 2),2), labels = comma) + labs(x = "Average Price", y = "Demand") ggplot(Test, aes(x = AveragePrice, y = Demand)) + geom_point(alpha = I(0.5)) + geom_smooth(method = "lm") + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$AveragePrice), by = 20000),20000), 
labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") #Scatterplot - SeatAvailability vs Demand ggplot(Test, aes(x = SeatAvailability, y = Demand)) + geom_point(alpha = I(0.5)) + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$SeatAvailability), by = 50),50), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 1),1), labels = comma) + labs(x = "Seat Availability", y = "Demand") #Outliers ---------------------------------------------------------------------------------------------------------------------------------------------------- # MAD Test$OutliersMAD <- scores(Test$SeatAvailability, type = "mad", prob = 0.95) ggplot(Test, aes(x = AveragePrice, y = Demand, colour = OutliersMAD)) + geom_point() + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$AveragePrice), by = 20000),20000), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") TestWMAD <- subset(Test, OutliersMAD == FALSE) ggplot(TestWMAD, aes(x = AveragePrice, y = Demand)) + geom_point() + geom_smooth(method = "lm") + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(TestWMAD$AveragePrice), by = 20000),20000), labels = comma) + 
scale_y_continuous(breaks = round_any(seq(0, max(TestWMAD$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") #Adjusted Quantile Plot LogOutlierVariables <- data.frame(AveragePrice = log(Test$AveragePrice), SeatAvailability = log(Test$SeatAvailability)) LogOutlierVariables$Outlier <- aq.plot(LogOutlierVariables)$outliers Test$OutliersAQ <-LogOutlierVariables$Outlier ggplot(Test, aes(x = AveragePrice, y = Demand, colour = OutliersAQ)) + geom_point() + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$AveragePrice), by = 20000),20000), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") TestWAQ <- subset(Test, OutliersAQ == FALSE) ggplot(TestWAQ, aes(x = AveragePrice, y = Demand)) + geom_point() + geom_smooth(method = "lm") + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(TestWAQ$AveragePrice), by = 20000),20000), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(TestWAQ$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") #Multivariate Gaussian Distribution LogOutlierVariables <- data.frame(AveragePrice = log(Test$AveragePrice), SeatAvailability = log(Test$SeatAvailability)) miu <- matrix(c(mean(LogOutlierVariables$AveragePrice), mean(LogOutlierVariables$SeatAvailability))) cov <- cov(LogOutlierVariables) Matrix <- as.matrix(LogOutlierVariables-miu)%*%cov^(-1)%*%(t(as.matrix(LogOutlierVariables-miu))) Test$OutliersMG <- 
(1/(sqrt(det(cov)*(2*pi)^2)))*(exp((-1/2)*diag(Matrix))) > 0.99 ggplot(Test, aes(x = AveragePrice, y = Demand, colour = OutliersMG)) + geom_point() + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(Test$AveragePrice), by = 30000),30000),labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(Test$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") TestWMG <- subset(Test, OutliersMG == FALSE) ggplot(TestWMG, aes(x = AveragePrice, y = Demand)) + geom_point() + geom_smooth(method = "lm") + theme_bw(base_size = 20) + theme(axis.line = element_line(colour = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + scale_x_continuous(breaks = round_any(seq(0, max(TestWMG$AveragePrice), by = 30000),30000), labels = comma) + scale_y_continuous(breaks = round_any(seq(0, max(TestWMG$Demand), by = 1),1), labels = comma) + labs(x = "Average Price", y = "Demand") #Tests ------------------------------------------------------------------------------------------------------------------------------------------------------- #Test Normalidad - Average Price shapiro.test(Test$AveragePrice) shapiro.test(log(Test$AveragePrice)) #Test Normalidad - Seat Availability shapiro.test(Test$SeatAvailability) shapiro.test(log(Test$SeatAvailability)) #Test Normalidad - Demanda shapiro.test(Test$Demand) shapiro.test(log(Test$Demand))
# various useful functions... ## Make time stamp in standardized format. make_time_stamp <- function(time_stamp) { run_date <- gsub("-","_",Sys.time()) run_date <- gsub(":","_",run_date) run_date <- gsub(" ","_",run_date) if(time_stamp==FALSE) run_date <- 'scratch' return(run_date) } firstDay <- function (year, month) { # given a year and month, return a Date object of the first day of that month date_string <- paste(year, month, '01', sep = '-') date <- as.Date (date_string) return (date) } lastDay <- function (year, month) { # given a year and month, return a Aate object of the last day of that month next_month <- ifelse(month == 12, 1, month + 1) next_year <- ifelse(month == 12, year + 1, year) next_date_string <- paste(next_year, next_month, '01', sep = '-') next_date <- as.Date(next_date_string) date <- next_date - 1 return (date) } sentenceCase <- function (text) { # given a vector of text strings `text`, convert to sentence case # convert all to lower case text <- tolower(text) # split at spaces text_list <- strsplit(text, ' ') text_list <- lapply(text_list, function(x) { x[1] <- paste(toupper(substring(x[1], 1, 1)), substring(x[1], 2), sep = "") x <- paste(x, collapse = ' ') return(x) }) text_vector <- unlist(text_list) return (text_vector) } # define functions firstTwo <- function (text) { # given a vector of text strings `text` subset each to only the first two # words (bits separated by spaces) and return this as a vector. 
text <- as.character(text) text_list <- strsplit(text, ' ') text_list <- lapply(text_list, '[', 1:2) text_list <- lapply(text_list, paste, collapse = ' ') text_vector <- unlist(text_list) return (text_vector) } rasterizeSpecies <- function(species, shape, raster, buffer = NULL, folder = 'bats/range_buff/') { # first buffer, then rasterize IUCN range map for species shp <- shape[shape$BINOMIAL == species, ] if (!is.null(buffer)) { # convert buffer from kilometres to decimal degrees, assuming at the equator buffer <- buffer / 111.32 # buffer by this amount shp <- gBuffer(shp, width = buffer) } # rasterize the shapefile tmp <- rasterize(shp, raster, field = 1, background = 0, fun = 'first') writeRaster(tmp, filename = paste0('~/Z/zhi/ebola/', folder, '/', gsub(' ', '_', species)), format = 'GTiff', overwrite = TRUE) rm(tmp) return (NULL) } tidySpecies <- function (filename, template) { # load a raster if it contains any of the species' range, # mask and resave it, else delete it tmp <- raster(filename) if (!is.na(maxValue(tmp)) && maxValue(tmp) == 1) { tmp <- mask(tmp, template) writeRaster(tmp, file = filename, overwrite = TRUE) } else { rm(tmp) file.remove(filename) } return (NULL) } subsamplePolys <- function (data, ...) { # given a presence-background dataset, with multiple rows for some of the # occurrence records, subset it so that there's only one randomly selected # point from each polygon and then take a bootstrap it using `subsample`. # Dots argument is passed to subsample. 
# index for background records (outbreak id = 0) bg_idx <- data$outbreak_id == 0 # subset to get occurrence section only occ <- data[!bg_idx, ] # get the different outbreaks u <- unique(occ$outbreak_id) # loop through, picking an index for each based on the number available occ_idx <- sapply(u, function (id, occ) { idx <- which(occ$outbreak_id == id) sample(idx, 1) }, occ) # get the subsetted dataset dat <- rbind(occ[occ_idx, ], data[bg_idx, ]) # randomly subsample the dataset ans <- subsample(dat, n = nrow(dat), ...) # remove the outbreak ID column ans <- ans[, -which(names(ans) == 'outbreak_id')] return (ans) } # change the polygon IDs of an SPDF so it can be rbinded to something else # nicked from # http://gis.stackexchange.com/questions/32732/proper-way-to-rbind-spatialpolygonsdataframes-with-identical-polygon-ids makeUniform <- function (SPDF) { pref <- substitute(SPDF) #just putting the file name in front. newSPDF <- spChFIDs(SPDF, as.character(paste(pref, rownames(as(SPDF, "data.frame")), sep = "_"))) return (newSPDF) } summarizeStats <- function (path) { # load validation stats stats <- read.csv(paste0(path, 'stats.csv'), row.names = 1) auc <- c(as.character(round(mean(stats$auc, na.rm = TRUE), 2)), as.character(round(sd(stats$auc, na.rm = TRUE), 2))) # load relative influence stats relinf <- read.csv(paste0(path, 'relative_influence.csv'), stringsAsFactors = FALSE) ans <- c(auc_mean = auc[1], auc_sd = auc[2], cov1 = relinf[1, 1], relinf1 = as.character(round(relinf[1, 2], 1)), cov2 = relinf[2, 1], relinf2 = as.character(round(relinf[2, 2], 1)), cov3 = relinf[3, 1], relinf3 = as.character(round(relinf[3, 2], 1)), cov4 = relinf[4, 1], relinf4 = as.character(round(relinf[4, 2], 1)), cov5 = relinf[5, 1], relinf5 = as.character(round(relinf[5, 2], 1))) return (ans) } thresholdRisk <- function (risk_raster, occ, proportion = 1) { # given a raster `risk_raster` giving risk on the (0,1] level and a 2d # dataframe `occ` with columns named 'lat' and 'long' giving 
the latitudes # and longitudes of known occurrence records, # find the threshold value so that `proportion` fraction of the records # fall in areas classified as 'at risk' and return the thresholded map # extract risk values for the occurrence data occ_risk <- extract(risk_raster[[1]], occ[, c('long', 'lat')]) # remove any missing data occ_risk <- na.omit(occ_risk) # get the relevant quantile thresh <- quantile(occ_risk, 1 - proportion, na.rm = TRUE) # classify the raster at_risk_raster <- risk_raster > thresh # return this return (at_risk_raster) } substrRight <- function(x, n){ substr(x, nchar(x)-n+1, nchar(x)) } path_converter <- function(winpath, disc){ if (disc == "J"){ path <- gsub("J:", "/snfs1", gsub("\\\\", "/", winpath)) } return(path) } # copied from sfsmisc integrate.xy <- function(x,fx, a,b, use.spline = TRUE, xtol = 2e-8) { if(is.list(x)) { fx <- x$y; x <- x$x if(length(x) == 0) stop("list 'x' has no valid $x component") } if((n <- length(x)) != length(fx)) stop("'fx' must have same length as 'x'") if(is.unsorted(x)) { i <- sort.list(x); x <- x[i]; fx <- fx[i] } if(any(i <- duplicated(x))) { n <- length(x <- x[!i]) ## we might have to check that the same fx[] are duplicated ## otherwise either give an error or take the mean() of those... fx <- fx[!i] } if(any(diff(x) == 0)) stop("bug in 'duplicated()' killed me: have still multiple x[]!") if(missing(a)) a <- x[1] else if(any(a < x[1])) stop("'a' must NOT be smaller than min(x)") if(missing(b)) b <- x[n] else if(any(b > x[n])) stop("'b' must NOT be larger than max(x)") if(length(a) != 1 && length(b) != 1 && length(a) != length(b)) stop("'a' and 'b' must have length 1 or same length !") else { k <- max(length(a),length(b)) if(any(b < a)) stop("'b' must be elementwise >= 'a'") } if(use.spline) { xy <- spline(x,fx, n = max(1024, 3*n)) ##-- Work around spline(.) BUG: (ex.: range(spline(1:20,1:20,n=95))) if(xy$x[length(xy$x)] < x[n]) { if(TRUE) cat("working around spline(.) 
BUG --- hmm, really?\n\n") xy$x <- c(xy$x, x[n]) xy$y <- c(xy$y, fx[n]) } ## END if work around ---- x <- xy$x; fx <- xy$y n <- length(x) } ab <- unique(c(a,b)) BB <- abs(outer(x,ab,"-")) < (xtol * max(b - a)) if(any(j <- 0 == colSums(BB))) { # the j-th element(s) of ab are not in x[] y <- approx(x,fx, xout = ab[j])$y x <- c(ab[j],x) i <- sort.list(x) x <- x[i]; fx <- c(y,fx)[i]; n <- length(x) } ##--- now we could use 'Simpson's formula IFF the x[i] are equispaced... -- ##--- Since this may well be wrong, just use 'trapezoid formula': dig0 <- floor(-log10(xtol)) # f.match <- function(x,table,dig) match(signif(x,dig), signif(table,dig)) ## was (S+) f.match <- function(x,table) match(as.single(x), as.single(table)) d <- dig0; while(anyNA(ai <- f.match(a,x, d))) d <- d - 1/8 ; ai <- rep_len(ai, k) d <- dig0; while(anyNA(bi <- f.match(b,x, d))) d <- d - 1/8 ; bi <- rep_len(bi, k) dfx <- fx[-c(1,n)] * diff(x,lag = 2) r <- numeric(k) for (i in 1:k) { a <- ai[i]; b <- bi[i] r[i] <- (x[a+1] - x[a])*fx[a] + (x[b] - x[b-1])*fx[b] + sum(dfx[seq(a, length = max(0,b-a-1))]) } r/2 } # sample while checking for length of 1 safe_sample <- function(x) { if (length(x) <= 1) { return(x) } else { return(sample(x,1)) } } threshold_buffer <- function(buffer, raster, threshold) { #threshold should be 0-100 raster_final <- rasterize(buffer, raster, getCover = T) raster_final[raster_final < threshold] <- 0 raster_final[raster_final >= threshold] <- 1 return(raster_final) } get_fit_stats <- function(dat_all, pred_raster, mo = "00") { monthly <- FALSE if (mo != "00") monthly <- TRUE dat0.pts <- dat_all[dat_all$PA==0, c('long', 'lat')] dat0.preds <- raster::extract(pred_raster, dat0.pts) dat1.pts <- dat_all[dat_all$PA==1, c('long', 'lat')] dat1.preds <- raster::extract(pred_raster, dat1.pts) pos_mean_preds <- na.omit(dat1.preds) neg_mean_preds <- na.omit(dat0.preds) sensitivity <- c() fpr <- c() dif = 0.01 for (thresh in seq(0.00, 1, dif)){ tp <- length(pos_mean_preds[which(pos_mean_preds >= 
thresh)]) fn <- length(pos_mean_preds[which(pos_mean_preds < thresh)]) tn <- length(neg_mean_preds[which(neg_mean_preds <= thresh)]) fp <- length(neg_mean_preds[which(neg_mean_preds > thresh)]) sensitivity <- append(sensitivity, tp / (tp + fn)) fpr <- append(fpr, 1 - (tn / (fp + tn))) } distance <- c() for (i in 1:length(fpr)){ distance <- append(distance, dist(rbind(c(fpr[i], sensitivity[i]), c(0, 1)))) } opt_thresh <- max(seq(0.00, 1, dif)[which(distance == min(distance))]) auc <- integrate.xy(fpr, sensitivity, 0, 1) thresh <- opt_thresh tp <- length(pos_mean_preds[which(pos_mean_preds >= thresh)]) fn <- length(pos_mean_preds[which(pos_mean_preds < thresh)]) tn <- length(neg_mean_preds[which(neg_mean_preds <= thresh)]) fp <- length(neg_mean_preds[which(neg_mean_preds > thresh)]) rmse <- sqrt(mean(append((1-pos_mean_preds), (neg_mean_preds - 0))^2)) error_rate <- (fp+fn)/(tp+tn+fp+fn) fit_stats <- data.frame(opt_thresh, auc, rmse, error_rate) return(fit_stats) }
/helper_functions.R
no_license
jshosborne/pandemic_preparedness
R
false
false
12,354
r
# various useful functions... ## Make time stamp in standardized format. make_time_stamp <- function(time_stamp) { run_date <- gsub("-","_",Sys.time()) run_date <- gsub(":","_",run_date) run_date <- gsub(" ","_",run_date) if(time_stamp==FALSE) run_date <- 'scratch' return(run_date) } firstDay <- function (year, month) { # given a year and month, return a Date object of the first day of that month date_string <- paste(year, month, '01', sep = '-') date <- as.Date (date_string) return (date) } lastDay <- function (year, month) { # given a year and month, return a Aate object of the last day of that month next_month <- ifelse(month == 12, 1, month + 1) next_year <- ifelse(month == 12, year + 1, year) next_date_string <- paste(next_year, next_month, '01', sep = '-') next_date <- as.Date(next_date_string) date <- next_date - 1 return (date) } sentenceCase <- function (text) { # given a vector of text strings `text`, convert to sentence case # convert all to lower case text <- tolower(text) # split at spaces text_list <- strsplit(text, ' ') text_list <- lapply(text_list, function(x) { x[1] <- paste(toupper(substring(x[1], 1, 1)), substring(x[1], 2), sep = "") x <- paste(x, collapse = ' ') return(x) }) text_vector <- unlist(text_list) return (text_vector) } # define functions firstTwo <- function (text) { # given a vector of text strings `text` subset each to only the first two # words (bits separated by spaces) and return this as a vector. 
text <- as.character(text) text_list <- strsplit(text, ' ') text_list <- lapply(text_list, '[', 1:2) text_list <- lapply(text_list, paste, collapse = ' ') text_vector <- unlist(text_list) return (text_vector) } rasterizeSpecies <- function(species, shape, raster, buffer = NULL, folder = 'bats/range_buff/') { # first buffer, then rasterize IUCN range map for species shp <- shape[shape$BINOMIAL == species, ] if (!is.null(buffer)) { # convert buffer from kilometres to decimal degrees, assuming at the equator buffer <- buffer / 111.32 # buffer by this amount shp <- gBuffer(shp, width = buffer) } # rasterize the shapefile tmp <- rasterize(shp, raster, field = 1, background = 0, fun = 'first') writeRaster(tmp, filename = paste0('~/Z/zhi/ebola/', folder, '/', gsub(' ', '_', species)), format = 'GTiff', overwrite = TRUE) rm(tmp) return (NULL) } tidySpecies <- function (filename, template) { # load a raster if it contains any of the species' range, # mask and resave it, else delete it tmp <- raster(filename) if (!is.na(maxValue(tmp)) && maxValue(tmp) == 1) { tmp <- mask(tmp, template) writeRaster(tmp, file = filename, overwrite = TRUE) } else { rm(tmp) file.remove(filename) } return (NULL) } subsamplePolys <- function (data, ...) { # given a presence-background dataset, with multiple rows for some of the # occurrence records, subset it so that there's only one randomly selected # point from each polygon and then take a bootstrap it using `subsample`. # Dots argument is passed to subsample. 
# index for background records (outbreak id = 0) bg_idx <- data$outbreak_id == 0 # subset to get occurrence section only occ <- data[!bg_idx, ] # get the different outbreaks u <- unique(occ$outbreak_id) # loop through, picking an index for each based on the number available occ_idx <- sapply(u, function (id, occ) { idx <- which(occ$outbreak_id == id) sample(idx, 1) }, occ) # get the subsetted dataset dat <- rbind(occ[occ_idx, ], data[bg_idx, ]) # randomly subsample the dataset ans <- subsample(dat, n = nrow(dat), ...) # remove the outbreak ID column ans <- ans[, -which(names(ans) == 'outbreak_id')] return (ans) } # change the polygon IDs of an SPDF so it can be rbinded to something else # nicked from # http://gis.stackexchange.com/questions/32732/proper-way-to-rbind-spatialpolygonsdataframes-with-identical-polygon-ids makeUniform <- function (SPDF) { pref <- substitute(SPDF) #just putting the file name in front. newSPDF <- spChFIDs(SPDF, as.character(paste(pref, rownames(as(SPDF, "data.frame")), sep = "_"))) return (newSPDF) } summarizeStats <- function (path) { # load validation stats stats <- read.csv(paste0(path, 'stats.csv'), row.names = 1) auc <- c(as.character(round(mean(stats$auc, na.rm = TRUE), 2)), as.character(round(sd(stats$auc, na.rm = TRUE), 2))) # load relative influence stats relinf <- read.csv(paste0(path, 'relative_influence.csv'), stringsAsFactors = FALSE) ans <- c(auc_mean = auc[1], auc_sd = auc[2], cov1 = relinf[1, 1], relinf1 = as.character(round(relinf[1, 2], 1)), cov2 = relinf[2, 1], relinf2 = as.character(round(relinf[2, 2], 1)), cov3 = relinf[3, 1], relinf3 = as.character(round(relinf[3, 2], 1)), cov4 = relinf[4, 1], relinf4 = as.character(round(relinf[4, 2], 1)), cov5 = relinf[5, 1], relinf5 = as.character(round(relinf[5, 2], 1))) return (ans) } thresholdRisk <- function (risk_raster, occ, proportion = 1) { # given a raster `risk_raster` giving risk on the (0,1] level and a 2d # dataframe `occ` with columns named 'lat' and 'long' giving 
the latitudes # and longitudes of known occurrence records, # find the threshold value so that `proportion` fraction of the records # fall in areas classified as 'at risk' and return the thresholded map # extract risk values for the occurrence data occ_risk <- extract(risk_raster[[1]], occ[, c('long', 'lat')]) # remove any missing data occ_risk <- na.omit(occ_risk) # get the relevant quantile thresh <- quantile(occ_risk, 1 - proportion, na.rm = TRUE) # classify the raster at_risk_raster <- risk_raster > thresh # return this return (at_risk_raster) } substrRight <- function(x, n){ substr(x, nchar(x)-n+1, nchar(x)) } path_converter <- function(winpath, disc){ if (disc == "J"){ path <- gsub("J:", "/snfs1", gsub("\\\\", "/", winpath)) } return(path) } # copied from sfsmisc integrate.xy <- function(x,fx, a,b, use.spline = TRUE, xtol = 2e-8) { if(is.list(x)) { fx <- x$y; x <- x$x if(length(x) == 0) stop("list 'x' has no valid $x component") } if((n <- length(x)) != length(fx)) stop("'fx' must have same length as 'x'") if(is.unsorted(x)) { i <- sort.list(x); x <- x[i]; fx <- fx[i] } if(any(i <- duplicated(x))) { n <- length(x <- x[!i]) ## we might have to check that the same fx[] are duplicated ## otherwise either give an error or take the mean() of those... fx <- fx[!i] } if(any(diff(x) == 0)) stop("bug in 'duplicated()' killed me: have still multiple x[]!") if(missing(a)) a <- x[1] else if(any(a < x[1])) stop("'a' must NOT be smaller than min(x)") if(missing(b)) b <- x[n] else if(any(b > x[n])) stop("'b' must NOT be larger than max(x)") if(length(a) != 1 && length(b) != 1 && length(a) != length(b)) stop("'a' and 'b' must have length 1 or same length !") else { k <- max(length(a),length(b)) if(any(b < a)) stop("'b' must be elementwise >= 'a'") } if(use.spline) { xy <- spline(x,fx, n = max(1024, 3*n)) ##-- Work around spline(.) BUG: (ex.: range(spline(1:20,1:20,n=95))) if(xy$x[length(xy$x)] < x[n]) { if(TRUE) cat("working around spline(.) 
BUG --- hmm, really?\n\n") xy$x <- c(xy$x, x[n]) xy$y <- c(xy$y, fx[n]) } ## END if work around ---- x <- xy$x; fx <- xy$y n <- length(x) } ab <- unique(c(a,b)) BB <- abs(outer(x,ab,"-")) < (xtol * max(b - a)) if(any(j <- 0 == colSums(BB))) { # the j-th element(s) of ab are not in x[] y <- approx(x,fx, xout = ab[j])$y x <- c(ab[j],x) i <- sort.list(x) x <- x[i]; fx <- c(y,fx)[i]; n <- length(x) } ##--- now we could use 'Simpson's formula IFF the x[i] are equispaced... -- ##--- Since this may well be wrong, just use 'trapezoid formula': dig0 <- floor(-log10(xtol)) # f.match <- function(x,table,dig) match(signif(x,dig), signif(table,dig)) ## was (S+) f.match <- function(x,table) match(as.single(x), as.single(table)) d <- dig0; while(anyNA(ai <- f.match(a,x, d))) d <- d - 1/8 ; ai <- rep_len(ai, k) d <- dig0; while(anyNA(bi <- f.match(b,x, d))) d <- d - 1/8 ; bi <- rep_len(bi, k) dfx <- fx[-c(1,n)] * diff(x,lag = 2) r <- numeric(k) for (i in 1:k) { a <- ai[i]; b <- bi[i] r[i] <- (x[a+1] - x[a])*fx[a] + (x[b] - x[b-1])*fx[b] + sum(dfx[seq(a, length = max(0,b-a-1))]) } r/2 } # sample while checking for length of 1 safe_sample <- function(x) { if (length(x) <= 1) { return(x) } else { return(sample(x,1)) } } threshold_buffer <- function(buffer, raster, threshold) { #threshold should be 0-100 raster_final <- rasterize(buffer, raster, getCover = T) raster_final[raster_final < threshold] <- 0 raster_final[raster_final >= threshold] <- 1 return(raster_final) } get_fit_stats <- function(dat_all, pred_raster, mo = "00") { monthly <- FALSE if (mo != "00") monthly <- TRUE dat0.pts <- dat_all[dat_all$PA==0, c('long', 'lat')] dat0.preds <- raster::extract(pred_raster, dat0.pts) dat1.pts <- dat_all[dat_all$PA==1, c('long', 'lat')] dat1.preds <- raster::extract(pred_raster, dat1.pts) pos_mean_preds <- na.omit(dat1.preds) neg_mean_preds <- na.omit(dat0.preds) sensitivity <- c() fpr <- c() dif = 0.01 for (thresh in seq(0.00, 1, dif)){ tp <- length(pos_mean_preds[which(pos_mean_preds >= 
thresh)]) fn <- length(pos_mean_preds[which(pos_mean_preds < thresh)]) tn <- length(neg_mean_preds[which(neg_mean_preds <= thresh)]) fp <- length(neg_mean_preds[which(neg_mean_preds > thresh)]) sensitivity <- append(sensitivity, tp / (tp + fn)) fpr <- append(fpr, 1 - (tn / (fp + tn))) } distance <- c() for (i in 1:length(fpr)){ distance <- append(distance, dist(rbind(c(fpr[i], sensitivity[i]), c(0, 1)))) } opt_thresh <- max(seq(0.00, 1, dif)[which(distance == min(distance))]) auc <- integrate.xy(fpr, sensitivity, 0, 1) thresh <- opt_thresh tp <- length(pos_mean_preds[which(pos_mean_preds >= thresh)]) fn <- length(pos_mean_preds[which(pos_mean_preds < thresh)]) tn <- length(neg_mean_preds[which(neg_mean_preds <= thresh)]) fp <- length(neg_mean_preds[which(neg_mean_preds > thresh)]) rmse <- sqrt(mean(append((1-pos_mean_preds), (neg_mean_preds - 0))^2)) error_rate <- (fp+fn)/(tp+tn+fp+fn) fit_stats <- data.frame(opt_thresh, auc, rmse, error_rate) return(fit_stats) }
context("Creating jobs") test_that("R jobs can be created", { rjob <- job_r({1+1}) expect_s3_class(rjob, "job") expect_s3_class(rjob, "job_r") expect_identical(evaluate(rjob), 2) rjob <- job_r(function() 1+1) expect_s3_class(rjob, "job") expect_s3_class(rjob, "job_r") expect_identical(evaluate(rjob), 2) rjob <- job_r("1+1") expect_s3_class(rjob, "job") expect_s3_class(rjob, "job_r") expect_identical(evaluate(rjob), 2) rjob <- job_r_file("../jobs/rjob1.R") expect_s3_class(rjob, "job") expect_s3_class(rjob, "job_r") expect_s3_class(rjob, "job_file") expect_s3_class(rjob, "job_r_file") expect_equal(evaluate(rjob), 10L) }) context("Detecting dependencies") test_that("node names are identified in R expressions", { rexpr1 <- expression({.RFLOW[["ENV.node1"]]}) expect_identical("ENV.node1", detect_deps(rexpr1, "ENV.node1")) rexpr2 <- parse(text = '#.RFLOW[["ENV.node1"]]') expect_identical(character(), detect_deps(rexpr2, "ENV.node1")) })
/tests/testthat/test_04_verification.R
permissive
vh-d/Rflow
R
false
false
997
r
context("Creating jobs") test_that("R jobs can be created", { rjob <- job_r({1+1}) expect_s3_class(rjob, "job") expect_s3_class(rjob, "job_r") expect_identical(evaluate(rjob), 2) rjob <- job_r(function() 1+1) expect_s3_class(rjob, "job") expect_s3_class(rjob, "job_r") expect_identical(evaluate(rjob), 2) rjob <- job_r("1+1") expect_s3_class(rjob, "job") expect_s3_class(rjob, "job_r") expect_identical(evaluate(rjob), 2) rjob <- job_r_file("../jobs/rjob1.R") expect_s3_class(rjob, "job") expect_s3_class(rjob, "job_r") expect_s3_class(rjob, "job_file") expect_s3_class(rjob, "job_r_file") expect_equal(evaluate(rjob), 10L) }) context("Detecting dependencies") test_that("node names are identified in R expressions", { rexpr1 <- expression({.RFLOW[["ENV.node1"]]}) expect_identical("ENV.node1", detect_deps(rexpr1, "ENV.node1")) rexpr2 <- parse(text = '#.RFLOW[["ENV.node1"]]') expect_identical(character(), detect_deps(rexpr2, "ENV.node1")) })
# Plot function plotComparison <- function(x, ks, tocompareName, ntimes = 100, threshold = 800){ # Define times as x-labels for each k value times <- sapply(ks, function(k) { # Create labels and extract the relevant information op <- microbenchmark( New_PIKK = PIKKVF(x, k, threshold), RSample = RSample(x, k), times = ntimes ) by(op$time, op$expr, function(t) mean(t) / 1000) } ) # Transponse k-values and create a data frame times <- t(times) times <- as.data.frame(cbind(times, k = ks)) # Define the time values and keys of the plot times <- gather(times, -k, key = "fun", value = "time") pd <- position_dodge(width = 0.2) # Create the plot ggplot(times, aes(x = k, y = time, group = fun, color = fun)) + theme(axis.line = element_line(size = 1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + geom_point(position = pd) + geom_line(position = pd) + xlab("k Values") + ylab("Time [microseconds]") + ggtitle(paste("PIKK Comparison [x size: ", as.character(length(x)), "]", sep = "")) } # Plot function (With original PIKK) plotComparisonAll <- function(x, ks, tocompareName, ntimes = 100, threshold = 800){ # Define times as x-labels for each k value times <- sapply(ks, function(k) { # Create labels and extract the relevant information op <- microbenchmark( PIKK = PIKK(x, k), New_PIKK = PIKKVF(x, k, threshold), RSample = RSample(x, k), times = ntimes ) by(op$time, op$expr, function(t) mean(t) / 1000) } ) # Transponse k-values and create a data frame times <- t(times) times <- as.data.frame(cbind(times, k = ks)) # Define the time values and keys of the plot times <- gather(times, -k, key = "fun", value = "time") pd <- position_dodge(width = 0.2) # Create the plot ggplot(times, aes(x = k, y = time, group = fun, color = fun)) + theme(axis.line = element_line(size = 1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"), panel.grid.minor = 
element_blank(), panel.border = element_blank(), panel.background = element_blank()) + geom_point(position = pd) + geom_line(position = pd) + xlab("k Values") + ylab("Time [microseconds]") + ggtitle(paste("PIKK Comparison [x size: ", as.character(length(x)), "]", sep = "")) } # Plot function (With original PIKK) plotComparisonPIKKAll_A <- function(x, ks, tocompareName, ntimes = 100, threshold = 800){ # Define times as x-labels for each k value times <- sapply(ks, function(k) { # Create labels and extract the relevant information op <- microbenchmark( PIKK = PIKK(x, k), New_PIKK1 = PIKKV1(x, k), New_PIKK2 = PIKKV2(x, k), New_PIKKVF = PIKKVF(x, k, threshold), RSample = RSample(x, k), times = ntimes ) by(op$time, op$expr, function(t) mean(t) / 1000) } ) # Transponse k-values and create a data frame times <- t(times) times <- as.data.frame(cbind(times, k = ks)) # Define the time values and keys of the plot times <- gather(times, -k, key = "fun", value = "time") pd <- position_dodge(width = 0.2) # Create the plot ggplot(times, aes(x = k, y = time, group = fun, color = fun)) + theme(axis.line = element_line(size = 1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank()) + geom_point(position = pd) + geom_line(position = pd) + xlab("k Values") + ylab("Time [microseconds]") + ggtitle(paste("PIKK Comparison [x size: ", as.character(length(x)), "]", sep = "")) }
/ps4/plotPIKKVF.R
no_license
cpaismz89/stats243
R
false
false
4,099
r
# Benchmark PIKKVF against RSample for each k in `ks` and plot the mean
# runtime (microseconds) per implementation. `tocompareName` is not used;
# `threshold` is forwarded to PIKKVF.
plotComparison <- function(x, ks, tocompareName, ntimes = 100, threshold = 800) {
  # One column per k; rows are the benchmarked expressions, values are mean
  # runtimes converted from nanoseconds to microseconds.
  raw_us <- sapply(ks, function(k) {
    bench <- microbenchmark(
      New_PIKK = PIKKVF(x, k, threshold),
      RSample = RSample(x, k),
      times = ntimes
    )
    by(bench$time, bench$expr, function(ns) mean(ns) / 1000)
  })
  # Transpose so rows correspond to k values, then go to long format.
  wide <- as.data.frame(cbind(t(raw_us), k = ks))
  long <- gather(wide, -k, key = "fun", value = "time")
  dodge <- position_dodge(width = 0.2)
  ggplot(long, aes(x = k, y = time, group = fun, color = fun)) +
    theme(axis.line = element_line(size = 1, colour = "black"),
          panel.grid.major = element_line(colour = "#d3d3d3"),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank()) +
    geom_point(position = dodge) +
    geom_line(position = dodge) +
    xlab("k Values") +
    ylab("Time [microseconds]") +
    ggtitle(paste("PIKK Comparison [x size: ", as.character(length(x)), "]", sep = ""))
}

# Same comparison, with the original PIKK implementation included.
plotComparisonAll <- function(x, ks, tocompareName, ntimes = 100, threshold = 800) {
  raw_us <- sapply(ks, function(k) {
    bench <- microbenchmark(
      PIKK = PIKK(x, k),
      New_PIKK = PIKKVF(x, k, threshold),
      RSample = RSample(x, k),
      times = ntimes
    )
    by(bench$time, bench$expr, function(ns) mean(ns) / 1000)
  })
  wide <- as.data.frame(cbind(t(raw_us), k = ks))
  long <- gather(wide, -k, key = "fun", value = "time")
  dodge <- position_dodge(width = 0.2)
  ggplot(long, aes(x = k, y = time, group = fun, color = fun)) +
    theme(axis.line = element_line(size = 1, colour = "black"),
          panel.grid.major = element_line(colour = "#d3d3d3"),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank()) +
    geom_point(position = dodge) +
    geom_line(position = dodge) +
    xlab("k Values") +
    ylab("Time [microseconds]") +
    ggtitle(paste("PIKK Comparison [x size: ", as.character(length(x)), "]", sep = ""))
}

# Full comparison: every PIKK variant (original, V1, V2, VF) plus RSample.
plotComparisonPIKKAll_A <- function(x, ks, tocompareName, ntimes = 100, threshold = 800) {
  raw_us <- sapply(ks, function(k) {
    bench <- microbenchmark(
      PIKK = PIKK(x, k),
      New_PIKK1 = PIKKV1(x, k),
      New_PIKK2 = PIKKV2(x, k),
      New_PIKKVF = PIKKVF(x, k, threshold),
      RSample = RSample(x, k),
      times = ntimes
    )
    by(bench$time, bench$expr, function(ns) mean(ns) / 1000)
  })
  wide <- as.data.frame(cbind(t(raw_us), k = ks))
  long <- gather(wide, -k, key = "fun", value = "time")
  dodge <- position_dodge(width = 0.2)
  ggplot(long, aes(x = k, y = time, group = fun, color = fun)) +
    theme(axis.line = element_line(size = 1, colour = "black"),
          panel.grid.major = element_line(colour = "#d3d3d3"),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank()) +
    geom_point(position = dodge) +
    geom_line(position = dodge) +
    xlab("k Values") +
    ylab("Time [microseconds]") +
    ggtitle(paste("PIKK Comparison [x size: ", as.character(length(x)), "]", sep = ""))
}
# Shade regions under the standard-normal density curve: the central region
# |x| <= 1.5 in grey, and the two tails beyond |x| >= 2 in green.
# Note: each region reuses the globals `x` and `y`; the final values are the
# right-tail grid and its density (preserved from the original script).
x <- seq(-3, 3, length = 300)
y <- dnorm(x)
# Bare density curve, no axes or labels. Use FALSE, not the reassignable F.
plot(x, dnorm(x), type = 'l', axes = FALSE, xlab = '', ylab = '')
abline(h = 0)
# Central region [-1.5, 1.5], filled grey (palette colour 8).
x <- seq(-1.5, 1.5, length = 100)
y <- dnorm(x)
polygon(c(-1.5, x, 1.5), c(0, y, 0), border = NA, col = 8)
# Left tail [-3, -2], filled green.
x <- seq(-3, -2, length = 100)
y <- dnorm(x)
polygon(c(-3, x, -2), c(0, y, 0), border = NA, col = 'green')
# Right tail [2, 3], filled green.
x <- seq(2, 3, length = 100)
y <- dnorm(x)
polygon(c(2, x, 3), c(0, y, 0), border = NA, col = 'green')
/code/fraud.R
permissive
gaolei786/gaolei786.github.com
R
false
false
440
r
# Shade regions under a standard-normal density curve:
# grey for the central |x| <= 1.5 region, green for the two tails
# beyond |x| >= 2. Each region reuses the globals `x` and `y`.
x <- seq(-3, 3, length = 300)
y <- dnorm(x)
# Draw the bare density curve with no axes or labels.
plot(x, dnorm(x), type = 'l', axes = F, xlab = '', ylab = '')
abline(h = 0)
# Central region [-1.5, 1.5], filled grey (palette colour 8).
x <- seq(-1.5, 1.5, length = 100)
y <- dnorm(x)
polygon(c(-1.5, x, 1.5), c(0, y, 0), border = NA, col = 8)
# Left tail [-3, -2], filled green.
x <- seq(-3, -2, length = 100)
y <- dnorm(x)
polygon(c(-3, x, -2), c(0, y, 0), border = NA, col = 'green')
# Right tail [2, 3], filled green.
x <- seq(2, 3, length = 100)
y <- dnorm(x)
polygon(c(2, x, 3), c(0, y, 0), border = NA, col = 'green')
\name{NetSelectivity} \alias{NetSelectivity} \title{Net selectivity of the return distribution} \usage{ NetSelectivity(Ra, Rb, Rf = 0, ...) } \arguments{ \item{Ra}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns} \item{Rb}{return vector of the benchmark asset} \item{Rf}{risk free rate, in same period as your returns} \item{\dots}{any other passthru parameters} } \description{ Net selectivity is the remaining selectivity after deducting the amount of return required to justify not being fully diversified } \details{ If net selectivity is negative the portfolio manager has not justified the loss of diversification \deqn{Net selectivity = \alpha - d}{Net selectivity = Selectivity - diversification} where \eqn{\alpha} is the selectivity and \eqn{d} is the diversification } \examples{ data(portfolio_bacon) print(NetSelectivity(portfolio_bacon[,1], portfolio_bacon[,2])) #expected -0.017 data(managers) print(NetSelectivity(managers['1996',1], managers['1996',8])) print(NetSelectivity(managers['1996',1:5], managers['1996',8])) } \author{ Matthieu Lestel } \references{ Carl Bacon, \emph{Practical portfolio performance measurement and attribution}, second edition 2008 p.78 } \keyword{distribution} \keyword{models} \keyword{multivariate} \keyword{ts}
/man/NetSelectivity.Rd
no_license
guillermozbta/portafolio-master
R
false
false
1,304
rd
\name{NetSelectivity} \alias{NetSelectivity} \title{Net selectivity of the return distribution} \usage{ NetSelectivity(Ra, Rb, Rf = 0, ...) } \arguments{ \item{Ra}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns} \item{Rb}{return vector of the benchmark asset} \item{Rf}{risk free rate, in same period as your returns} \item{\dots}{any other passthru parameters} } \description{ Net selectivity is the remaining selectivity after deducting the amount of return required to justify not being fully diversified } \details{ If net selectivity is negative the portfolio manager has not justified the loss of diversification \deqn{Net selectivity = \alpha - d}{Net selectivity = Selectivity - diversification} where \eqn{\alpha} is the selectivity and \eqn{d} is the diversification } \examples{ data(portfolio_bacon) print(NetSelectivity(portfolio_bacon[,1], portfolio_bacon[,2])) #expected -0.017 data(managers) print(NetSelectivity(managers['1996',1], managers['1996',8])) print(NetSelectivity(managers['1996',1:5], managers['1996',8])) } \author{ Matthieu Lestel } \references{ Carl Bacon, \emph{Practical portfolio performance measurement and attribution}, second edition 2008 p.78 } \keyword{distribution} \keyword{models} \keyword{multivariate} \keyword{ts}
########################################################################################## # # MMC Network & Acquisitions # # Create acquisition logit model covariates arrays # # @author sdowning.bm02g@nctu.edu.tw # # @export [list] cb # # # ## update founded_on,closed_on dates - Jin-Su's Email 2018-04-23 # ## C:\\Users\\T430\\Google Drive\\PhD\\Dissertation\\competition networks\\compnet2\\founded_on_date_edit # co.date <- cb$readCsv('founded_on_date_edit/missing_companies_20180330.csv') # ########################################################################################## library(network, quietly = T) library(texreg, quietly = T) library(igraph, quietly = T) library(plyr, quietly = T) library(reshape2) library(intergraph) ##=================== ## DIRECTORIES ##------------------- .compustat_dir <- '/home/sdowning/data/compustat' ##=================== ## PARAMS ##------------------- ### ABSORB LEVELS -- ego net order of acquisition target ### 1 = only direct competitors of acquisition target ### 2 = direct and 2nd-order indirect competitors of acquisition target ### ... etc. 
absorb.levels <- 1 ##-------------------------------------------------------------- ## ## CREATE FIRM NETWORK PERIOD LISTS ## ##-------------------------------------------------------------- ## get date periods and ego network based on settings times <- sapply(years, function(x)paste0(x,'-01-01')) start <- times[1] end <- times[length(times)] ## EGO NETWORK g.ego <- igraph::make_ego_graph(graph = g.full, nodes = V(g.full)[V(g.full)$name==name_i], order = d, mode = 'all')[[1]] ## NETWORKS IN TIMEFRAME TO PROCESS NODE COLLAPSE AND POCESS COVARIATES g.pd <- acf$makePdGraph(g.ego, start, end, isolates.remove=TRUE) ## ego network g.full.pd <- acf$makePdGraph(g.full, start, end, isolates.remove=TRUE) ## full network ## CHECK NETWORK PERIOD SIZES sapply(2:length(times), function(i){gi=acf$makePdGraph(g.ego, times[i-1], times[i], TRUE); return(c(e=ecount(gi),v=vcount(gi)))}) ##-------------------------------------------------- ## LOAD IN EGO NETWORK AND GLOBAL NETWORK ##-------------------------------------------------- g.pd.file <- file.path(.data_dir,sprintf('g_%s_d%s_NCINIT_%s_%s.graphml',name_i,d,start,end)) g.full.pd.file <- file.path(.data_dir,sprintf('g_full_NCINIT_%s_%s.graphml',start,end)) cat(sprintf('qsub_2: loading graphml files %s %s',g.pd.file, g.full.pd.file)) g.pd <- igraph::read.graph(g.pd.file, format='graphml') g.full.pd <- igraph::read.graph(g.full.pd.file, format='graphml') ## Full timeframe Clusters V(g.pd)$nc <- as.integer(igraph::multilevel.community(g.pd)$membership) V(g.full.pd)$nc <- as.integer(igraph::multilevel.community(g.full.pd)$membership) # ## assign UUIDs if not exists # idx.uuid.na <- which(is.na(V(g.pd)$company_uuid) | V(g.pd)$company_uuid=="") # for (idx in idx.uuid.na) { # V(g.pd)$company_uuid[idx] <- cb$uuid() # } # idx.full.uuid.na <- which(is.na(V(g.full.pd)$company_uuid) | V(g.full.pd)$company_uuid=="") # for (idx in idx.full.uuid.na) { # V(g.full.pd)$company_uuid[idx] <- cb$uuid() # } ## cache original clusters V(g.pd)$nc.orig <- 
V(g.pd)$nc V(g.full.pd)$nc.orig <- V(g.full.pd)$nc ## keep original timeframe graph g.pd.orig <- g.pd g.full.pd.orig <- g.full.pd ##---------------------------------- ##============================================ ## ## DATA FOR CONTROLS AND PROPENSITY SCORES ## ##-------------------------------------------- ##-------------------------------------------- ## Load updated compustat data ##-------------------------------------------- csfunda.file <- file.path(.compustat_dir,'fundamentals-annual-UPDATED.csv') if (!file.exists(csfunda.file)) { ## if file not exists, then run script to create it stop(sprintf('stop: cannot find csfunda.file %s',csfunda.file)) #source(file.path(.script_dir,'acqlogit_compustat_update.R')) } csa2.all <- cb$readCsv(csfunda.file) minyr <- min(unique(csa2.all$fyear), na.rm = T) csa2 <- csa2.all[which(csa2.all$fyear != minyr & !is.na(csa2.all$fyear)), ] ##-------------------------------------------- ## LOAD SEGMENTS DATA FOR DIVERSIFICATION ##-------------------------------------------- seg <- read.csv(file.path(.compustat_dir,'segments.csv'), na=c(NA,'','NA'), stringsAsFactors = F, fill = T) # segcus <- read.csv(file.path(getwd(),'compustat','segments-customer.csv'), na=c(NA,'','NA'), stringsAsFactors = F, fill = T) col.seg <- c('conm','tic','datadate','srcdate','stype','snms','soptp1','geotp','sic','SICS1','SICS2','sales','revts','nis') seg2 <- seg[seg$soptp1=='PD_SRVC',col.seg] ## exclude geographic MARKET segments; include PD_SRVC product/service segments seg2$date <- sapply(seg2$datadate, function(x){ x <- as.character(x) return(sprintf('%s-%s-%s',str_sub(x,1,4),str_sub(x,5,6),str_sub(x,7,8))) }) seg2$year <- sapply(seg2$date, function(x)as.integer(str_sub(x,1,4))) # ### # print(head(seg2[which(seg2$tic=='GOOGL'),],12)) # View(seg2[which(seg2$tic=='GOOGL'),]) ##-------------------------------------------- ## MERGE Compustat Data into Compnet Dataframe ##-------------------------------------------- ## EGO GRAPH VERTEX DATARAME MERGE WITH 
COMPUSTAT DATA g.full.pd.df <- as_data_frame(g.full.pd, what = 'vertices') ## rename graph vertex name to company_name_unique names(g.full.pd.df)[which(names(g.full.pd.df)=='name')] <- 'company_name_unique' ## crunchbase ipo data fields for crunchbase compnet firms ipocols <- c('company_name_unique','stock_symbol','stock_exchange_symbol','went_public_on') g.full.pd.df <- merge(g.full.pd.df, cb$co_ipo[,ipocols], by.x='company_name_unique', by.y='company_name_unique', all.x=T, all.y=F) ##============================================ ## MANUAL CORRECTIONS (COMPUSTAT STOCK SYMBOLS CHANGED AFTER CRUNCHBASE DATA) ##-------------------------------------------- # ## SEARCH COMPUSTAT NAMES # csa2[grep('SONY',csa2$conm),] # ## SEARCH COMPUSTAT TICKER SYMBOLS # csa2[grep('software',csa2$conm,ignore.case = T),c('conm','tic')] # ## SEARCH CRUNCHBASE IPOS # cb$co_ipo[grep('software-ag',cb$co_ipo$company_name_unique),c('company_name_unique','stock_symbol','stock_exchange_symbol')] # # > unique(as.character(df.sub$i[is.na(df.sub$roa)])) # # [1] "ask-com" ?? "bazaarvoice"- # # [3] "bmc-software"- "compuware"- # # [5] "csc"-[bought by DXC] "forcepoint"- # # [7] "fujitsu"- "google"- # # [9] "htc" ?? "mcafee"- # # [11] "naspers"- "netsuite" # # [13] "opera-software"- "qlik-technologies"- # # [15] "responsys"- "rightnow-technologies"- # # [17] "samsung-electronics"?? "servicepower" ?? 
# # [19] "siemens"- "software-ag" # # [21] "solarwinds"- "sony"- ### MAP: [COMPUSTAT]::CONM |--> [CrunchBase]::ipo.stock_symbol ### to replace the COMPUSTAT `tic` value with the CrunchBase `stock_symbol` ### in order to merge COMPUSTAT financials into CrunchBase firm data cs.conm_cb.stock <- c( `ALPHABET INC`='GOOG', `SONY CORP`='6758', ## Tokyo exchange symbol -- just using it here to map Compustat data to CrunchBase for Sony `BMC SOFTWARE INC`='BMC', `DXC TECHNOLOGY COMPANY`='csc', ## CSC was acquired by DXC `FUJITSU LTD`='6702', ## Tokyo exchange symbol `NASPERS LTD`='NPN', ## Johannesburg stock exchange `OPERA LTD -ADR`='OPESF', `RESPONSYS INC`='MKTG', `SIEMENS AG`='SIEMENS', `SOLARWINDS INC`='SWI', `SONY CORP SNE`='6758', `BAZAARVOICE INC`='BV', `COMPUWARE CORP`='CPWR', `WEBSENSE INC`='WBSN', ##Forcepoint or Websense ? `MCAFEE INC`='MFE', `QLIK TECHNOLOGIES INC`='QLIK', `RIGHTNOW TECHNOLOGIES INC`='RNOW' ) csa2$stock_symbol <- csa2$tic for (conm in names(cs.conm_cb.stock)) { csa2$stock_symbol[csa2$conm==conm] <- cs.conm_cb.stock[conm] ## set the } ##=============================== ## MERGE COMPUSTAT DATA INTO CRUNCHBASE DATA ##------------------------------- ## merge in COMPUSTAT data by stock_exchange symbol csa2.tmp <- csa2[csa2$stock_symbol %in% g.full.pd.df$stock_symbol & !is.na(csa2$stock_symbol),] df.cs <- merge(g.full.pd.df, csa2.tmp, by.x='stock_symbol',by.y='stock_symbol', all.x=T, all.y=F) ## SEGMENTS DATA company_name_unique to merge df.conm.u <- data.frame() for (conm in unique(df.cs$conm)) { names <- df.cs[which(df.cs$conm==conm),'company_name_unique'] df.conm.u <- rbind(df.conm.u, data.frame(conm=conm, company_name_unique=unique(names)[1]) ) } ## drop NA df.conm.u <- na.omit(df.conm.u) ## MERGE IN company_name_unique seg3 <- merge(seg2, df.conm.u, by.x='conm', by.y='conm', all.x=T, all.y=F) ## filter only segments data for firms with company_name_unique matched from CrunchBase data seg4 <- seg3[which(!is.na(seg3$company_name_unique)),] 
##=================================== ## ## AQUISITIONS FILTER ## ##----------------------------------- ## GET ALL ACQ EVENT VERTICES ## keep only acquisitions with acquirer in ego network and target in global competition network acq.src <- co_acq[ co_acq$acquirer_name_unique %in% V(g.pd)$name & co_acq$acquiree_name_unique %in% V(g.full.pd)$name, ] acq.src.allpd <- acq.src[ acq.src$acquired_on >= start & acq.src$acquired_on < end , ] acq.src.allpd <- acq.src.allpd[order(acq.src.allpd$acquired_on, decreasing = F), ] ##--------------------------------------------- ## LOAD DATA AFTER PROPENSITIES ARE COMPUTED ##--------------------------------------------- l.prop <- readRDS(file.path(.data_dir, sprintf('acqlogit_propensity_score_comp_list_%s_d%s_ctrl.rds',name_i,d))) g.prop <- l.prop$g.prop g.full.prop <- l.prop$g.full.prop # g.prop.nc <- l.prop$g.prop.nc ## ? # g.full.prop.nc <- l.prop$g.full.prop.nc ## ? a.df <- l.prop$a.df a.df.ctrl <- l.prop$a.df.ctrl t.df <- l.prop$t.df a.prop <- l.prop$a.prop t.prop <- l.prop$t.prop ##============================================= ## YEAR PERIODS: DEFINE NICHE CLUSTERS ##--------------------------------------------- l <- list() l.cov <- list() ## covariates data to compute regression dataframe df.mmc <- data.frame() df.rem <- data.frame() # df.reg <- data.frame() ## replaced by l.cov lidx <- 0 ## acquisition list index timeval <- timeval.last <- 0 ## set graphs to use to process node collapse from g.pd.nc <- g.pd.orig ## ego network to node collapse for network covariates g.full.pd.nc <- g.full.pd.orig ## full netowrk to node collapse for network covariates ##=============================== ## ## MAIN LOOP: COMPUTE COVARIATES ## ##------------------------------- ## ACQUISITION EVENTS: UPDATE MMC & DYNAMIC EFFs do.node.collapse <- TRUE ## START WITH FALSE AND SET TO TRUE ON FIRST LOOP for (j in 1:nrow(acq.src.allpd)) { df.acq.j <- acq.src.allpd[j,] ## this acquisition row in the acquisition dataframe date_j <- 
acq.src.allpd$acquired_on[j] year_j <- as.integer(str_sub(date_j,1,4)) uuid_j <- df.acq.j$acquisition_uuid ## g.pd d2 updated each acquisition ## g.pd.orig d2 original ## g.full.pd.orig global network within timeframe start, end cat(sprintf('\n\n%s %s-->%s: acquisition %s (%.2f%s)\n\n',date_j, df.acq.j$acquirer_name_unique, df.acq.j$acquiree_name_unique, j,100*j/nrow(acq.src.allpd),'%')) ##------------------------------------------- ## NODE COLLAPSE PREVIOUS ACQUISITION IF IT WAS SKIPPED ##------------------------------------------- if (do.node.collapse & j > 1) { cat(sprintf('node collapsing previous skipped acquisition %s:\n',(j-1))) g.pd.nc <- acf$nodeCollapseGraph(g.pd.nc, acq.src.allpd[(j-1),]) # g.pd.nc <- acf$nodeCollapseGraphAbsorbSubgraph(g.pd.nc, g.full.pd.nc, acq.src.allpd[(j-1),], absorb.levels) g.full.pd.nc <- acf$nodeCollapseGraph(g.full.pd.nc, acq.src.allpd[(j-1),]) ## FLAG TO NODE COLLAPSE NEXT LOOP do.node.collapse <- TRUE } else { ## DONT NODE COLLAPSE PREVIOUS ACQUISITION IF IT WAS ALREADY NODE COLLAPSED (NOT SKIPPED) ## FLAG TO NODE COLLAPSE NEXT LOOP do.node.collapse <- TRUE } ##------------------------------------------- ## CHECKS TO SKIP THIS ACQUISITION ##------------------------------------------- ## ACQUISITION MUST BE IN PROPENSITY SCORES DATAFRAMES if ( !(uuid_j %in% a.prop$acquisition_uuid) | !(uuid_j %in% t.prop$acquisition_uuid)) next # SKIP IF ALL ACQUIRER ALTERNATIVES HAVE NO COMPUSTAT FINANICALS (check m2b all NA) if (all(is.na(a.prop$m2b[which(a.prop$acquisition_uuid==uuid_j)]))) next ## SKIP IF EITHER ACQUIRER OR TARGET IS NOT IN NETWORK if ( !(acq.src.allpd$acquiree_name_unique[j] %in% V(g.full.pd.nc)$name) ) next if ( !(acq.src.allpd$acquirer_name_unique[j] %in% V(g.pd.nc)$name) ) next ## SKIP IF ACQUIRER IS NOT PUBLIC isPublicAcq <- (acq.src.allpd$acquirer_name_unique[j] %in% cb$co_ipo$company_name_unique & cb$co_ipo$went_public_on[cb$co_ipo$company_name_unique==acq.src.allpd$acquirer_name_unique[j]] <= 
acq.src.allpd$acquired_on[j]) if (length(isPublicAcq)==0) next if ( ! isPublicAcq) next lidx <- length(l) + 1 l[[lidx]] <- list() ##------------------------------------- ## Absorb subgraph from global network to ego firm network, if not exists ##------------------------------------- ## cache network before absorbing target subgraph to compute counterfactual newtorks g.pd.nc.cf <- g.pd.nc if ( ! acq.src.allpd$acquiree_uuid[j] %in% V(g.pd.nc)$company_uuid) { targ.g.full.pd.nc.vid <- which(V(g.full.pd.nc)$company_uuid == acq.src.allpd$acquiree_uuid[j]) ## target subgraph from global network (using absorb.levels number of indirect competitor levels) g.full.pd.nc.sub.l <- igraph::make_ego_graph(graph=g.full.pd.nc, order=absorb.levels, nodes=targ.g.full.pd.nc.vid) ## only absorb if target subgraph exists if (length(g.full.pd.nc.sub.l)>0 & class(g.full.pd.nc.sub.l[[1]])=='igraph') { g.full.pd.nc.sub <- g.full.pd.nc.sub.l[[1]] ## igraph "+" operator combines graphs .verts1 <- as_data_frame(g.pd.nc, "vertices") .verts2 <- as_data_frame(g.full.pd.nc.sub, "vertices") .names.2.add.1 <- .verts2$name[which( ! 
.verts2$name %in% .verts1$name)] .names.2.add.1 <- .names.2.add.1[!is.na(.names.2.add.1)] for (.name in .names.2.add.1) { vr <- nrow(.verts1)+1 .verts1[vr, ] <- NA for (col in names(.verts1)) { if (col %in% names(.verts2)) { .verts1[vr, col] <- .verts2[which(.verts2$name==.name), col] } } } .verts <- unique(.verts1) .el <- rbind(as_data_frame(g.pd.nc), as_data_frame(g.full.pd.nc.sub)) g.pd.nc <- graph_from_data_frame(d = .el, directed = FALSE, vertices = .verts) } } ##------------------------------------- ## Set Network Cluster for period network (after having node collapsed) ##------------------------------------- V(g.pd.nc)$nc <- as.integer(igraph::multilevel.community(g.pd.nc)$membership) V(g.full.pd.nc)$nc <- as.integer(igraph::multilevel.community(g.full.pd.nc)$membership) ##===================================== ## Subset Year Period Network ##------------------------------------- cat(' subsetting network edges for year period of acquisition...') ## Period network removes competitive relations ended < year_j OR started >= year_j+1 g.pd <- asIgraph(acf$makePdNetwork(asNetwork(g.pd.nc), year_j-1, year_j+1, isolates.remove = F)) g.full.pd <- asIgraph(acf$makePdNetwork(asNetwork(g.full.pd.nc), year_j-1, year_j+1, isolates.remove = F)) V(g.pd)$name <- V(g.pd)$vertex.names V(g.full.pd)$name <- V(g.full.pd)$vertex.names cat('done.') ##------------------------------- ## Compute Focal Firm Ego Network MMC Measures ##------------------------------- ## GET FIRM x FRIM MMC MATRIX TO USE IN FM-MMC COMPUTATION m.mmc <- acf$getFirmFirmMmc(g.pd, as.integer(V(g.pd)$nc)) ## Update MMC after acquisition l[[lidx]]$mmc <- acf$getFmMmc(g.pd, as.integer(V(g.pd)$nc)) ## MMC degree: number of mmc dyads linked to each firm i V(g.pd)$num.mmc.comps <- acf$getNumMmcRivalsByMembership(g.pd, as.integer(V(g.pd)$nc), m.mmc) ## SUM FM MMC over markets ?????? 
V(g.pd)$fm.mmc.sum <- rowSums(l[[lidx]]$mmc) V(g.pd)$num.mkts <- apply(l[[lidx]]$mmc, MARGIN=1, FUN=function(x){ return(length(x[x>0])) }) ##----------------------------------------- ## GET DATAFRAME VARS ##----------------------------------------- # ## Acquirer d2 original org.vid # xi.new.vid <- which(V(g.pd)$name == acq.src.allpd$acquirer_name_unique[j]) # ## target d2 original org.vid # xj.new.vid <- which(V(g.pd)$name == acq.src.allpd$acquiree_name_unique[j]) ## acquirer d2 t=j id xi <- which(V(g.pd)$name==acq.src.allpd$acquirer_name_unique[j]) ## target d2 t=j id xj <- which(V(g.pd)$name==acq.src.allpd$acquiree_name_unique[j]) ## CHECKS if (length(xi)==0) stop(sprintf('acquirer firm `%s` not in g.pd focal firm ego network\n',acq.src.allpd$acquirer_name_unique[j])) if (length(xj)==0) stop(sprintf('target firm `%s` not in g.pd focal firm ego network\n',acq.src.allpd$acquiree_name_unique[j])) # ## acquirer id in original graph (at start of period) # xi.orig <- as.integer(V(g.pd.orig)[V(g.pd.orig)$name==acq.src.allpd$acquirer_name_unique[j]]) # xi.nc <- as.integer(V(g.pd.orig)$nc[xi.orig]) ## original nc for the period xi.nc <- V(g.pd)$nc[xi] # xi.mmc.sum <- V(g.pd)$fm.mmc.sum[xi] xi.num.mkts <- V(g.pd)$num.mkts[xi] num.mmc.comps <- V(g.pd)$num.mmc.comps[xi] # ## # xj.orig <- ifelse( !is.na(xj.orig.vid), as.integer(V(g.pd.orig)[V(g.pd.orig)$orig.vid==xj.orig.vid]), NA) # xj.orig <- ifelse(length(xj.orig) > 1, xj.orig, NA) # xj.nc <- ifelse(length(xj)==0,NA, V(g.pd.orig)$nc[xj.orig] ) ## original nc for the period xj.nc <- V(g.pd)$nc[xj] ##-------------------------------------- ## ## TARGET ALTERNATIVES SET ## ##-------------------------------------- ## SELECT FROM PROPENSITY SCORES (if alternatives more than 5) t.prop.j <- t.prop[which(t.prop$acquisition_uuid==df.acq.j$acquisition_uuid & !is.na(t.prop$pred)),] t.prop.j <- t.prop.j[order(t.prop.j$pred, decreasing = T), ] if (nrow(t.prop.j)==0) { next } else if (nrow(t.prop.j)>6) { idx.1 <- which(t.prop.j$y==1) 
idx.0 <- which(t.prop.j$y==0) idx.0.sample <- idx.0[1:min(5,length(idx.0))] alt.targ.names <- t.prop.j$company_name_unique[c(idx.1, idx.0.sample)] } else { alt.targ.names <- t.prop.j$company_name_unique } ## ACTUAL TARGET ID targ.id <- xj ## START TARGET ALTERNATIVES DATAFRAME df.targ.alt <- cb$co[which(cb$co$company_name_unique %in% alt.targ.names),] ## MERGE IN y and d df.targ.alt <- merge(df.targ.alt, t.prop.j[,c('company_name_unique','y','d')], by.x='company_name_unique',by.y='company_name_unique',all.x=T,all.y=F) ## ipo status df.targ.alt$is.public <- sapply(1:nrow(df.targ.alt), function(x){ isNotOperating <- df.targ.alt$status[x] != 'operating' ipo.date <- cb$co_ipo$went_public_on[which(cb$co_ipo$company_name_unique == df.targ.alt$company_name_unique[x])] if (length(ipo.date)>1) ipo.date <- min(ipo.date, na.rm=TRUE) if (length(ipo.date)<1) return(0) return(ifelse( isNotOperating & ipo.date <= date_j, 1, 0)) }) ## set is.public NAs = 0 df.targ.alt$is.public[is.na(df.targ.alt$is.public)] <- 0 ## target had IPO df.targ <- df.targ.alt[which(df.targ.alt$company_name_unique == V(g.pd)$name[targ.id]), ] if (nrow(df.targ) != 1) next ## select based on ownership status df.targ.alt <- df.targ.alt[which(df.targ.alt$is.public == df.targ$is.public),] ## add MMC df.targ.alt$fm.mmc.sum <- sapply(df.targ.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, V(g.pd)$fm.mmc.sum[which(V(g.pd)$name == name)] , NA) }) df.targ.alt$num.mkts <- sapply(df.targ.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, V(g.pd)$num.mkts[which(V(g.pd)$name == name)] , NA) }) df.targ.alt$num.mmc.comps <- sapply(df.targ.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, V(g.pd)$num.mmc.comps[which(V(g.pd)$name == name)] , NA) }) ## ACQUISITIONS #df.targ.alt$acqs <- unname(sapply(df.targ.alt$company_name_unique, function(name){ # length(which(cb$co_acq$acquirer_name_unique==name & cb$co_acq$acquired_on <= date_j)) # })) df.targ.alt$acqs 
<- unname(sapply(df.targ.alt$company_name_unique, function(name){ x <- length(which(cb$co_acq$acquirer_name_unique==name & cb$co_acq$acquired_on <= date_j)) n <- length(which(cb$co_acq$acquirer_name_unique %in% V(g.full.pd)$name & cb$co_acq$acquired_on <= date_j)) return(x/n) })) ## VENTURE FUNDING df.targ.alt$fund.v.cnt <- unname(sapply(df.targ.alt$company_name_unique, function(name){ length(which(cb$co_rou$company_name_unique==name & cb$co_rou$announced_on <= date_j & cb$co_rou$funding_round_type=='venture')) })) df.targ.alt$fund.v.amt <- unname(sapply(df.targ.alt$company_name_unique, function(name){ idx <- which(cb$co_rou$company_name_unique==name & cb$co_rou$announced_on <= date_j & cb$co_rou$funding_round_type=='venture') return(sum(cb$co_rou$raised_amount_usd[idx], na.rm = T)) })) ## ALL FUNDING df.targ.alt$fund.cnt <- unname(sapply(df.targ.alt$company_name_unique, function(name){ length(which(cb$co_rou$company_name_unique==name & cb$co_rou$announced_on <= date_j)) })) df.targ.alt$fund.amt <- unname(sapply(df.targ.alt$company_name_unique, function(name){ idx <- which(cb$co_rou$company_name_unique==name & cb$co_rou$announced_on <= date_j) return(sum(cb$co_rou$raised_amount_usd[idx], na.rm = T)) })) ## USE EGO and GLOBAL NETWORK for DEGREE df.targ.alt$deg <- sapply(df.targ.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, igraph::degree(g.pd,which(V(g.pd)$name==name)) , NA) }) df.targ.alt$deg.full <- sapply(df.targ.alt$company_name_unique, function(name){ ifelse(name %in% V(g.full.pd)$name, igraph::degree(g.full.pd,which(V(g.full.pd)$name==name)) , NA) }) ##---------------------------------------------------------- ## DATA SAMPLE OF ALL ACQUIRERS (REAL + 5 ALTERNATIVES) l[[lidx]]$df.targ.alt <- df.targ.alt ##-------------------------------------- ## ## ACQUIRER ALTERNATIVES SET ## ##-------------------------------------- ## SELECT FROM PROPENSITY SCORES (if alternatives more than 5) a.prop.j <- 
a.prop[which(a.prop$acquisition_uuid==df.acq.j$acquisition_uuid & !is.na(a.prop$pred)),] a.prop.j <- a.prop.j[order(a.prop.j$pred, decreasing = T), ] if (nrow(a.prop.j)==0) { next } else if (nrow(a.prop.j)>6) { idx.1 <- which(a.prop.j$y==1) idx.0 <- which(a.prop.j$y==0) idx.0.sample <- idx.0[1:min(5,length(idx.0))] alt.acq.names <- a.prop.j$company_name_unique[c(idx.1, idx.0.sample)] } else { alt.acq.names <- a.prop.j$company_name_unique } ## ACTUAL TARGET ID acq.id <- xi ## START TARGET ALTERNATIVES DATAFRAME df.acq.alt <- cb$co[which(cb$co$company_name_unique %in% alt.acq.names),] ## MERGE IN y and d df.acq.alt <- merge(df.acq.alt, a.prop.j[,c('company_name_unique','y','d')], by.x='company_name_unique',by.y='company_name_unique',all.x=T,all.y=F) ## ipo status df.acq.alt$is.public <- sapply(1:nrow(df.acq.alt), function(x){ isNotOperating <- df.acq.alt$status[x] != 'operating' ipo.date <- cb$co_ipo$went_public_on[which(cb$co_ipo$company_name_unique == df.acq.alt$company_name_unique[x])] if (length(ipo.date)>1) ipo.date <- min(ipo.date, na.rm=TRUE) if (length(ipo.date)<1) return(0) return(ifelse( isNotOperating & ipo.date <= date_j, 1, 0)) }) ## set is.public NAs = 0 df.acq.alt$is.public[is.na(df.acq.alt$is.public)] <- 0 ## target had IPO df.acq <- df.acq.alt[which(df.acq.alt$company_name_unique == V(g.pd)$name[acq.id]), ] if (nrow(df.acq) != 1) next ##stop(sprintf('error: nrow df.acq %s > 1',nrow(df.acq))) ## select based on ownership status df.acq.alt <- df.acq.alt[which(df.acq.alt$is.public == df.acq$is.public), ] ## set MMC df.acq.alt$fm.mmc.sum <- sapply(df.acq.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$fm.mmc.sum[which(V(g.pd)$name == name)]) , NA) }) df.acq.alt$num.mkts <- sapply(df.acq.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$num.mkts[which(V(g.pd)$name == name)]) , NA) }) df.acq.alt$num.mmc.comps <- sapply(df.acq.alt$company_name_unique, function(name){ 
ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$num.mmc.comps[which(V(g.pd)$name == name)]) , NA) }) ## ACQUISITIONS #df.acq.alt$acqs <- unname(sapply(df.acq.alt$company_name_unique, function(name){ # length(which(cb$co_acq$acquirer_name_unique==name & cb$co_acq$acquired_on <= date_j)) #})) df.acq.alt$acqs <- unname(sapply(df.acq.alt$company_name_unique, function(name){ x <- length(which(cb$co_acq$acquirer_name_unique==name & cb$co_acq$acquired_on <= date_j)) n <- length(which(cb$co_acq$acquirer_name_unique %in% V(g.full.pd)$name & cb$co_acq$acquired_on <= date_j)) return(x/n) })) ## USE EGO and GLOBAL NETWORK for DEGREE df.acq.alt$deg <- sapply(df.acq.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, igraph::degree(g.pd,which(V(g.pd)$name==name)) , NA) }) df.acq.alt$deg.full <- sapply(df.acq.alt$company_name_unique, function(name){ ifelse(name %in% V(g.full.pd)$name, igraph::degree(g.full.pd,which(V(g.full.pd)$name==name)) , NA) }) ## KEEP SAME DIMENSIONS AS TARGET DATAFRAME df.acq.alt$fund.v.cnt <- NA df.acq.alt$fund.v.amt <- NA df.acq.alt$fund.cnt <- NA df.acq.alt$fund.amt <- NA ##---------------------------------------------------------- ## DATA SAMPLE OF ALL ACQUIRERS (REAL + 5 ALTERNATIVES) l[[lidx]]$df.acq.alt <- df.acq.alt ##-------------------------------------- ## ## NETWORK COVARIATES ## ##-------------------------------------- cat('computing network covariates...') df.acq.alt$set <- 'acquirer' df.acq.alt$event <- sapply(df.acq.alt$d, function(d)ifelse(as.integer(d)==0, 1, 0)) df.targ.alt$set <- 'target' df.targ.alt$event <- sapply(df.targ.alt$d, function(d)ifelse(as.integer(d)==0, 1, 0)) df.alt <- rbind(df.acq.alt, df.targ.alt) df.alt$t <- j ## acquisition index df.alt <- df.alt[order(which(V(g.full.pd.orig)$name %in% df.alt$company_name_unique )), ] ## confirm ascencing order if (!all(count(df.alt$set)$freq>1)) { cat('missing alternative match. 
skipping.\n') next ## SKIP IF NOT AT LEAST 1 ALTERNATIVE FOR ACQUIRER AND TARGET } ##----------------------------------------------- ### Create Diff Graph (removed|acquired nodes are represented as isolates) ## ## ******** TODO ********* ##----------------------------------------------- # vids <- which( V(g.pd)$name %in% df.alt$company_name_unique ) # vids.orig <- which( V(g.pd)$name %in% df.alt$company_name_unique ) # vids.orig.rm <- vids[which( !(vids.orig %in% vids))] # mapping <- V(g.pd)[which(V(g.pd)$orig.vid %in% V(g.full.pd)$orig.vid) ] # g.diff <- igraph::contract.vertices(g.full.pd, mapping = mapping) # V(g.diff)$name <- V(g.full.pd.orig)$name # vids.diff <- as.integer( V(g.diff)[which( V(g.diff)$name %in% df.alt$company_name_unique )] ) ##----------------------------------------------- ## global covars ##----------------------------------------------- cov.vids <- which(V(g.pd)$name %in% df.alt$company_name_unique) tmp.cov <- data.frame( company_name_unique = unlist(V(g.pd)$name[cov.vids]), closeness = unname(igraph::closeness(g.pd, vids = cov.vids,normalized = TRUE)), constraint = unname(unlist(igraph::constraint(g.pd, nodes = cov.vids))) ) df.alt <- merge(df.alt, tmp.cov, by = 'company_name_unique', all.x = T, all.y = F) # ## acquisition experience # df.alt$acq.experience <- unlist(sapply(1:nrow(df.alt), function(x){ return( # nrow(acq.src.allpd[which(acq.src.allpd$acquirer_name_unique == df.alt$company_name_unique[x] # & acq.src.allpd$acquired_on <= date_j), ]) / j ## scale experience to num observed acquisitions # )})) # ## local covars in pd graph # df.alt$fm.mmc.sum <- sapply(df.alt$company_name_unique, function(name){ # ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$fm.mmc.sum[which(V(g.pd)$name == name)]), NA) # }) # df.alt$num.mkts <- sapply(df.alt$company_name_unique, function(name){ # ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$num.mkts[which(V(g.pd)$name == name)]), NA) # }) # df.alt$num.mmc.comps <- sapply(df.alt$company_name_unique, 
function(name){ # ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$num.mmc.comps[which(V(g.pd)$name == name)]), NA) # }) cat('done.\n') ##--------------------------------------------- ##--------------------------------------------- ## MERGE IN PUBLIC FIRM FINANCIAL CONTROLS ##--------------------------------------------- cat('adding public firm financial controls...') ctrl.col <- c('company_name_unique','datayear','act','emp','ebitda','m2b','che') ctrl.idx <- which(df.cs$datayear == (df.acq.j$acquired_year-1) ) if (length(ctrl.idx)==0) next df.alt <- merge(df.alt, df.cs[ctrl.idx,ctrl.col], by.x='company_name_unique',by.y='company_name_unique',all.x=T,all.y=F) df.alt$ln_asset <- log(df.alt$act) df.alt$ln_emp <- log(df.alt$emp) df.alt$roa <- df.alt$ebitda / df.alt$act df.alt$cash <- df.alt$che / df.alt$act cat('done.\n') ##--------------------------------------------- ## RIVALS RECENT ACQUISITIONS ##--------------------------------------------- cat('computing acquirers rivals recent acquisitions...') df.alt$rival.acq.1 <- NA df.alt$rival.acq.2 <- NA df.alt$rival.acq.3 <- NA for (ri in 1:nrow(df.alt)) { x <- df.alt[ri,] xdate <- as.character(df.acq.j$acquired_on) ## date of this acquitision j if(is.na(xdate)) next parts <- str_split(xdate,'[-]')[[1]] xdate1 <- sprintf('%04d-%s-%s',as.numeric(parts[1])-1,parts[2],parts[3]) xdate2 <- sprintf('%04d-%s-%s',as.numeric(parts[1])-2,parts[2],parts[3]) xdate3 <- sprintf('%04d-%s-%s',as.numeric(parts[1])-3,parts[2],parts[3]) rivals <- names(neighbors(g.full.pd, v = which(V(g.pd)$name==x$company_name_unique))) df.alt$rival.acq.1[ri] <- sum(rivals %in% cb$co_acq$acquirer_name_unique[cb$co_acq$acquired_on < xdate & cb$co_acq$acquired_on >= xdate1]) df.alt$rival.acq.2[ri] <- sum(rivals %in% cb$co_acq$acquirer_name_unique[cb$co_acq$acquired_on < xdate & cb$co_acq$acquired_on >= xdate2]) df.alt$rival.acq.3[ri] <- sum(rivals %in% cb$co_acq$acquirer_name_unique[cb$co_acq$acquired_on < xdate & cb$co_acq$acquired_on >= xdate3]) } 
cat('done.\n') ##--------------------------------------------- ## COMPUTE PRODUCT SIMILARITY OF COUNTERFACTUAL acquirer|target TO ACTUAL target|acquirer ##--------------------------------------------- cat('computing product similarity...') df.alt$ij.sim <- NA df.alt$ij.cossim <- NA for (si in 1:nrow(df.alt)) { x <- df.alt[si,] if (x$set == 'target') { firm.i <- df.acq.j$acquirer_name_unique firm.j <- x$company_name_unique } else { firm.i <- x$company_name_unique firm.j <- df.acq.j$acquiree_name_unique } ## acquirer cats.i <- str_split(cb$co$category_list[cb$co$company_name_unique == firm.i], '[|]')[[1]] cg.i <- str_split(cb$co$category_group_list[cb$co$company_name_unique == firm.i], '[|]')[[1]] c.i <- unique(c(cats.i,cg.i)) ## target cats.j <- str_split(cb$co$category_list[cb$co$company_name_unique == firm.j], '[|]')[[1]] cg.j <- str_split(cb$co$category_group_list[cb$co$company_name_unique == firm.j], '[|]')[[1]] c.j <- unique(c(cats.j,cg.j)) ## similarity c.all <- unique(c(c.i,c.j)) v.i <- as.integer(c.all %in% c.i) v.j <- as.integer(c.all %in% c.j) df.alt$ij.sim[si] <- (v.i %*% v.j)[1,1] df.alt$ij.cossim[si] <- (v.i %*% v.j)[1,1] / (sqrt(sum(v.i^2)) * sqrt(sum(v.j^2))) } cat('done.\n') ##--------------------------------------------- ## Diversification Control ##--------------------------------------------- df.alt$div <- NA for (di in 1:nrow(df.alt)) { x = df.alt[di,] df.seg <- seg4[which(seg4$year==(df.acq.j$acquired_year-1) & seg4$company_name_unique==x$company_name_unique),] if (nrow(df.seg)==0) next sm <- sum(df.seg$sales) df.alt$div[di] <- sum(sapply(df.seg$sales, function(x) ifelse(x==0, 0, (x/sm) * log( 1/(x/sm) )) )) } ##--------------------------------------------- ## CACHE ALTERNATIVES DATAFRAME ##--------------------------------------------- l[[lidx]]$df.alt <- df.alt ##--------------------------------------------- ## SYNERGIES ##--------------------------------------------- cat('computing counterfactual networks for positional synergy:\n') ## 
ACQUIRER's POSITION acquirer <- acq.src.allpd$acquirer_name_unique[j] ## Counterfactual target graphs g.cf <- lapply(df.targ.alt$company_name_unique, function(name){ ## counterfactual target UUID cf.uuid <- cb$co$company_uuid[which(cb$co$company_name_unique==name)] ## absorb counterfactual target subgraph into cached actual graph, if not exists g.pd.nc.cf.x <- g.pd.nc.cf if ( ! cf.uuid %in% V(g.pd.nc.cf.x)$company_uuid) { targ.g.full.pd.nc.vid <- which(V(g.full.pd.nc)$company_uuid == cf.uuid) g.full.pd.nc.sub.l <- igraph::make_ego_graph(graph=g.full.pd.nc, order=absorb.levels, nodes=targ.g.full.pd.nc.vid) if (length(g.full.pd.nc.sub.l)>0 & class(g.full.pd.nc.sub.l[[1]])=='igraph'){ g.full.pd.nc.sub <- g.full.pd.nc.sub.l[[1]] .verts1 <- as_data_frame(g.pd.nc.cf.x, "vertices") .verts2 <- as_data_frame(g.full.pd.nc.sub, "vertices") cat(sprintf('vdf1 dim (%s, %s), vdf2 dim (%s, %s)\n',nrow(.verts1),ncol(.verts1),nrow(.verts2),ncol(.verts2))) cat(sprintf(' vdf1::%s\n vdf2::%s',paste(names(.verts1),collapse = '|'),paste(names(.verts2),collapse = '|'))) ## fix duplicates .names.2.add.1 <- .verts2$name[which( ! 
.verts2$name %in% .verts1$name)] .names.2.add.1 <- .names.2.add.1[!is.na(.names.2.add.1)] for (.name in .names.2.add.1) { vr <- nrow(.verts1)+1 .verts1[vr, ] <- NA for (col in names(.verts1)) { if (col %in% names(.verts2)) { .verts1[vr, col] <- .verts2[which(.verts2$name==.name), col] } } } .verts <- unique(.verts1) .el <- rbind(as_data_frame(g.pd.nc.cf.x), as_data_frame(g.full.pd.nc.sub)) g.pd.nc.cf.x <- graph_from_data_frame(d = .el, directed = FALSE, vertices = .verts) } } ## set network clusters V(g.pd.nc.cf.x)$nc <- as.integer(igraph::multilevel.community(g.pd.nc.cf.x)$membership) ## subset period graph (removing ) g.pd.cf.x <- asIgraph(acf$makePdNetwork(asNetwork(g.pd.nc.cf.x), year_j-1, year_j+1, isolates.remove = F)) V(g.pd.cf.x)$name <- V(g.pd.cf.x)$vertex.names ## return counterfactual node collapsed graph tmp.acq.df <- data.frame( acquirer_uuid = acq.src.allpd$acquirer_uuid[j], acquiree_uuid = cf.uuid, acquired_on = acq.src.allpd$acquired_on[j] ) return(acf$nodeCollapseGraph(g.pd.cf.x, tmp.acq.df)) # return(acf$nodeCollapseGraphAbsorbSubgraph(g.pd, g.full.pd, tmp.acq.df, absorb.levels)) }) names(g.cf) <- df.targ.alt$company_name_unique ##--------------------------------------------- ## APPEND PAIRING COVARIATES TO REGRESSION DATAFRAME ##--------------------------------------------- cat('appending dyadic regression dataframe...\n') for (k in 1:nrow(df.alt[df.alt$set=='acquirer', ])) { ix <- which( df.alt$company_name_unique == df.alt[df.alt$set=='acquirer', ][k, ]$company_name_unique ) if (length(df.alt$event[ix])==0) next for (r in 1:nrow(df.alt[df.alt$set=='target', ])) { jx <- which( df.alt$company_name_unique == df.alt[df.alt$set=='target', ][r, ]$company_name_unique ) # cat(sprintf('k %s, ix %s, r %s, jx %s\n',k,ix,r,jx)) ## skip pairing if neither acquirer nor target were in this actual event if (length(df.alt$event[jx])==0) next if ( !as.integer(df.alt$event[ix]) & !as.integer(df.alt$event[jx]) ) next cat(sprintf('appending pairing 
%s-->%s\n',df.alt$company_name_unique[ix],df.alt$company_name_unique[jx])) if (df.alt$company_name_unique[ix] != df.alt$company_name_unique[jx]) { # cat(sprintf('ix %s jx %s\n',ix,jx)) ## DISTANCE ij.dist <- igraph::distances(g.full.pd, v = which(V(g.full.pd)$name == df.alt$company_name_unique[ix]), to= which(V(g.full.pd)$name == df.alt$company_name_unique[jx]) ) ## AQUIRER POSITION cat(' power centralities\n') pow.n1 <- unname(igraph::power_centrality(g.pd, nodes = which(V(g.pd)$name==df.alt$company_name_unique[ix]), exponent = -0.1)) #pow.n2 <- unname(igraph::power_centrality(g.full.pd, nodes = which(V(g.full.pd)$name==df.alt$company_name_unique[ix]), exponent = -0.2)) pow.n3 <- unname(igraph::power_centrality(g.pd, nodes = which(V(g.pd)$name==df.alt$company_name_unique[ix]), exponent = -0.3)) ## COUNTERFACTUAL NETWORK `r` for different target jx g.cf.r <- g.cf[[ df.alt$company_name_unique[jx] ]] if (class(g.cf.r) != 'igraph') { cat(sprintf('ix=%s,jx=%s: g.cf.r class `%s` is not igraph\n',ix,jx,class(g.cf.r))) next } ## COUNTERFACTUAL NETWORK COVARIATES cat(' network counterfactuals\n') # cf.m.mmc <- acf$getFirmFirmMmc(g.cf.r, as.integer(V(g.cf.r)$nc)) # cf.num.mmc.comps <- acf$getNumMmcRivalsByMembership(g.cf.r, as.integer(V(g.cf.r)$nc), cf.m.mmc) cf.closeness <- igraph::closeness(g.cf.r, vids = which(V(g.cf.r)$name==df.alt$company_name_unique[ix]), normalized = TRUE) cf.degree <- igraph::degree(g.cf.r, v = which(V(g.cf.r)$name==df.alt$company_name_unique[ix])) cf.constraint <- igraph::constraint(g.cf.r, nodes = which(V(g.cf.r)$name==df.alt$company_name_unique[ix])) cf.pow.n1 <- unname(igraph::power_centrality(g.cf.r, nodes = which(V(g.cf.r)$name==df.alt$company_name_unique[ix]), exponent = -0.1)) #cf.pow.n2 <- unname(igraph::power_centrality(g.cf.r, nodes = which(V(g.cf.r)$name==df.alt$company_name_unique[ix]), exponent = -0.2)) cf.pow.n3 <- unname(igraph::power_centrality(g.cf.r, nodes = which(V(g.cf.r)$name==df.alt$company_name_unique[ix]), exponent = -0.3)) 
## PAIRING DATAFRAME l.tmp <- list( ###------ event metadata ------ y = ifelse(as.integer(df.alt$event[ix]) & as.integer(df.alt$event[jx]), 1, 0), t = j, date = date_j, uuid = uuid_j, i = df.alt$company_name_unique[ix], j = df.alt$company_name_unique[jx], ###------ acquirer covars ------ i.age = 2018 - df.alt$founded_year[ix], i.pow.n1 = pow.n1, #i.pow.n2 = pow.n2, i.pow.n3 = pow.n3, i.closeness = df.alt$closeness[ix], i.deg = df.alt$deg[ix], i.deg.full = df.alt$deg.full[ix], i.fm.mmc.sum = ifelse(is.missing(df.alt$fm.mmc.sum[ix]), NA, df.alt$fm.mmc.sum[ix]), i.num.mkts = ifelse(is.missing(df.alt$num.mkts[ix]), NA, df.alt$num.mkts[ix]), i.num.mmc.comps = ifelse(is.missing(df.alt$num.mmc.comps[ix]), NA, df.alt$num.mmc.comps[ix]), i.constraint = df.alt$constraint[ix], i.acqs = df.alt$acqs[ix], i.rival.acq.1 = df.alt$rival.acq.1[ix], i.rival.acq.2 = df.alt$rival.acq.2[ix], i.rival.acq.3 = df.alt$rival.acq.3[ix], i.div = df.alt$div[ix], i.ij.sim = df.alt$ij.sim[ix], i.ij.cossim = df.alt$ij.cossim[ix], i.ln_asset = df.alt$ln_asset[ix], i.ln_emp = df.alt$ln_emp[ix], i.roa = df.alt$roa[ix], i.cash = df.alt$cash[ix], i.m2b = df.alt$m2b[ix], ###------ target covars ------ j.age = 2018 - df.alt$founded_year[jx], j.deg = df.alt$deg[jx], j.deg.full = df.alt$deg.full[jx], j.fm.mmc.sum = ifelse(is.missing(df.alt$fm.mmc.sum[jx]), NA, df.alt$fm.mmc.sum[jx]), j.num.mkts = ifelse(is.missing(df.alt$num.mkts[jx]), NA, df.alt$num.mkts[jx]), j.num.mmc.comps = ifelse(is.missing(df.alt$num.mmc.comps[jx]), NA, df.alt$num.mmc.comps[jx]), j.constraint = df.alt$constraint[jx], j.acqs = df.alt$acqs[jx], j.rival.acq.1 = df.alt$rival.acq.1[jx], j.rival.acq.2 = df.alt$rival.acq.2[jx], j.rival.acq.3 = df.alt$rival.acq.3[jx], j.fund.v.cnt = df.alt$fund.v.cnt[jx], j.fund.v.amt = df.alt$fund.v.amt[jx], j.fund.cnt = df.alt$fund.cnt[jx], j.fund.amt = df.alt$fund.amt[jx], j.ij.sim = df.alt$ij.sim[jx], j.ij.cossim = df.alt$ij.cossim[jx], ###------ dyadic covars: acquisition pairing ------ ij.same.region 
= ifelse(df.alt$region[ix] == df.alt$region[jx], 1, 0), ij.same.state = ifelse(df.alt$state_code[ix] == df.alt$state_code[jx], 1, 0), ij.same.country = ifelse(df.alt$country_code[ix] == df.alt$country_code[jx], 1, 0), ij.same.employee.range = ifelse(df.alt$employee_count[ix] == df.alt$employee_count[jx], 1, 0), ij.dist = ifelse( class(ij.dist)=='matrix' & nrow(ij.dist)>0 & ncol(ij.dist)>0, ij.dist[1,1], Inf), ij.diff.deg = as.numeric(df.alt$deg[ix]) - as.numeric(df.alt$deg[jx]), ij.diff.deg.full = as.numeric(df.alt$deg.full[ix]) - as.numeric(df.alt$deg.full[jx]), ij.diff.fm.mmc.sum = ifelse(any(is.missing(df.alt$fm.mmc.sum[ix]),is.missing(df.alt$fm.mmc.sum[jx])), NA, as.numeric(df.alt$fm.mmc.sum[ix]) - as.numeric(df.alt$fm.mmc.sum[jx])), ij.diff.num.mkts = ifelse(any(is.missing(df.alt$num.mkts[ix]), is.missing(df.alt$num.mkts[jx])), NA, as.numeric(df.alt$num.mkts[ix]) - as.numeric(df.alt$num.mkts[jx])), ij.diff.num.mmc.comps = ifelse(any(is.missing(df.alt$num.mmc.comps[ix]), is.missing(df.alt$num.mmc.comps[jx])), NA, as.numeric(df.alt$num.mmc.comps[ix]) - as.numeric(df.alt$num.mmc.comps[jx])), ij.diff.constraint = as.numeric(df.alt$constraint[ix]) - as.numeric(df.alt$constraint[jx]), ij.diff.acqs = as.numeric(df.alt$acqs[ix]) - as.numeric(df.alt$acqs[jx]), ###------ network synergies ------ ij.syn.pow.n1 = (cf.pow.n1 - pow.n1) / pow.n1, #ij.syn.pow.n2 = (cf.pow.n2 - pow.n2) / pow.n2, ij.syn.pow.n3 = (cf.pow.n3 - pow.n3) / pow.n3, # ij.syn.num.mmc.comps = (cf.num.mmc.comps - df.alt$num.mmc.comps[ix]) / df.alt$num.mmc.comps[ix], ij.syn.closeness = (cf.closeness - df.alt$closeness[ix]) / df.alt$closeness[ix], ij.syn.degree = (cf.degree - df.alt$deg[ix]) / df.alt$deg[ix], ij.syn.constraint = (cf.constraint - df.alt$constraint[ix]) / df.alt$constraint[ix] ) l.cov[[ (length(l.cov)+1) ]] <- l.tmp } } } cat('done.\n') ##--------------------------------------- ##================================= ## NODE COLLAPSE update network ##--------------------------------- 
cat(sprintf('node collapsing acquisition %s:\n',j)) g.pd.nc <- acf$nodeCollapseGraph(g.pd.nc, acq.src.allpd[j,]) # g.pd.nc <- acf$nodeCollapseGraphAbsorbSubgraph(g.pd.nc, g.full.pd.nc, acq.src.allpd[(j-1),], absorb.levels) g.full.pd.nc <- acf$nodeCollapseGraph(g.full.pd.nc, acq.src.allpd[j,]) ## FLAG TO NOT RUN NODE COLLAPSE AT START OF NEXT LOOP SINCE IT WAS ALREADY PROCESSED HERE do.node.collapse <- FALSE ## save incrementally if (lidx %% 10 == 0) { saveRDS(list(l=l, l.cov=l.cov), file = file.path(.data_dir, sprintf("acqlogit_compnet_processed_acquisitions_synergies_ABSORB_list_%s_d%s.rds",name_i,d))) } gc() } ## end loop ## final save saveRDS(list(l=l, l.cov=l.cov), file = file.path(.data_dir, sprintf("acqlogit_compnet_processed_acquisitions_synergies_ABSORB_list_%s_d%s.rds",name_i,d)))
/R/acqlogit/acqlogit_qsub_2_3_node_collapse_absorb.R
no_license
sdownin/compnet
R
false
false
44,713
r
########################################################################################## # # MMC Network & Acquisitions # # Create acquisition logit model covariates arrays # # @author sdowning.bm02g@nctu.edu.tw # # @export [list] cb # # # ## update founded_on,closed_on dates - Jin-Su's Email 2018-04-23 # ## C:\\Users\\T430\\Google Drive\\PhD\\Dissertation\\competition networks\\compnet2\\founded_on_date_edit # co.date <- cb$readCsv('founded_on_date_edit/missing_companies_20180330.csv') # ########################################################################################## library(network, quietly = T) library(texreg, quietly = T) library(igraph, quietly = T) library(plyr, quietly = T) library(reshape2) library(intergraph) ##=================== ## DIRECTORIES ##------------------- .compustat_dir <- '/home/sdowning/data/compustat' ##=================== ## PARAMS ##------------------- ### ABSORB LEVELS -- ego net order of acquisition target ### 1 = only direct competitors of acquisition target ### 2 = direct and 2nd-order indirect competitors of acquisition target ### ... etc. 
absorb.levels <- 1 ##-------------------------------------------------------------- ## ## CREATE FIRM NETWORK PERIOD LISTS ## ##-------------------------------------------------------------- ## get date periods and ego network based on settings times <- sapply(years, function(x)paste0(x,'-01-01')) start <- times[1] end <- times[length(times)] ## EGO NETWORK g.ego <- igraph::make_ego_graph(graph = g.full, nodes = V(g.full)[V(g.full)$name==name_i], order = d, mode = 'all')[[1]] ## NETWORKS IN TIMEFRAME TO PROCESS NODE COLLAPSE AND POCESS COVARIATES g.pd <- acf$makePdGraph(g.ego, start, end, isolates.remove=TRUE) ## ego network g.full.pd <- acf$makePdGraph(g.full, start, end, isolates.remove=TRUE) ## full network ## CHECK NETWORK PERIOD SIZES sapply(2:length(times), function(i){gi=acf$makePdGraph(g.ego, times[i-1], times[i], TRUE); return(c(e=ecount(gi),v=vcount(gi)))}) ##-------------------------------------------------- ## LOAD IN EGO NETWORK AND GLOBAL NETWORK ##-------------------------------------------------- g.pd.file <- file.path(.data_dir,sprintf('g_%s_d%s_NCINIT_%s_%s.graphml',name_i,d,start,end)) g.full.pd.file <- file.path(.data_dir,sprintf('g_full_NCINIT_%s_%s.graphml',start,end)) cat(sprintf('qsub_2: loading graphml files %s %s',g.pd.file, g.full.pd.file)) g.pd <- igraph::read.graph(g.pd.file, format='graphml') g.full.pd <- igraph::read.graph(g.full.pd.file, format='graphml') ## Full timeframe Clusters V(g.pd)$nc <- as.integer(igraph::multilevel.community(g.pd)$membership) V(g.full.pd)$nc <- as.integer(igraph::multilevel.community(g.full.pd)$membership) # ## assign UUIDs if not exists # idx.uuid.na <- which(is.na(V(g.pd)$company_uuid) | V(g.pd)$company_uuid=="") # for (idx in idx.uuid.na) { # V(g.pd)$company_uuid[idx] <- cb$uuid() # } # idx.full.uuid.na <- which(is.na(V(g.full.pd)$company_uuid) | V(g.full.pd)$company_uuid=="") # for (idx in idx.full.uuid.na) { # V(g.full.pd)$company_uuid[idx] <- cb$uuid() # } ## cache original clusters V(g.pd)$nc.orig <- 
V(g.pd)$nc V(g.full.pd)$nc.orig <- V(g.full.pd)$nc ## keep original timeframe graph g.pd.orig <- g.pd g.full.pd.orig <- g.full.pd ##---------------------------------- ##============================================ ## ## DATA FOR CONTROLS AND PROPENSITY SCORES ## ##-------------------------------------------- ##-------------------------------------------- ## Load updated compustat data ##-------------------------------------------- csfunda.file <- file.path(.compustat_dir,'fundamentals-annual-UPDATED.csv') if (!file.exists(csfunda.file)) { ## if file not exists, then run script to create it stop(sprintf('stop: cannot find csfunda.file %s',csfunda.file)) #source(file.path(.script_dir,'acqlogit_compustat_update.R')) } csa2.all <- cb$readCsv(csfunda.file) minyr <- min(unique(csa2.all$fyear), na.rm = T) csa2 <- csa2.all[which(csa2.all$fyear != minyr & !is.na(csa2.all$fyear)), ] ##-------------------------------------------- ## LOAD SEGMENTS DATA FOR DIVERSIFICATION ##-------------------------------------------- seg <- read.csv(file.path(.compustat_dir,'segments.csv'), na=c(NA,'','NA'), stringsAsFactors = F, fill = T) # segcus <- read.csv(file.path(getwd(),'compustat','segments-customer.csv'), na=c(NA,'','NA'), stringsAsFactors = F, fill = T) col.seg <- c('conm','tic','datadate','srcdate','stype','snms','soptp1','geotp','sic','SICS1','SICS2','sales','revts','nis') seg2 <- seg[seg$soptp1=='PD_SRVC',col.seg] ## exclude geographic MARKET segments; include PD_SRVC product/service segments seg2$date <- sapply(seg2$datadate, function(x){ x <- as.character(x) return(sprintf('%s-%s-%s',str_sub(x,1,4),str_sub(x,5,6),str_sub(x,7,8))) }) seg2$year <- sapply(seg2$date, function(x)as.integer(str_sub(x,1,4))) # ### # print(head(seg2[which(seg2$tic=='GOOGL'),],12)) # View(seg2[which(seg2$tic=='GOOGL'),]) ##-------------------------------------------- ## MERGE Compustat Data into Compnet Dataframe ##-------------------------------------------- ## EGO GRAPH VERTEX DATARAME MERGE WITH 
COMPUSTAT DATA g.full.pd.df <- as_data_frame(g.full.pd, what = 'vertices') ## rename graph vertex name to company_name_unique names(g.full.pd.df)[which(names(g.full.pd.df)=='name')] <- 'company_name_unique' ## crunchbase ipo data fields for crunchbase compnet firms ipocols <- c('company_name_unique','stock_symbol','stock_exchange_symbol','went_public_on') g.full.pd.df <- merge(g.full.pd.df, cb$co_ipo[,ipocols], by.x='company_name_unique', by.y='company_name_unique', all.x=T, all.y=F) ##============================================ ## MANUAL CORRECTIONS (COMPUSTAT STOCK SYMBOLS CHANGED AFTER CRUNCHBASE DATA) ##-------------------------------------------- # ## SEARCH COMPUSTAT NAMES # csa2[grep('SONY',csa2$conm),] # ## SEARCH COMPUSTAT TICKER SYMBOLS # csa2[grep('software',csa2$conm,ignore.case = T),c('conm','tic')] # ## SEARCH CRUNCHBASE IPOS # cb$co_ipo[grep('software-ag',cb$co_ipo$company_name_unique),c('company_name_unique','stock_symbol','stock_exchange_symbol')] # # > unique(as.character(df.sub$i[is.na(df.sub$roa)])) # # [1] "ask-com" ?? "bazaarvoice"- # # [3] "bmc-software"- "compuware"- # # [5] "csc"-[bought by DXC] "forcepoint"- # # [7] "fujitsu"- "google"- # # [9] "htc" ?? "mcafee"- # # [11] "naspers"- "netsuite" # # [13] "opera-software"- "qlik-technologies"- # # [15] "responsys"- "rightnow-technologies"- # # [17] "samsung-electronics"?? "servicepower" ?? 
# # [19] "siemens"- "software-ag" # # [21] "solarwinds"- "sony"- ### MAP: [COMPUSTAT]::CONM |--> [CrunchBase]::ipo.stock_symbol ### to replace the COMPUSTAT `tic` value with the CrunchBase `stock_symbol` ### in order to merge COMPUSTAT financials into CrunchBase firm data cs.conm_cb.stock <- c( `ALPHABET INC`='GOOG', `SONY CORP`='6758', ## Tokyo exchange symbol -- just using it here to map Compustat data to CrunchBase for Sony `BMC SOFTWARE INC`='BMC', `DXC TECHNOLOGY COMPANY`='csc', ## CSC was acquired by DXC `FUJITSU LTD`='6702', ## Tokyo exchange symbol `NASPERS LTD`='NPN', ## Johannesburg stock exchange `OPERA LTD -ADR`='OPESF', `RESPONSYS INC`='MKTG', `SIEMENS AG`='SIEMENS', `SOLARWINDS INC`='SWI', `SONY CORP SNE`='6758', `BAZAARVOICE INC`='BV', `COMPUWARE CORP`='CPWR', `WEBSENSE INC`='WBSN', ##Forcepoint or Websense ? `MCAFEE INC`='MFE', `QLIK TECHNOLOGIES INC`='QLIK', `RIGHTNOW TECHNOLOGIES INC`='RNOW' ) csa2$stock_symbol <- csa2$tic for (conm in names(cs.conm_cb.stock)) { csa2$stock_symbol[csa2$conm==conm] <- cs.conm_cb.stock[conm] ## set the } ##=============================== ## MERGE COMPUSTAT DATA INTO CRUNCHBASE DATA ##------------------------------- ## merge in COMPUSTAT data by stock_exchange symbol csa2.tmp <- csa2[csa2$stock_symbol %in% g.full.pd.df$stock_symbol & !is.na(csa2$stock_symbol),] df.cs <- merge(g.full.pd.df, csa2.tmp, by.x='stock_symbol',by.y='stock_symbol', all.x=T, all.y=F) ## SEGMENTS DATA company_name_unique to merge df.conm.u <- data.frame() for (conm in unique(df.cs$conm)) { names <- df.cs[which(df.cs$conm==conm),'company_name_unique'] df.conm.u <- rbind(df.conm.u, data.frame(conm=conm, company_name_unique=unique(names)[1]) ) } ## drop NA df.conm.u <- na.omit(df.conm.u) ## MERGE IN company_name_unique seg3 <- merge(seg2, df.conm.u, by.x='conm', by.y='conm', all.x=T, all.y=F) ## filter only segments data for firms with company_name_unique matched from CrunchBase data seg4 <- seg3[which(!is.na(seg3$company_name_unique)),] 
##=================================== ## ## AQUISITIONS FILTER ## ##----------------------------------- ## GET ALL ACQ EVENT VERTICES ## keep only acquisitions with acquirer in ego network and target in global competition network acq.src <- co_acq[ co_acq$acquirer_name_unique %in% V(g.pd)$name & co_acq$acquiree_name_unique %in% V(g.full.pd)$name, ] acq.src.allpd <- acq.src[ acq.src$acquired_on >= start & acq.src$acquired_on < end , ] acq.src.allpd <- acq.src.allpd[order(acq.src.allpd$acquired_on, decreasing = F), ] ##--------------------------------------------- ## LOAD DATA AFTER PROPENSITIES ARE COMPUTED ##--------------------------------------------- l.prop <- readRDS(file.path(.data_dir, sprintf('acqlogit_propensity_score_comp_list_%s_d%s_ctrl.rds',name_i,d))) g.prop <- l.prop$g.prop g.full.prop <- l.prop$g.full.prop # g.prop.nc <- l.prop$g.prop.nc ## ? # g.full.prop.nc <- l.prop$g.full.prop.nc ## ? a.df <- l.prop$a.df a.df.ctrl <- l.prop$a.df.ctrl t.df <- l.prop$t.df a.prop <- l.prop$a.prop t.prop <- l.prop$t.prop ##============================================= ## YEAR PERIODS: DEFINE NICHE CLUSTERS ##--------------------------------------------- l <- list() l.cov <- list() ## covariates data to compute regression dataframe df.mmc <- data.frame() df.rem <- data.frame() # df.reg <- data.frame() ## replaced by l.cov lidx <- 0 ## acquisition list index timeval <- timeval.last <- 0 ## set graphs to use to process node collapse from g.pd.nc <- g.pd.orig ## ego network to node collapse for network covariates g.full.pd.nc <- g.full.pd.orig ## full netowrk to node collapse for network covariates ##=============================== ## ## MAIN LOOP: COMPUTE COVARIATES ## ##------------------------------- ## ACQUISITION EVENTS: UPDATE MMC & DYNAMIC EFFs do.node.collapse <- TRUE ## START WITH FALSE AND SET TO TRUE ON FIRST LOOP for (j in 1:nrow(acq.src.allpd)) { df.acq.j <- acq.src.allpd[j,] ## this acquisition row in the acquisition dataframe date_j <- 
acq.src.allpd$acquired_on[j] year_j <- as.integer(str_sub(date_j,1,4)) uuid_j <- df.acq.j$acquisition_uuid ## g.pd d2 updated each acquisition ## g.pd.orig d2 original ## g.full.pd.orig global network within timeframe start, end cat(sprintf('\n\n%s %s-->%s: acquisition %s (%.2f%s)\n\n',date_j, df.acq.j$acquirer_name_unique, df.acq.j$acquiree_name_unique, j,100*j/nrow(acq.src.allpd),'%')) ##------------------------------------------- ## NODE COLLAPSE PREVIOUS ACQUISITION IF IT WAS SKIPPED ##------------------------------------------- if (do.node.collapse & j > 1) { cat(sprintf('node collapsing previous skipped acquisition %s:\n',(j-1))) g.pd.nc <- acf$nodeCollapseGraph(g.pd.nc, acq.src.allpd[(j-1),]) # g.pd.nc <- acf$nodeCollapseGraphAbsorbSubgraph(g.pd.nc, g.full.pd.nc, acq.src.allpd[(j-1),], absorb.levels) g.full.pd.nc <- acf$nodeCollapseGraph(g.full.pd.nc, acq.src.allpd[(j-1),]) ## FLAG TO NODE COLLAPSE NEXT LOOP do.node.collapse <- TRUE } else { ## DONT NODE COLLAPSE PREVIOUS ACQUISITION IF IT WAS ALREADY NODE COLLAPSED (NOT SKIPPED) ## FLAG TO NODE COLLAPSE NEXT LOOP do.node.collapse <- TRUE } ##------------------------------------------- ## CHECKS TO SKIP THIS ACQUISITION ##------------------------------------------- ## ACQUISITION MUST BE IN PROPENSITY SCORES DATAFRAMES if ( !(uuid_j %in% a.prop$acquisition_uuid) | !(uuid_j %in% t.prop$acquisition_uuid)) next # SKIP IF ALL ACQUIRER ALTERNATIVES HAVE NO COMPUSTAT FINANICALS (check m2b all NA) if (all(is.na(a.prop$m2b[which(a.prop$acquisition_uuid==uuid_j)]))) next ## SKIP IF EITHER ACQUIRER OR TARGET IS NOT IN NETWORK if ( !(acq.src.allpd$acquiree_name_unique[j] %in% V(g.full.pd.nc)$name) ) next if ( !(acq.src.allpd$acquirer_name_unique[j] %in% V(g.pd.nc)$name) ) next ## SKIP IF ACQUIRER IS NOT PUBLIC isPublicAcq <- (acq.src.allpd$acquirer_name_unique[j] %in% cb$co_ipo$company_name_unique & cb$co_ipo$went_public_on[cb$co_ipo$company_name_unique==acq.src.allpd$acquirer_name_unique[j]] <= 
acq.src.allpd$acquired_on[j]) if (length(isPublicAcq)==0) next if ( ! isPublicAcq) next lidx <- length(l) + 1 l[[lidx]] <- list() ##------------------------------------- ## Absorb subgraph from global network to ego firm network, if not exists ##------------------------------------- ## cache network before absorbing target subgraph to compute counterfactual newtorks g.pd.nc.cf <- g.pd.nc if ( ! acq.src.allpd$acquiree_uuid[j] %in% V(g.pd.nc)$company_uuid) { targ.g.full.pd.nc.vid <- which(V(g.full.pd.nc)$company_uuid == acq.src.allpd$acquiree_uuid[j]) ## target subgraph from global network (using absorb.levels number of indirect competitor levels) g.full.pd.nc.sub.l <- igraph::make_ego_graph(graph=g.full.pd.nc, order=absorb.levels, nodes=targ.g.full.pd.nc.vid) ## only absorb if target subgraph exists if (length(g.full.pd.nc.sub.l)>0 & class(g.full.pd.nc.sub.l[[1]])=='igraph') { g.full.pd.nc.sub <- g.full.pd.nc.sub.l[[1]] ## igraph "+" operator combines graphs .verts1 <- as_data_frame(g.pd.nc, "vertices") .verts2 <- as_data_frame(g.full.pd.nc.sub, "vertices") .names.2.add.1 <- .verts2$name[which( ! 
.verts2$name %in% .verts1$name)] .names.2.add.1 <- .names.2.add.1[!is.na(.names.2.add.1)] for (.name in .names.2.add.1) { vr <- nrow(.verts1)+1 .verts1[vr, ] <- NA for (col in names(.verts1)) { if (col %in% names(.verts2)) { .verts1[vr, col] <- .verts2[which(.verts2$name==.name), col] } } } .verts <- unique(.verts1) .el <- rbind(as_data_frame(g.pd.nc), as_data_frame(g.full.pd.nc.sub)) g.pd.nc <- graph_from_data_frame(d = .el, directed = FALSE, vertices = .verts) } } ##------------------------------------- ## Set Network Cluster for period network (after having node collapsed) ##------------------------------------- V(g.pd.nc)$nc <- as.integer(igraph::multilevel.community(g.pd.nc)$membership) V(g.full.pd.nc)$nc <- as.integer(igraph::multilevel.community(g.full.pd.nc)$membership) ##===================================== ## Subset Year Period Network ##------------------------------------- cat(' subsetting network edges for year period of acquisition...') ## Period network removes competitive relations ended < year_j OR started >= year_j+1 g.pd <- asIgraph(acf$makePdNetwork(asNetwork(g.pd.nc), year_j-1, year_j+1, isolates.remove = F)) g.full.pd <- asIgraph(acf$makePdNetwork(asNetwork(g.full.pd.nc), year_j-1, year_j+1, isolates.remove = F)) V(g.pd)$name <- V(g.pd)$vertex.names V(g.full.pd)$name <- V(g.full.pd)$vertex.names cat('done.') ##------------------------------- ## Compute Focal Firm Ego Network MMC Measures ##------------------------------- ## GET FIRM x FRIM MMC MATRIX TO USE IN FM-MMC COMPUTATION m.mmc <- acf$getFirmFirmMmc(g.pd, as.integer(V(g.pd)$nc)) ## Update MMC after acquisition l[[lidx]]$mmc <- acf$getFmMmc(g.pd, as.integer(V(g.pd)$nc)) ## MMC degree: number of mmc dyads linked to each firm i V(g.pd)$num.mmc.comps <- acf$getNumMmcRivalsByMembership(g.pd, as.integer(V(g.pd)$nc), m.mmc) ## SUM FM MMC over markets ?????? 
V(g.pd)$fm.mmc.sum <- rowSums(l[[lidx]]$mmc) V(g.pd)$num.mkts <- apply(l[[lidx]]$mmc, MARGIN=1, FUN=function(x){ return(length(x[x>0])) }) ##----------------------------------------- ## GET DATAFRAME VARS ##----------------------------------------- # ## Acquirer d2 original org.vid # xi.new.vid <- which(V(g.pd)$name == acq.src.allpd$acquirer_name_unique[j]) # ## target d2 original org.vid # xj.new.vid <- which(V(g.pd)$name == acq.src.allpd$acquiree_name_unique[j]) ## acquirer d2 t=j id xi <- which(V(g.pd)$name==acq.src.allpd$acquirer_name_unique[j]) ## target d2 t=j id xj <- which(V(g.pd)$name==acq.src.allpd$acquiree_name_unique[j]) ## CHECKS if (length(xi)==0) stop(sprintf('acquirer firm `%s` not in g.pd focal firm ego network\n',acq.src.allpd$acquirer_name_unique[j])) if (length(xj)==0) stop(sprintf('target firm `%s` not in g.pd focal firm ego network\n',acq.src.allpd$acquiree_name_unique[j])) # ## acquirer id in original graph (at start of period) # xi.orig <- as.integer(V(g.pd.orig)[V(g.pd.orig)$name==acq.src.allpd$acquirer_name_unique[j]]) # xi.nc <- as.integer(V(g.pd.orig)$nc[xi.orig]) ## original nc for the period xi.nc <- V(g.pd)$nc[xi] # xi.mmc.sum <- V(g.pd)$fm.mmc.sum[xi] xi.num.mkts <- V(g.pd)$num.mkts[xi] num.mmc.comps <- V(g.pd)$num.mmc.comps[xi] # ## # xj.orig <- ifelse( !is.na(xj.orig.vid), as.integer(V(g.pd.orig)[V(g.pd.orig)$orig.vid==xj.orig.vid]), NA) # xj.orig <- ifelse(length(xj.orig) > 1, xj.orig, NA) # xj.nc <- ifelse(length(xj)==0,NA, V(g.pd.orig)$nc[xj.orig] ) ## original nc for the period xj.nc <- V(g.pd)$nc[xj] ##-------------------------------------- ## ## TARGET ALTERNATIVES SET ## ##-------------------------------------- ## SELECT FROM PROPENSITY SCORES (if alternatives more than 5) t.prop.j <- t.prop[which(t.prop$acquisition_uuid==df.acq.j$acquisition_uuid & !is.na(t.prop$pred)),] t.prop.j <- t.prop.j[order(t.prop.j$pred, decreasing = T), ] if (nrow(t.prop.j)==0) { next } else if (nrow(t.prop.j)>6) { idx.1 <- which(t.prop.j$y==1) 
idx.0 <- which(t.prop.j$y==0) idx.0.sample <- idx.0[1:min(5,length(idx.0))] alt.targ.names <- t.prop.j$company_name_unique[c(idx.1, idx.0.sample)] } else { alt.targ.names <- t.prop.j$company_name_unique } ## ACTUAL TARGET ID targ.id <- xj ## START TARGET ALTERNATIVES DATAFRAME df.targ.alt <- cb$co[which(cb$co$company_name_unique %in% alt.targ.names),] ## MERGE IN y and d df.targ.alt <- merge(df.targ.alt, t.prop.j[,c('company_name_unique','y','d')], by.x='company_name_unique',by.y='company_name_unique',all.x=T,all.y=F) ## ipo status df.targ.alt$is.public <- sapply(1:nrow(df.targ.alt), function(x){ isNotOperating <- df.targ.alt$status[x] != 'operating' ipo.date <- cb$co_ipo$went_public_on[which(cb$co_ipo$company_name_unique == df.targ.alt$company_name_unique[x])] if (length(ipo.date)>1) ipo.date <- min(ipo.date, na.rm=TRUE) if (length(ipo.date)<1) return(0) return(ifelse( isNotOperating & ipo.date <= date_j, 1, 0)) }) ## set is.public NAs = 0 df.targ.alt$is.public[is.na(df.targ.alt$is.public)] <- 0 ## target had IPO df.targ <- df.targ.alt[which(df.targ.alt$company_name_unique == V(g.pd)$name[targ.id]), ] if (nrow(df.targ) != 1) next ## select based on ownership status df.targ.alt <- df.targ.alt[which(df.targ.alt$is.public == df.targ$is.public),] ## add MMC df.targ.alt$fm.mmc.sum <- sapply(df.targ.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, V(g.pd)$fm.mmc.sum[which(V(g.pd)$name == name)] , NA) }) df.targ.alt$num.mkts <- sapply(df.targ.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, V(g.pd)$num.mkts[which(V(g.pd)$name == name)] , NA) }) df.targ.alt$num.mmc.comps <- sapply(df.targ.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, V(g.pd)$num.mmc.comps[which(V(g.pd)$name == name)] , NA) }) ## ACQUISITIONS #df.targ.alt$acqs <- unname(sapply(df.targ.alt$company_name_unique, function(name){ # length(which(cb$co_acq$acquirer_name_unique==name & cb$co_acq$acquired_on <= date_j)) # })) df.targ.alt$acqs 
<- unname(sapply(df.targ.alt$company_name_unique, function(name){ x <- length(which(cb$co_acq$acquirer_name_unique==name & cb$co_acq$acquired_on <= date_j)) n <- length(which(cb$co_acq$acquirer_name_unique %in% V(g.full.pd)$name & cb$co_acq$acquired_on <= date_j)) return(x/n) })) ## VENTURE FUNDING df.targ.alt$fund.v.cnt <- unname(sapply(df.targ.alt$company_name_unique, function(name){ length(which(cb$co_rou$company_name_unique==name & cb$co_rou$announced_on <= date_j & cb$co_rou$funding_round_type=='venture')) })) df.targ.alt$fund.v.amt <- unname(sapply(df.targ.alt$company_name_unique, function(name){ idx <- which(cb$co_rou$company_name_unique==name & cb$co_rou$announced_on <= date_j & cb$co_rou$funding_round_type=='venture') return(sum(cb$co_rou$raised_amount_usd[idx], na.rm = T)) })) ## ALL FUNDING df.targ.alt$fund.cnt <- unname(sapply(df.targ.alt$company_name_unique, function(name){ length(which(cb$co_rou$company_name_unique==name & cb$co_rou$announced_on <= date_j)) })) df.targ.alt$fund.amt <- unname(sapply(df.targ.alt$company_name_unique, function(name){ idx <- which(cb$co_rou$company_name_unique==name & cb$co_rou$announced_on <= date_j) return(sum(cb$co_rou$raised_amount_usd[idx], na.rm = T)) })) ## USE EGO and GLOBAL NETWORK for DEGREE df.targ.alt$deg <- sapply(df.targ.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, igraph::degree(g.pd,which(V(g.pd)$name==name)) , NA) }) df.targ.alt$deg.full <- sapply(df.targ.alt$company_name_unique, function(name){ ifelse(name %in% V(g.full.pd)$name, igraph::degree(g.full.pd,which(V(g.full.pd)$name==name)) , NA) }) ##---------------------------------------------------------- ## DATA SAMPLE OF ALL ACQUIRERS (REAL + 5 ALTERNATIVES) l[[lidx]]$df.targ.alt <- df.targ.alt ##-------------------------------------- ## ## ACQUIRER ALTERNATIVES SET ## ##-------------------------------------- ## SELECT FROM PROPENSITY SCORES (if alternatives more than 5) a.prop.j <- 
a.prop[which(a.prop$acquisition_uuid==df.acq.j$acquisition_uuid & !is.na(a.prop$pred)),] a.prop.j <- a.prop.j[order(a.prop.j$pred, decreasing = T), ] if (nrow(a.prop.j)==0) { next } else if (nrow(a.prop.j)>6) { idx.1 <- which(a.prop.j$y==1) idx.0 <- which(a.prop.j$y==0) idx.0.sample <- idx.0[1:min(5,length(idx.0))] alt.acq.names <- a.prop.j$company_name_unique[c(idx.1, idx.0.sample)] } else { alt.acq.names <- a.prop.j$company_name_unique } ## ACTUAL TARGET ID acq.id <- xi ## START TARGET ALTERNATIVES DATAFRAME df.acq.alt <- cb$co[which(cb$co$company_name_unique %in% alt.acq.names),] ## MERGE IN y and d df.acq.alt <- merge(df.acq.alt, a.prop.j[,c('company_name_unique','y','d')], by.x='company_name_unique',by.y='company_name_unique',all.x=T,all.y=F) ## ipo status df.acq.alt$is.public <- sapply(1:nrow(df.acq.alt), function(x){ isNotOperating <- df.acq.alt$status[x] != 'operating' ipo.date <- cb$co_ipo$went_public_on[which(cb$co_ipo$company_name_unique == df.acq.alt$company_name_unique[x])] if (length(ipo.date)>1) ipo.date <- min(ipo.date, na.rm=TRUE) if (length(ipo.date)<1) return(0) return(ifelse( isNotOperating & ipo.date <= date_j, 1, 0)) }) ## set is.public NAs = 0 df.acq.alt$is.public[is.na(df.acq.alt$is.public)] <- 0 ## target had IPO df.acq <- df.acq.alt[which(df.acq.alt$company_name_unique == V(g.pd)$name[acq.id]), ] if (nrow(df.acq) != 1) next ##stop(sprintf('error: nrow df.acq %s > 1',nrow(df.acq))) ## select based on ownership status df.acq.alt <- df.acq.alt[which(df.acq.alt$is.public == df.acq$is.public), ] ## set MMC df.acq.alt$fm.mmc.sum <- sapply(df.acq.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$fm.mmc.sum[which(V(g.pd)$name == name)]) , NA) }) df.acq.alt$num.mkts <- sapply(df.acq.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$num.mkts[which(V(g.pd)$name == name)]) , NA) }) df.acq.alt$num.mmc.comps <- sapply(df.acq.alt$company_name_unique, function(name){ 
ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$num.mmc.comps[which(V(g.pd)$name == name)]) , NA) }) ## ACQUISITIONS #df.acq.alt$acqs <- unname(sapply(df.acq.alt$company_name_unique, function(name){ # length(which(cb$co_acq$acquirer_name_unique==name & cb$co_acq$acquired_on <= date_j)) #})) df.acq.alt$acqs <- unname(sapply(df.acq.alt$company_name_unique, function(name){ x <- length(which(cb$co_acq$acquirer_name_unique==name & cb$co_acq$acquired_on <= date_j)) n <- length(which(cb$co_acq$acquirer_name_unique %in% V(g.full.pd)$name & cb$co_acq$acquired_on <= date_j)) return(x/n) })) ## USE EGO and GLOBAL NETWORK for DEGREE df.acq.alt$deg <- sapply(df.acq.alt$company_name_unique, function(name){ ifelse(name %in% V(g.pd)$name, igraph::degree(g.pd,which(V(g.pd)$name==name)) , NA) }) df.acq.alt$deg.full <- sapply(df.acq.alt$company_name_unique, function(name){ ifelse(name %in% V(g.full.pd)$name, igraph::degree(g.full.pd,which(V(g.full.pd)$name==name)) , NA) }) ## KEEP SAME DIMENSIONS AS TARGET DATAFRAME df.acq.alt$fund.v.cnt <- NA df.acq.alt$fund.v.amt <- NA df.acq.alt$fund.cnt <- NA df.acq.alt$fund.amt <- NA ##---------------------------------------------------------- ## DATA SAMPLE OF ALL ACQUIRERS (REAL + 5 ALTERNATIVES) l[[lidx]]$df.acq.alt <- df.acq.alt ##-------------------------------------- ## ## NETWORK COVARIATES ## ##-------------------------------------- cat('computing network covariates...') df.acq.alt$set <- 'acquirer' df.acq.alt$event <- sapply(df.acq.alt$d, function(d)ifelse(as.integer(d)==0, 1, 0)) df.targ.alt$set <- 'target' df.targ.alt$event <- sapply(df.targ.alt$d, function(d)ifelse(as.integer(d)==0, 1, 0)) df.alt <- rbind(df.acq.alt, df.targ.alt) df.alt$t <- j ## acquisition index df.alt <- df.alt[order(which(V(g.full.pd.orig)$name %in% df.alt$company_name_unique )), ] ## confirm ascencing order if (!all(count(df.alt$set)$freq>1)) { cat('missing alternative match. 
skipping.\n') next ## SKIP IF NOT AT LEAST 1 ALTERNATIVE FOR ACQUIRER AND TARGET } ##----------------------------------------------- ### Create Diff Graph (removed|acquired nodes are represented as isolates) ## ## ******** TODO ********* ##----------------------------------------------- # vids <- which( V(g.pd)$name %in% df.alt$company_name_unique ) # vids.orig <- which( V(g.pd)$name %in% df.alt$company_name_unique ) # vids.orig.rm <- vids[which( !(vids.orig %in% vids))] # mapping <- V(g.pd)[which(V(g.pd)$orig.vid %in% V(g.full.pd)$orig.vid) ] # g.diff <- igraph::contract.vertices(g.full.pd, mapping = mapping) # V(g.diff)$name <- V(g.full.pd.orig)$name # vids.diff <- as.integer( V(g.diff)[which( V(g.diff)$name %in% df.alt$company_name_unique )] ) ##----------------------------------------------- ## global covars ##----------------------------------------------- cov.vids <- which(V(g.pd)$name %in% df.alt$company_name_unique) tmp.cov <- data.frame( company_name_unique = unlist(V(g.pd)$name[cov.vids]), closeness = unname(igraph::closeness(g.pd, vids = cov.vids,normalized = TRUE)), constraint = unname(unlist(igraph::constraint(g.pd, nodes = cov.vids))) ) df.alt <- merge(df.alt, tmp.cov, by = 'company_name_unique', all.x = T, all.y = F) # ## acquisition experience # df.alt$acq.experience <- unlist(sapply(1:nrow(df.alt), function(x){ return( # nrow(acq.src.allpd[which(acq.src.allpd$acquirer_name_unique == df.alt$company_name_unique[x] # & acq.src.allpd$acquired_on <= date_j), ]) / j ## scale experience to num observed acquisitions # )})) # ## local covars in pd graph # df.alt$fm.mmc.sum <- sapply(df.alt$company_name_unique, function(name){ # ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$fm.mmc.sum[which(V(g.pd)$name == name)]), NA) # }) # df.alt$num.mkts <- sapply(df.alt$company_name_unique, function(name){ # ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$num.mkts[which(V(g.pd)$name == name)]), NA) # }) # df.alt$num.mmc.comps <- sapply(df.alt$company_name_unique, 
function(name){ # ifelse(name %in% V(g.pd)$name, as.numeric(V(g.pd)$num.mmc.comps[which(V(g.pd)$name == name)]), NA) # }) cat('done.\n') ##--------------------------------------------- ##--------------------------------------------- ## MERGE IN PUBLIC FIRM FINANCIAL CONTROLS ##--------------------------------------------- cat('adding public firm financial controls...') ctrl.col <- c('company_name_unique','datayear','act','emp','ebitda','m2b','che') ctrl.idx <- which(df.cs$datayear == (df.acq.j$acquired_year-1) ) if (length(ctrl.idx)==0) next df.alt <- merge(df.alt, df.cs[ctrl.idx,ctrl.col], by.x='company_name_unique',by.y='company_name_unique',all.x=T,all.y=F) df.alt$ln_asset <- log(df.alt$act) df.alt$ln_emp <- log(df.alt$emp) df.alt$roa <- df.alt$ebitda / df.alt$act df.alt$cash <- df.alt$che / df.alt$act cat('done.\n') ##--------------------------------------------- ## RIVALS RECENT ACQUISITIONS ##--------------------------------------------- cat('computing acquirers rivals recent acquisitions...') df.alt$rival.acq.1 <- NA df.alt$rival.acq.2 <- NA df.alt$rival.acq.3 <- NA for (ri in 1:nrow(df.alt)) { x <- df.alt[ri,] xdate <- as.character(df.acq.j$acquired_on) ## date of this acquitision j if(is.na(xdate)) next parts <- str_split(xdate,'[-]')[[1]] xdate1 <- sprintf('%04d-%s-%s',as.numeric(parts[1])-1,parts[2],parts[3]) xdate2 <- sprintf('%04d-%s-%s',as.numeric(parts[1])-2,parts[2],parts[3]) xdate3 <- sprintf('%04d-%s-%s',as.numeric(parts[1])-3,parts[2],parts[3]) rivals <- names(neighbors(g.full.pd, v = which(V(g.pd)$name==x$company_name_unique))) df.alt$rival.acq.1[ri] <- sum(rivals %in% cb$co_acq$acquirer_name_unique[cb$co_acq$acquired_on < xdate & cb$co_acq$acquired_on >= xdate1]) df.alt$rival.acq.2[ri] <- sum(rivals %in% cb$co_acq$acquirer_name_unique[cb$co_acq$acquired_on < xdate & cb$co_acq$acquired_on >= xdate2]) df.alt$rival.acq.3[ri] <- sum(rivals %in% cb$co_acq$acquirer_name_unique[cb$co_acq$acquired_on < xdate & cb$co_acq$acquired_on >= xdate3]) } 
cat('done.\n') ##--------------------------------------------- ## COMPUTE PRODUCT SIMILARITY OF COUNTERFACTUAL acquirer|target TO ACTUAL target|acquirer ##--------------------------------------------- cat('computing product similarity...') df.alt$ij.sim <- NA df.alt$ij.cossim <- NA for (si in 1:nrow(df.alt)) { x <- df.alt[si,] if (x$set == 'target') { firm.i <- df.acq.j$acquirer_name_unique firm.j <- x$company_name_unique } else { firm.i <- x$company_name_unique firm.j <- df.acq.j$acquiree_name_unique } ## acquirer cats.i <- str_split(cb$co$category_list[cb$co$company_name_unique == firm.i], '[|]')[[1]] cg.i <- str_split(cb$co$category_group_list[cb$co$company_name_unique == firm.i], '[|]')[[1]] c.i <- unique(c(cats.i,cg.i)) ## target cats.j <- str_split(cb$co$category_list[cb$co$company_name_unique == firm.j], '[|]')[[1]] cg.j <- str_split(cb$co$category_group_list[cb$co$company_name_unique == firm.j], '[|]')[[1]] c.j <- unique(c(cats.j,cg.j)) ## similarity c.all <- unique(c(c.i,c.j)) v.i <- as.integer(c.all %in% c.i) v.j <- as.integer(c.all %in% c.j) df.alt$ij.sim[si] <- (v.i %*% v.j)[1,1] df.alt$ij.cossim[si] <- (v.i %*% v.j)[1,1] / (sqrt(sum(v.i^2)) * sqrt(sum(v.j^2))) } cat('done.\n') ##--------------------------------------------- ## Diversification Control ##--------------------------------------------- df.alt$div <- NA for (di in 1:nrow(df.alt)) { x = df.alt[di,] df.seg <- seg4[which(seg4$year==(df.acq.j$acquired_year-1) & seg4$company_name_unique==x$company_name_unique),] if (nrow(df.seg)==0) next sm <- sum(df.seg$sales) df.alt$div[di] <- sum(sapply(df.seg$sales, function(x) ifelse(x==0, 0, (x/sm) * log( 1/(x/sm) )) )) } ##--------------------------------------------- ## CACHE ALTERNATIVES DATAFRAME ##--------------------------------------------- l[[lidx]]$df.alt <- df.alt ##--------------------------------------------- ## SYNERGIES ##--------------------------------------------- cat('computing counterfactual networks for positional synergy:\n') ## 
ACQUIRER's POSITION acquirer <- acq.src.allpd$acquirer_name_unique[j] ## Counterfactual target graphs g.cf <- lapply(df.targ.alt$company_name_unique, function(name){ ## counterfactual target UUID cf.uuid <- cb$co$company_uuid[which(cb$co$company_name_unique==name)] ## absorb counterfactual target subgraph into cached actual graph, if not exists g.pd.nc.cf.x <- g.pd.nc.cf if ( ! cf.uuid %in% V(g.pd.nc.cf.x)$company_uuid) { targ.g.full.pd.nc.vid <- which(V(g.full.pd.nc)$company_uuid == cf.uuid) g.full.pd.nc.sub.l <- igraph::make_ego_graph(graph=g.full.pd.nc, order=absorb.levels, nodes=targ.g.full.pd.nc.vid) if (length(g.full.pd.nc.sub.l)>0 & class(g.full.pd.nc.sub.l[[1]])=='igraph'){ g.full.pd.nc.sub <- g.full.pd.nc.sub.l[[1]] .verts1 <- as_data_frame(g.pd.nc.cf.x, "vertices") .verts2 <- as_data_frame(g.full.pd.nc.sub, "vertices") cat(sprintf('vdf1 dim (%s, %s), vdf2 dim (%s, %s)\n',nrow(.verts1),ncol(.verts1),nrow(.verts2),ncol(.verts2))) cat(sprintf(' vdf1::%s\n vdf2::%s',paste(names(.verts1),collapse = '|'),paste(names(.verts2),collapse = '|'))) ## fix duplicates .names.2.add.1 <- .verts2$name[which( ! 
.verts2$name %in% .verts1$name)] .names.2.add.1 <- .names.2.add.1[!is.na(.names.2.add.1)] for (.name in .names.2.add.1) { vr <- nrow(.verts1)+1 .verts1[vr, ] <- NA for (col in names(.verts1)) { if (col %in% names(.verts2)) { .verts1[vr, col] <- .verts2[which(.verts2$name==.name), col] } } } .verts <- unique(.verts1) .el <- rbind(as_data_frame(g.pd.nc.cf.x), as_data_frame(g.full.pd.nc.sub)) g.pd.nc.cf.x <- graph_from_data_frame(d = .el, directed = FALSE, vertices = .verts) } } ## set network clusters V(g.pd.nc.cf.x)$nc <- as.integer(igraph::multilevel.community(g.pd.nc.cf.x)$membership) ## subset period graph (removing ) g.pd.cf.x <- asIgraph(acf$makePdNetwork(asNetwork(g.pd.nc.cf.x), year_j-1, year_j+1, isolates.remove = F)) V(g.pd.cf.x)$name <- V(g.pd.cf.x)$vertex.names ## return counterfactual node collapsed graph tmp.acq.df <- data.frame( acquirer_uuid = acq.src.allpd$acquirer_uuid[j], acquiree_uuid = cf.uuid, acquired_on = acq.src.allpd$acquired_on[j] ) return(acf$nodeCollapseGraph(g.pd.cf.x, tmp.acq.df)) # return(acf$nodeCollapseGraphAbsorbSubgraph(g.pd, g.full.pd, tmp.acq.df, absorb.levels)) }) names(g.cf) <- df.targ.alt$company_name_unique ##--------------------------------------------- ## APPEND PAIRING COVARIATES TO REGRESSION DATAFRAME ##--------------------------------------------- cat('appending dyadic regression dataframe...\n') for (k in 1:nrow(df.alt[df.alt$set=='acquirer', ])) { ix <- which( df.alt$company_name_unique == df.alt[df.alt$set=='acquirer', ][k, ]$company_name_unique ) if (length(df.alt$event[ix])==0) next for (r in 1:nrow(df.alt[df.alt$set=='target', ])) { jx <- which( df.alt$company_name_unique == df.alt[df.alt$set=='target', ][r, ]$company_name_unique ) # cat(sprintf('k %s, ix %s, r %s, jx %s\n',k,ix,r,jx)) ## skip pairing if neither acquirer nor target were in this actual event if (length(df.alt$event[jx])==0) next if ( !as.integer(df.alt$event[ix]) & !as.integer(df.alt$event[jx]) ) next cat(sprintf('appending pairing 
%s-->%s\n',df.alt$company_name_unique[ix],df.alt$company_name_unique[jx])) if (df.alt$company_name_unique[ix] != df.alt$company_name_unique[jx]) { # cat(sprintf('ix %s jx %s\n',ix,jx)) ## DISTANCE ij.dist <- igraph::distances(g.full.pd, v = which(V(g.full.pd)$name == df.alt$company_name_unique[ix]), to= which(V(g.full.pd)$name == df.alt$company_name_unique[jx]) ) ## AQUIRER POSITION cat(' power centralities\n') pow.n1 <- unname(igraph::power_centrality(g.pd, nodes = which(V(g.pd)$name==df.alt$company_name_unique[ix]), exponent = -0.1)) #pow.n2 <- unname(igraph::power_centrality(g.full.pd, nodes = which(V(g.full.pd)$name==df.alt$company_name_unique[ix]), exponent = -0.2)) pow.n3 <- unname(igraph::power_centrality(g.pd, nodes = which(V(g.pd)$name==df.alt$company_name_unique[ix]), exponent = -0.3)) ## COUNTERFACTUAL NETWORK `r` for different target jx g.cf.r <- g.cf[[ df.alt$company_name_unique[jx] ]] if (class(g.cf.r) != 'igraph') { cat(sprintf('ix=%s,jx=%s: g.cf.r class `%s` is not igraph\n',ix,jx,class(g.cf.r))) next } ## COUNTERFACTUAL NETWORK COVARIATES cat(' network counterfactuals\n') # cf.m.mmc <- acf$getFirmFirmMmc(g.cf.r, as.integer(V(g.cf.r)$nc)) # cf.num.mmc.comps <- acf$getNumMmcRivalsByMembership(g.cf.r, as.integer(V(g.cf.r)$nc), cf.m.mmc) cf.closeness <- igraph::closeness(g.cf.r, vids = which(V(g.cf.r)$name==df.alt$company_name_unique[ix]), normalized = TRUE) cf.degree <- igraph::degree(g.cf.r, v = which(V(g.cf.r)$name==df.alt$company_name_unique[ix])) cf.constraint <- igraph::constraint(g.cf.r, nodes = which(V(g.cf.r)$name==df.alt$company_name_unique[ix])) cf.pow.n1 <- unname(igraph::power_centrality(g.cf.r, nodes = which(V(g.cf.r)$name==df.alt$company_name_unique[ix]), exponent = -0.1)) #cf.pow.n2 <- unname(igraph::power_centrality(g.cf.r, nodes = which(V(g.cf.r)$name==df.alt$company_name_unique[ix]), exponent = -0.2)) cf.pow.n3 <- unname(igraph::power_centrality(g.cf.r, nodes = which(V(g.cf.r)$name==df.alt$company_name_unique[ix]), exponent = -0.3)) 
## PAIRING DATAFRAME l.tmp <- list( ###------ event metadata ------ y = ifelse(as.integer(df.alt$event[ix]) & as.integer(df.alt$event[jx]), 1, 0), t = j, date = date_j, uuid = uuid_j, i = df.alt$company_name_unique[ix], j = df.alt$company_name_unique[jx], ###------ acquirer covars ------ i.age = 2018 - df.alt$founded_year[ix], i.pow.n1 = pow.n1, #i.pow.n2 = pow.n2, i.pow.n3 = pow.n3, i.closeness = df.alt$closeness[ix], i.deg = df.alt$deg[ix], i.deg.full = df.alt$deg.full[ix], i.fm.mmc.sum = ifelse(is.missing(df.alt$fm.mmc.sum[ix]), NA, df.alt$fm.mmc.sum[ix]), i.num.mkts = ifelse(is.missing(df.alt$num.mkts[ix]), NA, df.alt$num.mkts[ix]), i.num.mmc.comps = ifelse(is.missing(df.alt$num.mmc.comps[ix]), NA, df.alt$num.mmc.comps[ix]), i.constraint = df.alt$constraint[ix], i.acqs = df.alt$acqs[ix], i.rival.acq.1 = df.alt$rival.acq.1[ix], i.rival.acq.2 = df.alt$rival.acq.2[ix], i.rival.acq.3 = df.alt$rival.acq.3[ix], i.div = df.alt$div[ix], i.ij.sim = df.alt$ij.sim[ix], i.ij.cossim = df.alt$ij.cossim[ix], i.ln_asset = df.alt$ln_asset[ix], i.ln_emp = df.alt$ln_emp[ix], i.roa = df.alt$roa[ix], i.cash = df.alt$cash[ix], i.m2b = df.alt$m2b[ix], ###------ target covars ------ j.age = 2018 - df.alt$founded_year[jx], j.deg = df.alt$deg[jx], j.deg.full = df.alt$deg.full[jx], j.fm.mmc.sum = ifelse(is.missing(df.alt$fm.mmc.sum[jx]), NA, df.alt$fm.mmc.sum[jx]), j.num.mkts = ifelse(is.missing(df.alt$num.mkts[jx]), NA, df.alt$num.mkts[jx]), j.num.mmc.comps = ifelse(is.missing(df.alt$num.mmc.comps[jx]), NA, df.alt$num.mmc.comps[jx]), j.constraint = df.alt$constraint[jx], j.acqs = df.alt$acqs[jx], j.rival.acq.1 = df.alt$rival.acq.1[jx], j.rival.acq.2 = df.alt$rival.acq.2[jx], j.rival.acq.3 = df.alt$rival.acq.3[jx], j.fund.v.cnt = df.alt$fund.v.cnt[jx], j.fund.v.amt = df.alt$fund.v.amt[jx], j.fund.cnt = df.alt$fund.cnt[jx], j.fund.amt = df.alt$fund.amt[jx], j.ij.sim = df.alt$ij.sim[jx], j.ij.cossim = df.alt$ij.cossim[jx], ###------ dyadic covars: acquisition pairing ------ ij.same.region 
= ifelse(df.alt$region[ix] == df.alt$region[jx], 1, 0), ij.same.state = ifelse(df.alt$state_code[ix] == df.alt$state_code[jx], 1, 0), ij.same.country = ifelse(df.alt$country_code[ix] == df.alt$country_code[jx], 1, 0), ij.same.employee.range = ifelse(df.alt$employee_count[ix] == df.alt$employee_count[jx], 1, 0), ij.dist = ifelse( class(ij.dist)=='matrix' & nrow(ij.dist)>0 & ncol(ij.dist)>0, ij.dist[1,1], Inf), ij.diff.deg = as.numeric(df.alt$deg[ix]) - as.numeric(df.alt$deg[jx]), ij.diff.deg.full = as.numeric(df.alt$deg.full[ix]) - as.numeric(df.alt$deg.full[jx]), ij.diff.fm.mmc.sum = ifelse(any(is.missing(df.alt$fm.mmc.sum[ix]),is.missing(df.alt$fm.mmc.sum[jx])), NA, as.numeric(df.alt$fm.mmc.sum[ix]) - as.numeric(df.alt$fm.mmc.sum[jx])), ij.diff.num.mkts = ifelse(any(is.missing(df.alt$num.mkts[ix]), is.missing(df.alt$num.mkts[jx])), NA, as.numeric(df.alt$num.mkts[ix]) - as.numeric(df.alt$num.mkts[jx])), ij.diff.num.mmc.comps = ifelse(any(is.missing(df.alt$num.mmc.comps[ix]), is.missing(df.alt$num.mmc.comps[jx])), NA, as.numeric(df.alt$num.mmc.comps[ix]) - as.numeric(df.alt$num.mmc.comps[jx])), ij.diff.constraint = as.numeric(df.alt$constraint[ix]) - as.numeric(df.alt$constraint[jx]), ij.diff.acqs = as.numeric(df.alt$acqs[ix]) - as.numeric(df.alt$acqs[jx]), ###------ network synergies ------ ij.syn.pow.n1 = (cf.pow.n1 - pow.n1) / pow.n1, #ij.syn.pow.n2 = (cf.pow.n2 - pow.n2) / pow.n2, ij.syn.pow.n3 = (cf.pow.n3 - pow.n3) / pow.n3, # ij.syn.num.mmc.comps = (cf.num.mmc.comps - df.alt$num.mmc.comps[ix]) / df.alt$num.mmc.comps[ix], ij.syn.closeness = (cf.closeness - df.alt$closeness[ix]) / df.alt$closeness[ix], ij.syn.degree = (cf.degree - df.alt$deg[ix]) / df.alt$deg[ix], ij.syn.constraint = (cf.constraint - df.alt$constraint[ix]) / df.alt$constraint[ix] ) l.cov[[ (length(l.cov)+1) ]] <- l.tmp } } } cat('done.\n') ##--------------------------------------- ##================================= ## NODE COLLAPSE update network ##--------------------------------- 
cat(sprintf('node collapsing acquisition %s:\n',j)) g.pd.nc <- acf$nodeCollapseGraph(g.pd.nc, acq.src.allpd[j,]) # g.pd.nc <- acf$nodeCollapseGraphAbsorbSubgraph(g.pd.nc, g.full.pd.nc, acq.src.allpd[(j-1),], absorb.levels) g.full.pd.nc <- acf$nodeCollapseGraph(g.full.pd.nc, acq.src.allpd[j,]) ## FLAG TO NOT RUN NODE COLLAPSE AT START OF NEXT LOOP SINCE IT WAS ALREADY PROCESSED HERE do.node.collapse <- FALSE ## save incrementally if (lidx %% 10 == 0) { saveRDS(list(l=l, l.cov=l.cov), file = file.path(.data_dir, sprintf("acqlogit_compnet_processed_acquisitions_synergies_ABSORB_list_%s_d%s.rds",name_i,d))) } gc() } ## end loop ## final save saveRDS(list(l=l, l.cov=l.cov), file = file.path(.data_dir, sprintf("acqlogit_compnet_processed_acquisitions_synergies_ABSORB_list_%s_d%s.rds",name_i,d)))
% Generated by roxygen2 (4.0.1): do not edit by hand \name{i50} \alias{i50} \title{Calculate quantiles} \usage{ i50(x, ...) } \arguments{ \item{x}{XXX} \item{...}{additional stuff} } \description{ XXX }
/man/i50.Rd
no_license
einarhjorleifsson/fishvise
R
false
false
205
rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{i50} \alias{i50} \title{Calculate quantiles} \usage{ i50(x, ...) } \arguments{ \item{x}{XXX} \item{...}{additional stuff} } \description{ XXX }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print_methods.R \name{print.cofad_mx} \alias{print.cofad_mx} \title{Output of a mixed design contrast analysis} \usage{ \method{print}{cofad_mx}(x, ...) } \arguments{ \item{x}{output of calc_contrast} \item{...}{further arguments} } \value{ Displays the significance of the contrast analysis. The contrastweights, the corresponding group and an effectsize are given. } \description{ Output of a mixed design contrast analysis }
/man/print.cofad_mx.Rd
no_license
cran/cofad
R
false
true
507
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print_methods.R \name{print.cofad_mx} \alias{print.cofad_mx} \title{Output of a mixed design contrast analysis} \usage{ \method{print}{cofad_mx}(x, ...) } \arguments{ \item{x}{output of calc_contrast} \item{...}{further arguments} } \value{ Displays the significance of the contrast analysis. The contrastweights, the corresponding group and an effectsize are given. } \description{ Output of a mixed design contrast analysis }
\name{TPC} \alias{TPC} \title{True Postive Count} \usage{ TPC(thres, y, y.hat) } \arguments{ \item{y.hat}{numeric. risk between 0 and 1} \item{y}{status yes=1, no=0 or dead=1, alive=0} \item{thres}{thershold where to split. Must be in range of \code{y.hat}} } \value{ true positive count } \description{ Area under the ROC curve. Calculated in various ways. } \author{ Andi Boeck }
/pkg/Atools/man/TPC.Rd
no_license
jhoefler/fitmixst4
R
false
false
417
rd
\name{TPC} \alias{TPC} \title{True Postive Count} \usage{ TPC(thres, y, y.hat) } \arguments{ \item{y.hat}{numeric. risk between 0 and 1} \item{y}{status yes=1, no=0 or dead=1, alive=0} \item{thres}{thershold where to split. Must be in range of \code{y.hat}} } \value{ true positive count } \description{ Area under the ROC curve. Calculated in various ways. } \author{ Andi Boeck }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pm_plot.R \name{pmMultiBarplot} \alias{pmMultiBarplot} \title{Plot both mutation signatures and their mutational exposures from pmsignature output for more than two groups} \usage{ pmMultiBarplot( inputG, inputParam, sigOrder = NULL, groupIndices, sortSampleNum = TRUE, charSize = 3 ) } \arguments{ \item{inputG}{a MutationFeatureData S4 class output by the pmsignature.} \item{inputParam}{a estimatedParameters S4 class output by the pmsignature.} \item{sigOrder}{the order of signatures if needed (default: NULL).} \item{groupIndices}{a vector of group indicators.} \item{sortSampleNum}{an indictor variable on whether samples are sorted by the number of mutations (default: TRUE).} \item{charSize}{the size of the character on the signature plot (default: 3)} } \value{ a list of the signature plot and the mean difference plot. } \description{ Plot both mutation signatures and their mutational exposures from pmsignature output for more than two groups } \examples{ load(system.file("extdata/sample.rdata", package="HiLDA")) Param <- pmgetSignature(G, K = 3) pmPlots <- pmMultiBarplot(G, Param, groupIndices=c(1, rep(2,3), rep(3,6))) cowplot::plot_grid(pmPlots$sigPlot, pmPlots$propPlot, rel_widths = c(1,3)) }
/man/pmMultiBarplot.Rd
no_license
USCbiostats/HiLDA
R
false
true
1,314
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pm_plot.R \name{pmMultiBarplot} \alias{pmMultiBarplot} \title{Plot both mutation signatures and their mutational exposures from pmsignature output for more than two groups} \usage{ pmMultiBarplot( inputG, inputParam, sigOrder = NULL, groupIndices, sortSampleNum = TRUE, charSize = 3 ) } \arguments{ \item{inputG}{a MutationFeatureData S4 class output by the pmsignature.} \item{inputParam}{a estimatedParameters S4 class output by the pmsignature.} \item{sigOrder}{the order of signatures if needed (default: NULL).} \item{groupIndices}{a vector of group indicators.} \item{sortSampleNum}{an indictor variable on whether samples are sorted by the number of mutations (default: TRUE).} \item{charSize}{the size of the character on the signature plot (default: 3)} } \value{ a list of the signature plot and the mean difference plot. } \description{ Plot both mutation signatures and their mutational exposures from pmsignature output for more than two groups } \examples{ load(system.file("extdata/sample.rdata", package="HiLDA")) Param <- pmgetSignature(G, K = 3) pmPlots <- pmMultiBarplot(G, Param, groupIndices=c(1, rep(2,3), rep(3,6))) cowplot::plot_grid(pmPlots$sigPlot, pmPlots$propPlot, rel_widths = c(1,3)) }
library(readr) library(data.table) library(rgdal) library(ggplot2) library(ggthemes) library(scales) library(leaflet) library(sp) library(sf) setwd('C:/Development/travel-time-prediction/') read_results <- function(file) { results <- as.data.table(read_csv(file)) results$Error = results$LinkTravelTime - results$LinkTravelTime_Predicted results$Hour <- as.numeric(format(results$DateTime, "%H")) results$DayType = factor(results$DayType) results$LinkName <- factor(results$LinkName, levels = unique(results$LinkName[order(results$LineDirectionLinkOrder)])) results$LinkOrder <- as.integer(results$LinkName) results } plot_result_errors <- function(ds = list(), labels = list()) { p <- ggplot() for (i in seq_len(min(length(ds), length(labels)))) { loop_input = paste("stat_ecdf(data = ds[[i]], aes(x = Error, colour = '",labels[[i]],"'))", sep="") p <- p + eval(parse(text=loop_input)) } p <- p + scale_y_continuous(labels=percent) + facet_grid(LinkOrder ~ .) + theme_tufte() + theme(panel.grid = element_line(size = .25, linetype = "solid", color = "black")) + theme(legend.position = "bottom") p } results_lr_single <- read_results('./data/results_lr_single.csv') results_lr_multiple <- read_results('./data/results_lr_multiple.csv') plot_result_errors(list(results_lr_single, results_lr_multiple), list('LR single', 'LR multiple')) + xlim(-100, 150) + theme(axis.text.x = element_text(size=7)) + theme(axis.text.y = element_text(size=7)) + ggsave('plots/results_lr_errors.pdf', width = 210, height = 148, units = "mm") results_svr_single <- read_results('./data/results_svr_single.csv') results_svr_multiple <- read_results('./data/results_svr_multiple.csv') plot_result_errors(list(results_svr_single, results_svr_multiple), list('SVR single', 'SVR multiple')) + xlim(-100, 150) + theme(axis.text.x = element_text(size=7)) + theme(axis.text.y = element_text(size=7)) + ggsave('plots/results_svr_errors.pdf', width = 210, height = 148, units = "mm") # Look at some of the exstreame errors: 
results_svr_single[abs(Error) > 120,] results_svr_multiple[abs(Error) > 120,] results_nn_single <- read_results('./data/results_nn_single.csv') results_nn_multiple <- read_results('./data/results_nn_multiple.csv') plot_result_errors(list(results_nn_single, results_nn_multiple), list('DNN single', 'DNN multiple')) + xlim(-100, 150) + theme(axis.text.x = element_text(size=7)) + theme(axis.text.y = element_text(size=7)) + ggsave('plots/results_dnn_errors.pdf', width = 210, height = 148, units = "mm") results_nn_single[abs(Error) > 120,] results_nn_multiple[abs(Error) > 120, list(JourneyLinkRef, DateTime, Observed, Predicted, Error)] plot_result_errors(list(results_lr_single, results_svr_single, results_nn_single), list('LR single', 'SVR single', 'NN single')) + xlim(-100, 150) + theme(axis.text.x = element_text(size=7)) + theme(axis.text.y = element_text(size=7)) plot_result_errors(list(results_lr_multiple, results_svr_multiple, results_nn_multiple), list('LR multiple', 'SVR multiple', 'NN multiple')) + xlim(-100, 150) + theme(axis.text.x = element_text(size=7)) + theme(axis.text.y = element_text(size=7)) #route_links <- readOGR(dsn = "data/4A_RouteLinks.csv", layer = "GeographyWkt", use_iconv = TRUE, encoding = "UTF-8") #route_links1 <- st_read("data/4A_RouteLinks.csv", "4A_RouteLinks", crs = 4267) #route_links <- read_csv("data/4A_RouteLinks.csv") #route_links$Geography <- st_as_sfc(route_links$GeographyWkt) data <- read_delim("./data/4A_201701.csv", delim = ";", escape_double = FALSE, na = c("", "NA", "NULL"), col_types = cols( DateTime = col_datetime(format = "%Y-%m-%d %H:%M:%S"), LinkTravelTime = col_integer() ) ) setDT(data) data <- data[LinkTravelTime > 0] data$LinkName <- factor(data$LinkName, levels = unique(data$LinkName[order(data$LineDirectionCode, data$LineDirectionLinkOrder)])) #levels(data$LinkName)$26 # Look only south bound d1 <- data[LineDirectionCode == 1] d1$LinkName <- droplevels(d1$LinkName) d <- factor(unique(d1$LinkName)) d1_smry <- d1[, .N, by 
= list(LineDirectionLinkOrder, LinkRef, LinkName)][order(LineDirectionLinkOrder)] # Select links of interest d1_loi <- d1[(26 <= LineDirectionLinkOrder) & (LineDirectionLinkOrder <= 32)] d1_loi$LinkName <- droplevels(d1_loi$LinkName) ggplot(d1_loi, aes(factor(LineDirectionLinkOrder), LinkTravelTime)) + geom_boxplot(outlier.shape = NA) + coord_cartesian(ylim = quantile(d1_loi$LinkTravelTime, c(0.1, 0.9))) + scale_y_continuous(limits = quantile(d1_loi$LinkTravelTime, c(0.1, 0.9))) + labs(x = "Link Index", y = "Link travel time (s)") + theme_bw() + ggsave('plots/d1_loi_boxplot_nooutlier.pdf', width = 120, height = 80, units = "mm") setkey(d1_loi, LinkTravelTime) d1_loi_b10 <- d1_loi[, tail(.SD, 10), by=LinkName] d1_loi_b5p <- d1_loi[, tail(.SD, 0.05 * .N), by=LinkName] gplot(d1_loi_b5p) d <- d1_loi_b5p[,.N, by=JourneyRef][,list(Count=sum(.N)),by=N][order(N),list(N,CumCount=cumsum(Count))] ggplot(d, aes(N, CumCount)) + geom_bar(stat = "identity") + #scale_y_continuous(labels=percent) + labs(x = "Number of links", y = "Cum. frequency") + theme_bw() + ggsave('plots/d1_loi_b5p_by_journeyref.pdf', width = 120, height = 80, units = "mm") d1_loi_b5p$Hour <- as.numeric(format(d1_loi_b5p$DateTime, "%H")) + as.numeric(format(d1_loi_b5p$DateTime, "%M"))/60 ggplot(d1_loi_b5p, aes(Hour)) + geom_histogram(bins = 24 * 4) + theme_bw() + ggsave('plots/d1_loi_b5p_hour_histogram.pdf', width = 120, height = 80, units = "mm")
/LSTM for travel time prediction/analysis.R
no_license
mg56648/crashdetection-twitterdata
R
false
false
5,662
r
library(readr) library(data.table) library(rgdal) library(ggplot2) library(ggthemes) library(scales) library(leaflet) library(sp) library(sf) setwd('C:/Development/travel-time-prediction/') read_results <- function(file) { results <- as.data.table(read_csv(file)) results$Error = results$LinkTravelTime - results$LinkTravelTime_Predicted results$Hour <- as.numeric(format(results$DateTime, "%H")) results$DayType = factor(results$DayType) results$LinkName <- factor(results$LinkName, levels = unique(results$LinkName[order(results$LineDirectionLinkOrder)])) results$LinkOrder <- as.integer(results$LinkName) results } plot_result_errors <- function(ds = list(), labels = list()) { p <- ggplot() for (i in seq_len(min(length(ds), length(labels)))) { loop_input = paste("stat_ecdf(data = ds[[i]], aes(x = Error, colour = '",labels[[i]],"'))", sep="") p <- p + eval(parse(text=loop_input)) } p <- p + scale_y_continuous(labels=percent) + facet_grid(LinkOrder ~ .) + theme_tufte() + theme(panel.grid = element_line(size = .25, linetype = "solid", color = "black")) + theme(legend.position = "bottom") p } results_lr_single <- read_results('./data/results_lr_single.csv') results_lr_multiple <- read_results('./data/results_lr_multiple.csv') plot_result_errors(list(results_lr_single, results_lr_multiple), list('LR single', 'LR multiple')) + xlim(-100, 150) + theme(axis.text.x = element_text(size=7)) + theme(axis.text.y = element_text(size=7)) + ggsave('plots/results_lr_errors.pdf', width = 210, height = 148, units = "mm") results_svr_single <- read_results('./data/results_svr_single.csv') results_svr_multiple <- read_results('./data/results_svr_multiple.csv') plot_result_errors(list(results_svr_single, results_svr_multiple), list('SVR single', 'SVR multiple')) + xlim(-100, 150) + theme(axis.text.x = element_text(size=7)) + theme(axis.text.y = element_text(size=7)) + ggsave('plots/results_svr_errors.pdf', width = 210, height = 148, units = "mm") # Look at some of the exstreame errors: 
results_svr_single[abs(Error) > 120,] results_svr_multiple[abs(Error) > 120,] results_nn_single <- read_results('./data/results_nn_single.csv') results_nn_multiple <- read_results('./data/results_nn_multiple.csv') plot_result_errors(list(results_nn_single, results_nn_multiple), list('DNN single', 'DNN multiple')) + xlim(-100, 150) + theme(axis.text.x = element_text(size=7)) + theme(axis.text.y = element_text(size=7)) + ggsave('plots/results_dnn_errors.pdf', width = 210, height = 148, units = "mm") results_nn_single[abs(Error) > 120,] results_nn_multiple[abs(Error) > 120, list(JourneyLinkRef, DateTime, Observed, Predicted, Error)] plot_result_errors(list(results_lr_single, results_svr_single, results_nn_single), list('LR single', 'SVR single', 'NN single')) + xlim(-100, 150) + theme(axis.text.x = element_text(size=7)) + theme(axis.text.y = element_text(size=7)) plot_result_errors(list(results_lr_multiple, results_svr_multiple, results_nn_multiple), list('LR multiple', 'SVR multiple', 'NN multiple')) + xlim(-100, 150) + theme(axis.text.x = element_text(size=7)) + theme(axis.text.y = element_text(size=7)) #route_links <- readOGR(dsn = "data/4A_RouteLinks.csv", layer = "GeographyWkt", use_iconv = TRUE, encoding = "UTF-8") #route_links1 <- st_read("data/4A_RouteLinks.csv", "4A_RouteLinks", crs = 4267) #route_links <- read_csv("data/4A_RouteLinks.csv") #route_links$Geography <- st_as_sfc(route_links$GeographyWkt) data <- read_delim("./data/4A_201701.csv", delim = ";", escape_double = FALSE, na = c("", "NA", "NULL"), col_types = cols( DateTime = col_datetime(format = "%Y-%m-%d %H:%M:%S"), LinkTravelTime = col_integer() ) ) setDT(data) data <- data[LinkTravelTime > 0] data$LinkName <- factor(data$LinkName, levels = unique(data$LinkName[order(data$LineDirectionCode, data$LineDirectionLinkOrder)])) #levels(data$LinkName)$26 # Look only south bound d1 <- data[LineDirectionCode == 1] d1$LinkName <- droplevels(d1$LinkName) d <- factor(unique(d1$LinkName)) d1_smry <- d1[, .N, by 
= list(LineDirectionLinkOrder, LinkRef, LinkName)][order(LineDirectionLinkOrder)] # Select links of interest d1_loi <- d1[(26 <= LineDirectionLinkOrder) & (LineDirectionLinkOrder <= 32)] d1_loi$LinkName <- droplevels(d1_loi$LinkName) ggplot(d1_loi, aes(factor(LineDirectionLinkOrder), LinkTravelTime)) + geom_boxplot(outlier.shape = NA) + coord_cartesian(ylim = quantile(d1_loi$LinkTravelTime, c(0.1, 0.9))) + scale_y_continuous(limits = quantile(d1_loi$LinkTravelTime, c(0.1, 0.9))) + labs(x = "Link Index", y = "Link travel time (s)") + theme_bw() + ggsave('plots/d1_loi_boxplot_nooutlier.pdf', width = 120, height = 80, units = "mm") setkey(d1_loi, LinkTravelTime) d1_loi_b10 <- d1_loi[, tail(.SD, 10), by=LinkName] d1_loi_b5p <- d1_loi[, tail(.SD, 0.05 * .N), by=LinkName] gplot(d1_loi_b5p) d <- d1_loi_b5p[,.N, by=JourneyRef][,list(Count=sum(.N)),by=N][order(N),list(N,CumCount=cumsum(Count))] ggplot(d, aes(N, CumCount)) + geom_bar(stat = "identity") + #scale_y_continuous(labels=percent) + labs(x = "Number of links", y = "Cum. frequency") + theme_bw() + ggsave('plots/d1_loi_b5p_by_journeyref.pdf', width = 120, height = 80, units = "mm") d1_loi_b5p$Hour <- as.numeric(format(d1_loi_b5p$DateTime, "%H")) + as.numeric(format(d1_loi_b5p$DateTime, "%M"))/60 ggplot(d1_loi_b5p, aes(Hour)) + geom_histogram(bins = 24 * 4) + theme_bw() + ggsave('plots/d1_loi_b5p_hour_histogram.pdf', width = 120, height = 80, units = "mm")
#' Perform the computation; same as calling map without .f and lazy = F #' @param x a disk.frame #' @param name not used kept for compatibility with dplyr #' @export #' @importFrom dplyr compute #' @rdname map #' @examples #' cars.df = as.disk.frame(cars) #' cars.df2 = cars.df %>% map(~.x) #' # the computation is performed and the data is now stored elsewhere #' cars.df3 = compute(cars.df2) #' #' # clean up #' delete(cars.df) #' delete(cars.df3) compute.disk.frame <- function(x, name, outdir = tempfile("tmp_df_", fileext=".df"), overwrite = TRUE, ...) { overwrite_check(outdir, overwrite) write_disk.frame(x, outdir = outdir, overwrite = TRUE) }
/R/compute.r
no_license
iqis/disk.frame
R
false
false
658
r
#' Perform the computation; same as calling map without .f and lazy = F #' @param x a disk.frame #' @param name not used kept for compatibility with dplyr #' @export #' @importFrom dplyr compute #' @rdname map #' @examples #' cars.df = as.disk.frame(cars) #' cars.df2 = cars.df %>% map(~.x) #' # the computation is performed and the data is now stored elsewhere #' cars.df3 = compute(cars.df2) #' #' # clean up #' delete(cars.df) #' delete(cars.df3) compute.disk.frame <- function(x, name, outdir = tempfile("tmp_df_", fileext=".df"), overwrite = TRUE, ...) { overwrite_check(outdir, overwrite) write_disk.frame(x, outdir = outdir, overwrite = TRUE) }
# ucitavanje biblioteke i podataka library(e1071) slova <- read.csv('slova.csv') # podela skupa na dva dela x <- subset(slova, select = -slovo) y <- as.factor(slova$slovo) # izdvajanje obelezja od znacaja obelezje <- x$x_ivice N <- length(obelezje) # 20000 sr <- mean(obelezje) # 3.0461 vr <- var(obelezje) # 5.4407 # raslojavanje prema tipu slova; # funkcionalni koncept: lapply od niza svih slova # pravi listu indeksa podataka u kojima tip slova y # odgovara tekucem slovu k, dakle gde je k == y strat <- lapply(LETTERS, function (k) which(k == y)) obelezje_strat <- lapply(strat, function (k) obelezje[k]) broj_strat <- length(strat) # 26 N_strat <- sapply(strat, length) ok <- sum(N_strat) == N # TRUE # dozvoljena apsolutna greska d <- 0.07 # dozvoljena greska prve vrste alpha <- 0.05 # odgovarajuci kvantil N(0,1) z <- qnorm(1 - alpha/2) # 1.9600 # tacne statistike po slojevima sr_strat <- sapply(obelezje_strat, mean) vr_strat <- sapply(obelezje_strat, var) # neophodan obim uzorka ups <- 1/N * sum(N_strat * vr_strat) # 2.3161 n <- ups * (z/d)^2 # 1815 n <- 2*ceiling(n) # 3632 # proporcionalni raspored n_strat <- round(n*N_strat/N) ok <- sum(n_strat) == n # FALSE # fiksiranje generatora pseudoslucajnosti set.seed(0) # popravka da bi bilo ok while (!ok) { if (sum(n_strat) > n) { i <- sample(broj_strat, 1) n_strat[i] <- n_strat[i] - 1 } else { i <- sample(broj_strat, 1) n_strat[i] <- n_strat[i] + 1 } ok <- sum(n_strat) == n } # uzorkovanje prema izracunatom indeksi <- lapply(1:broj_strat, function (i) sample(N_strat[i], n_strat[i])) ok <- all(sapply(indeksi, length) == n_strat) # TRUE uzorak <- lapply(1:broj_strat, function (i) obelezje_strat[[i]][indeksi[[i]]]) ok <- all(sapply(uzorak, length) == n_strat) # TRUE # uzoracke vrednosti po stratumima xn_strat <- sapply(uzorak, mean) sn2_strat <- sapply(uzorak, var) D_xn_strat <- sn2_strat/n_strat * (1 - n_strat/N_strat) greska <- abs(sr_strat - xn_strat) # ocenjivanje srednje vrednosti xn <- 1/N * sum(N_strat * xn_strat) # 
3.0429 D_xn <- 1/N^2 * sum(N_strat^2 * D_xn_strat) # 0.0005 # interval poverenja sirina <- z * sqrt(D_xn) # 0.0448 I_xn <- c(xn - sirina, # 3.00 xn + sirina) # 3.09 upada <- sr >= I_xn[1] && sr <= I_xn[2] # TRUE # spojeni uzorak indeksi <- unlist(sapply(1:broj_strat, function (i) strat[[i]][indeksi[[i]]])) ok <- length(indeksi) == n # TRUE # pravljenje SVM modela model <- svm(x[indeksi,], y[indeksi], fitted = F) summary(model) # provera kvaliteta modela pred <- predict(model, x) prec <- mean(y == pred) # 0.8794
/R skriptovi/strworp.r
no_license
matfija/Uzorkovanje-slova
R
false
false
2,694
r
# ucitavanje biblioteke i podataka library(e1071) slova <- read.csv('slova.csv') # podela skupa na dva dela x <- subset(slova, select = -slovo) y <- as.factor(slova$slovo) # izdvajanje obelezja od znacaja obelezje <- x$x_ivice N <- length(obelezje) # 20000 sr <- mean(obelezje) # 3.0461 vr <- var(obelezje) # 5.4407 # raslojavanje prema tipu slova; # funkcionalni koncept: lapply od niza svih slova # pravi listu indeksa podataka u kojima tip slova y # odgovara tekucem slovu k, dakle gde je k == y strat <- lapply(LETTERS, function (k) which(k == y)) obelezje_strat <- lapply(strat, function (k) obelezje[k]) broj_strat <- length(strat) # 26 N_strat <- sapply(strat, length) ok <- sum(N_strat) == N # TRUE # dozvoljena apsolutna greska d <- 0.07 # dozvoljena greska prve vrste alpha <- 0.05 # odgovarajuci kvantil N(0,1) z <- qnorm(1 - alpha/2) # 1.9600 # tacne statistike po slojevima sr_strat <- sapply(obelezje_strat, mean) vr_strat <- sapply(obelezje_strat, var) # neophodan obim uzorka ups <- 1/N * sum(N_strat * vr_strat) # 2.3161 n <- ups * (z/d)^2 # 1815 n <- 2*ceiling(n) # 3632 # proporcionalni raspored n_strat <- round(n*N_strat/N) ok <- sum(n_strat) == n # FALSE # fiksiranje generatora pseudoslucajnosti set.seed(0) # popravka da bi bilo ok while (!ok) { if (sum(n_strat) > n) { i <- sample(broj_strat, 1) n_strat[i] <- n_strat[i] - 1 } else { i <- sample(broj_strat, 1) n_strat[i] <- n_strat[i] + 1 } ok <- sum(n_strat) == n } # uzorkovanje prema izracunatom indeksi <- lapply(1:broj_strat, function (i) sample(N_strat[i], n_strat[i])) ok <- all(sapply(indeksi, length) == n_strat) # TRUE uzorak <- lapply(1:broj_strat, function (i) obelezje_strat[[i]][indeksi[[i]]]) ok <- all(sapply(uzorak, length) == n_strat) # TRUE # uzoracke vrednosti po stratumima xn_strat <- sapply(uzorak, mean) sn2_strat <- sapply(uzorak, var) D_xn_strat <- sn2_strat/n_strat * (1 - n_strat/N_strat) greska <- abs(sr_strat - xn_strat) # ocenjivanje srednje vrednosti xn <- 1/N * sum(N_strat * xn_strat) # 
3.0429 D_xn <- 1/N^2 * sum(N_strat^2 * D_xn_strat) # 0.0005 # interval poverenja sirina <- z * sqrt(D_xn) # 0.0448 I_xn <- c(xn - sirina, # 3.00 xn + sirina) # 3.09 upada <- sr >= I_xn[1] && sr <= I_xn[2] # TRUE # spojeni uzorak indeksi <- unlist(sapply(1:broj_strat, function (i) strat[[i]][indeksi[[i]]])) ok <- length(indeksi) == n # TRUE # pravljenje SVM modela model <- svm(x[indeksi,], y[indeksi], fitted = F) summary(model) # provera kvaliteta modela pred <- predict(model, x) prec <- mean(y == pred) # 0.8794
#!/usr/bin/env Rscript library(dplyr) library(methods) library(R6) library(BaTFLED3D) # devtools::document() args <- commandArgs(TRUE) run_prefix <- args[1] # Functions ############################################################ # Function to load in data from runs loadData <- function(f, results){ #loads an RData file, and returns a list with the #trained model and warm & cold RMSE vectors. load(f) summaries['final', 'RMSE', fold] <- final.RMSE summaries['final', 'clip.RMSE', fold] <- final.RMSE.clip summaries['final', 'exp.var', fold] <- final.exp.var summaries['final', 'clip.exp.var', fold] <- final.exp.var.clip summaries['final', 'p.cor', fold] <- final.p.cor summaries['final', 'clip.p.cor', fold] <- final.p.cor.clip summaries['final', 's.cor', fold] <- final.s.cor summaries['final', 'clip.s.cor', fold] <- final.s.cor.clip summaries['mean', 'RMSE', fold] <- final.mean.RMSE summaries['mean', 'exp.var', fold] <- final.mean.exp.var summaries['mean', 'p.cor', fold] <- final.mean.p.cor summaries['mean', 's.cor', fold] <- final.mean.s.cor return(summaries) } ########### MAIN ############### # Determine the number of runs with this prefix n.files <- length(list.files(path = dirname(run_prefix), pattern = paste0(basename(run_prefix), '.[0-9]+.out'))) summaries <- array(NA, dim=c(2, 8, n.files), dimnames=list(c('final', 'mean'), c('RMSE', 'clip.RMSE', 'exp.var', 'clip.exp.var', 'p.cor', 'clip.p.cor', 's.cor', 'clip.s.cor'), paste0('fold.', 1:n.files))) for(fld in 1:n.files) { # Load in the run data f <- paste0(run_prefix, '.', (fld-1), '/final_test.Rdata') summaries <- loadData(f, results) } # Save all the data # save.image(paste0(run_prefix, '_summary.Rdata')) print("######################################################") print('## Means ##') print(apply(summaries, c(1,2), mean, na.rm=T)) print('## Standard deviations ##') print(apply(summaries, c(1,2), sd, na.rm=T)) # print("######################################################") # print('## Better than mean ##') # 
print(better) # # Make data frame counting how many folds peform better than the mean # better <- matrix(NA, 7, 10, dimnames=list(dimnames(results$summaries)[[1]], # dimnames(results$summaries)[[2]][!grepl('iter', dimnames(results$summaries)[[2]])])) # # for(type in c('A', 'H', 'train')) for(resp in c('RMSE', 'min.RMSE', 'clip.RMSE', 'min.clip.RMSE')) # better[type, resp] <- sum(results$summaries[type, resp,] < results$mean['RMSE', 'train.m1',]) # for(resp in c('RMSE', 'min.RMSE', 'clip.RMSE', 'min.clip.RMSE')) # better['warm', resp] <- sum(results$summaries['warm', resp,] < results$mean['RMSE', 'warm.m1',]) # for(type in c('m1', 'm2', 'm1m2')) for (resp in c('RMSE', 'min.RMSE', 'clip.RMSE', 'min.clip.RMSE')) # better[type, resp] <- sum(results$summaries[type, resp,] < results$mean['RMSE', type,]) # for(type in c('A', 'H', 'train')) for(resp in c('exp.var', 'max.exp.var')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['exp.var', 'train.m1',]) # for(resp in c('exp.var', 'max.exp.var')) # better['warm', resp] <- sum(results$summaries['warm', resp,] > results$mean['exp.var', 'warm.m1',]) # for(type in c('m1', 'm2', 'm1m2')) for (resp in c('exp.var', 'max.exp.var')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['exp.var', type,]) # for(type in c('A', 'H', 'train')) for(resp in c('p.cor', 'max.p.cor')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['p.cor', 'train.m1',]) # for(resp in c('p.cor', 'max.p.cor')) # better['warm', resp] <- sum(results$summaries['warm', resp,] > results$mean['p.cor', 'warm.m1',]) # for(type in c('m1', 'm2', 'm1m2')) for (resp in c('p.cor', 'max.p.cor')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['p.cor', type,]) # for(type in c('A', 'H', 'train')) for(resp in c('s.cor', 'max.s.cor')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['s.cor', 'train.m1',]) # for(resp in c('s.cor', 'max.s.cor')) # 
better['warm', resp] <- sum(results$summaries['warm', resp,] > results$mean['s.cor', 'warm.m1',]) # for(type in c('m1', 'm2', 'm1m2')) for (resp in c('s.cor', 'max.s.cor')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['s.cor', type,]) # # # Read log files to get run time if the runs finished # if(length(system2('grep', c('"Job terminated"', paste0(run_prefix, '.*.log')), stdout=T)>0)) { # months <- c(31,28,31,30,31,30,31,31,30,31,30,31) # month.days <- cumsum(months) # log.starts <- system2('grep', c('"Job submitted"', paste0(run_prefix, '.*.log')), stdout=T) # log.ends <- system2('grep', c('"Job terminated"', paste0(run_prefix, '.*.log')), stdout=T) # month.start <- as.numeric(sapply(strsplit(sapply(strsplit(log.starts, split=' '), # '[', 3), split='/'), '[', 1)) # month.end <- as.numeric(sapply(strsplit(sapply(strsplit(log.ends, split=' '), # '[', 3), split='/'), '[', 1)) # day.start <- month.days[month.start] + # as.numeric(sapply(strsplit(sapply(strsplit(log.starts, split=' '), '[', 3), split='/'), '[', 2)) # day.end <- month.days[month.end] + # as.numeric(sapply(strsplit(sapply(strsplit(log.ends, split=' '), '[', 3), split='/'), '[', 2)) # days <- day.end - day.start # hour.start <- as.numeric(sapply(strsplit(sapply(strsplit(log.starts, split=' '), # '[', 4), split=':'), '[', 1)) # hour.end <- as.numeric(sapply(strsplit(sapply(strsplit(log.ends, split=' '), # '[', 4), split=':'), '[', 1)) # min.start <- as.numeric(sapply(strsplit(sapply(strsplit(log.starts, split=' '), # '[', 4), split=':'), '[', 2)) # min.end <- as.numeric(sapply(strsplit(sapply(strsplit(log.ends, split=' '), # '[', 4), split=':'), '[', 2)) # # hours <- days * 24 + (hour.end - hour.start) + (min.end - min.start)/60 # rm(log.starts, log.ends) # print("Run time statistics (hours):") # summary(hours) # } #
/not_in_pkg/summarize_final_cold.R
permissive
singletrips/BaTFLED3D
R
false
false
6,088
r
#!/usr/bin/env Rscript library(dplyr) library(methods) library(R6) library(BaTFLED3D) # devtools::document() args <- commandArgs(TRUE) run_prefix <- args[1] # Functions ############################################################ # Function to load in data from runs loadData <- function(f, results){ #loads an RData file, and returns a list with the #trained model and warm & cold RMSE vectors. load(f) summaries['final', 'RMSE', fold] <- final.RMSE summaries['final', 'clip.RMSE', fold] <- final.RMSE.clip summaries['final', 'exp.var', fold] <- final.exp.var summaries['final', 'clip.exp.var', fold] <- final.exp.var.clip summaries['final', 'p.cor', fold] <- final.p.cor summaries['final', 'clip.p.cor', fold] <- final.p.cor.clip summaries['final', 's.cor', fold] <- final.s.cor summaries['final', 'clip.s.cor', fold] <- final.s.cor.clip summaries['mean', 'RMSE', fold] <- final.mean.RMSE summaries['mean', 'exp.var', fold] <- final.mean.exp.var summaries['mean', 'p.cor', fold] <- final.mean.p.cor summaries['mean', 's.cor', fold] <- final.mean.s.cor return(summaries) } ########### MAIN ############### # Determine the number of runs with this prefix n.files <- length(list.files(path = dirname(run_prefix), pattern = paste0(basename(run_prefix), '.[0-9]+.out'))) summaries <- array(NA, dim=c(2, 8, n.files), dimnames=list(c('final', 'mean'), c('RMSE', 'clip.RMSE', 'exp.var', 'clip.exp.var', 'p.cor', 'clip.p.cor', 's.cor', 'clip.s.cor'), paste0('fold.', 1:n.files))) for(fld in 1:n.files) { # Load in the run data f <- paste0(run_prefix, '.', (fld-1), '/final_test.Rdata') summaries <- loadData(f, results) } # Save all the data # save.image(paste0(run_prefix, '_summary.Rdata')) print("######################################################") print('## Means ##') print(apply(summaries, c(1,2), mean, na.rm=T)) print('## Standard deviations ##') print(apply(summaries, c(1,2), sd, na.rm=T)) # print("######################################################") # print('## Better than mean ##') # 
print(better) # # Make data frame counting how many folds peform better than the mean # better <- matrix(NA, 7, 10, dimnames=list(dimnames(results$summaries)[[1]], # dimnames(results$summaries)[[2]][!grepl('iter', dimnames(results$summaries)[[2]])])) # # for(type in c('A', 'H', 'train')) for(resp in c('RMSE', 'min.RMSE', 'clip.RMSE', 'min.clip.RMSE')) # better[type, resp] <- sum(results$summaries[type, resp,] < results$mean['RMSE', 'train.m1',]) # for(resp in c('RMSE', 'min.RMSE', 'clip.RMSE', 'min.clip.RMSE')) # better['warm', resp] <- sum(results$summaries['warm', resp,] < results$mean['RMSE', 'warm.m1',]) # for(type in c('m1', 'm2', 'm1m2')) for (resp in c('RMSE', 'min.RMSE', 'clip.RMSE', 'min.clip.RMSE')) # better[type, resp] <- sum(results$summaries[type, resp,] < results$mean['RMSE', type,]) # for(type in c('A', 'H', 'train')) for(resp in c('exp.var', 'max.exp.var')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['exp.var', 'train.m1',]) # for(resp in c('exp.var', 'max.exp.var')) # better['warm', resp] <- sum(results$summaries['warm', resp,] > results$mean['exp.var', 'warm.m1',]) # for(type in c('m1', 'm2', 'm1m2')) for (resp in c('exp.var', 'max.exp.var')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['exp.var', type,]) # for(type in c('A', 'H', 'train')) for(resp in c('p.cor', 'max.p.cor')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['p.cor', 'train.m1',]) # for(resp in c('p.cor', 'max.p.cor')) # better['warm', resp] <- sum(results$summaries['warm', resp,] > results$mean['p.cor', 'warm.m1',]) # for(type in c('m1', 'm2', 'm1m2')) for (resp in c('p.cor', 'max.p.cor')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['p.cor', type,]) # for(type in c('A', 'H', 'train')) for(resp in c('s.cor', 'max.s.cor')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['s.cor', 'train.m1',]) # for(resp in c('s.cor', 'max.s.cor')) # 
better['warm', resp] <- sum(results$summaries['warm', resp,] > results$mean['s.cor', 'warm.m1',]) # for(type in c('m1', 'm2', 'm1m2')) for (resp in c('s.cor', 'max.s.cor')) # better[type, resp] <- sum(results$summaries[type, resp,] > results$mean['s.cor', type,]) # # # Read log files to get run time if the runs finished # if(length(system2('grep', c('"Job terminated"', paste0(run_prefix, '.*.log')), stdout=T)>0)) { # months <- c(31,28,31,30,31,30,31,31,30,31,30,31) # month.days <- cumsum(months) # log.starts <- system2('grep', c('"Job submitted"', paste0(run_prefix, '.*.log')), stdout=T) # log.ends <- system2('grep', c('"Job terminated"', paste0(run_prefix, '.*.log')), stdout=T) # month.start <- as.numeric(sapply(strsplit(sapply(strsplit(log.starts, split=' '), # '[', 3), split='/'), '[', 1)) # month.end <- as.numeric(sapply(strsplit(sapply(strsplit(log.ends, split=' '), # '[', 3), split='/'), '[', 1)) # day.start <- month.days[month.start] + # as.numeric(sapply(strsplit(sapply(strsplit(log.starts, split=' '), '[', 3), split='/'), '[', 2)) # day.end <- month.days[month.end] + # as.numeric(sapply(strsplit(sapply(strsplit(log.ends, split=' '), '[', 3), split='/'), '[', 2)) # days <- day.end - day.start # hour.start <- as.numeric(sapply(strsplit(sapply(strsplit(log.starts, split=' '), # '[', 4), split=':'), '[', 1)) # hour.end <- as.numeric(sapply(strsplit(sapply(strsplit(log.ends, split=' '), # '[', 4), split=':'), '[', 1)) # min.start <- as.numeric(sapply(strsplit(sapply(strsplit(log.starts, split=' '), # '[', 4), split=':'), '[', 2)) # min.end <- as.numeric(sapply(strsplit(sapply(strsplit(log.ends, split=' '), # '[', 4), split=':'), '[', 2)) # # hours <- days * 24 + (hour.end - hour.start) + (min.end - min.start)/60 # rm(log.starts, log.ends) # print("Run time statistics (hours):") # summary(hours) # } #
library(ggplot2) # for plotting library(caret) # for modeling #' model_nz_pmnt #' #' @param data the data to fit to the non_zero_pmnt model #' @param devt development time to be modeled #' #' @imports caret #' #' @examples #' my_sample <- readRDS("../00-data/03-nzm/nzm-model-data.RDS") model_nz_pmnt <- function(data, devt, remove_outlier = TRUE, outlier_pct_cut = 0.99) { # filter for development time to model and drop 'devt' column sample <- data[data$devt == devt, ] m_outliers <- caret::train(tot_pd_incr_act ~ tot_rx, data = sample, method = "gam", trControl = trainControl(method = "repeatedcv", repeats = 10)) # find cooks distance if (remove_outlier) { cooks <- cooks.distance(m_outliers$finalModel) # find claims with cooks distance > outlier_cut outlier_cut <- quantile(cooks, probs = outlier_pct_cut) outliers <- sample[cooks > outlier_cut, ] # remove outliers sample <- sample[cooks <= outlier_cut, ] # fit model with outliers removed from data m <- caret::train(tot_pd_incr_act ~ tot_rx, data = sample, method = "gam", trControl = trainControl(method = "repeatedcv", repeats = 10)) } else { m <- m_outliers } list("model" = m, "outliers" = outliers) } # load data # data created in 'nzm-01-data-prep-simple.R' sample <- readRDS("../00-data/03-nzm/nzm-model-data-simple.RDS") # find development periods we want to fit model to devt_periods <- c(18, 30) # set seed set.seed(6001) sample_open <- dplyr::filter(sample, status_act == "O") %>% dplyr::select(-status_act) sample_closed <- dplyr::filter(sample, status_act == "C") %>% dplyr::select(-status_act) # Run models for open claims in 12 months: my_models_nz_open <- lapply(devt_periods, model_nz_pmnt, data = sample_open) # Run models for closed claims in 12 months: my_models_nz_closed <- lapply(devt_periods, model_nz_pmnt, data = sample_closed, outlier_cut = 10) # save models for reuse saveRDS(my_models_nz_open, file = "../00-data/03-nzm/nzm-open-gam-models-simple.RDS") saveRDS(my_models_nz_closed, file = 
"../00-data/03-nzm/nzm-closed-gam-models-simple.RDS") # Model Diagnostics: summary(my_models_nz_open[[1]][[1]]) my_models_nz_open[[1]][[2]] # outliers summary(my_models_nz_open[[2]][[1]]) my_models_nz_open[[2]][[2]] # outliers summary(my_models_nz_closed[[1]]) summary(my_models_nz_closed[[2]]) summary(my_models_nz_closed[[3]]) mod_18_o <- my_models_nz_open[[1]] mod_30_o <- my_models_nz_open[[2]] mod_42_o <- my_models_nz_open[[3]] mod_18_c <- my_models_nz_closed[[1]] mod_30_c <- my_models_nz_closed[[2]] mod_42_c <- my_models_nz_closed[[3]] plot(mod_18_o$finalModel) plot(mod_18_c$finalModel) sample_18 <- sample[sample$devt == 18, ] sample_30 <- sample[sample$devt == 30, ] sample_42 <- sample[sample$devt == 42, ] sample_18$preds <- ifelse(sample_18$status_act == "O", predict(mod_18_o, sample_18), predict(mod_18_c, sample_18)) sample_30$preds <- ifelse(sample_30$status_act == "O", predict(mod_30_o, sample_30), predict(mod_30_c, sample_30)) sample_42$preds <- ifelse(sample_42$status_act == "O", predict(mod_42_o, sample_42), predict(mod_42_c, sample_42)) sum(sample_18$tot_pd_incr_act) ; sum(sample_18$preds) sum(sample_30$tot_pd_incr_act) ; sum(sample_30$preds) sum(sample_42$tot_pd_incr_act) ; sum(sample_42$preds) sample_18$residuals <- sample_18$tot_pd_incr_act - sample_18$preds sample_30$residuals <- sample_30$tot_pd_incr_act - sample_30$preds sample_42$residuals <- sample_42$tot_pd_incr_act - sample_42$preds plot(sample_18$preds, sample_18$residuals) plot(sample_30$preds, sample_30$residuals) plot(sample_42$preds, sample_42$residuals)
/03-nonzero-pmnt/nzm-02-model-simple.R
no_license
jimbrig/claim-pm
R
false
false
4,051
r
library(ggplot2) # for plotting library(caret) # for modeling #' model_nz_pmnt #' #' @param data the data to fit to the non_zero_pmnt model #' @param devt development time to be modeled #' #' @imports caret #' #' @examples #' my_sample <- readRDS("../00-data/03-nzm/nzm-model-data.RDS") model_nz_pmnt <- function(data, devt, remove_outlier = TRUE, outlier_pct_cut = 0.99) { # filter for development time to model and drop 'devt' column sample <- data[data$devt == devt, ] m_outliers <- caret::train(tot_pd_incr_act ~ tot_rx, data = sample, method = "gam", trControl = trainControl(method = "repeatedcv", repeats = 10)) # find cooks distance if (remove_outlier) { cooks <- cooks.distance(m_outliers$finalModel) # find claims with cooks distance > outlier_cut outlier_cut <- quantile(cooks, probs = outlier_pct_cut) outliers <- sample[cooks > outlier_cut, ] # remove outliers sample <- sample[cooks <= outlier_cut, ] # fit model with outliers removed from data m <- caret::train(tot_pd_incr_act ~ tot_rx, data = sample, method = "gam", trControl = trainControl(method = "repeatedcv", repeats = 10)) } else { m <- m_outliers } list("model" = m, "outliers" = outliers) } # load data # data created in 'nzm-01-data-prep-simple.R' sample <- readRDS("../00-data/03-nzm/nzm-model-data-simple.RDS") # find development periods we want to fit model to devt_periods <- c(18, 30) # set seed set.seed(6001) sample_open <- dplyr::filter(sample, status_act == "O") %>% dplyr::select(-status_act) sample_closed <- dplyr::filter(sample, status_act == "C") %>% dplyr::select(-status_act) # Run models for open claims in 12 months: my_models_nz_open <- lapply(devt_periods, model_nz_pmnt, data = sample_open) # Run models for closed claims in 12 months: my_models_nz_closed <- lapply(devt_periods, model_nz_pmnt, data = sample_closed, outlier_cut = 10) # save models for reuse saveRDS(my_models_nz_open, file = "../00-data/03-nzm/nzm-open-gam-models-simple.RDS") saveRDS(my_models_nz_closed, file = 
"../00-data/03-nzm/nzm-closed-gam-models-simple.RDS") # Model Diagnostics: summary(my_models_nz_open[[1]][[1]]) my_models_nz_open[[1]][[2]] # outliers summary(my_models_nz_open[[2]][[1]]) my_models_nz_open[[2]][[2]] # outliers summary(my_models_nz_closed[[1]]) summary(my_models_nz_closed[[2]]) summary(my_models_nz_closed[[3]]) mod_18_o <- my_models_nz_open[[1]] mod_30_o <- my_models_nz_open[[2]] mod_42_o <- my_models_nz_open[[3]] mod_18_c <- my_models_nz_closed[[1]] mod_30_c <- my_models_nz_closed[[2]] mod_42_c <- my_models_nz_closed[[3]] plot(mod_18_o$finalModel) plot(mod_18_c$finalModel) sample_18 <- sample[sample$devt == 18, ] sample_30 <- sample[sample$devt == 30, ] sample_42 <- sample[sample$devt == 42, ] sample_18$preds <- ifelse(sample_18$status_act == "O", predict(mod_18_o, sample_18), predict(mod_18_c, sample_18)) sample_30$preds <- ifelse(sample_30$status_act == "O", predict(mod_30_o, sample_30), predict(mod_30_c, sample_30)) sample_42$preds <- ifelse(sample_42$status_act == "O", predict(mod_42_o, sample_42), predict(mod_42_c, sample_42)) sum(sample_18$tot_pd_incr_act) ; sum(sample_18$preds) sum(sample_30$tot_pd_incr_act) ; sum(sample_30$preds) sum(sample_42$tot_pd_incr_act) ; sum(sample_42$preds) sample_18$residuals <- sample_18$tot_pd_incr_act - sample_18$preds sample_30$residuals <- sample_30$tot_pd_incr_act - sample_30$preds sample_42$residuals <- sample_42$tot_pd_incr_act - sample_42$preds plot(sample_18$preds, sample_18$residuals) plot(sample_30$preds, sample_30$residuals) plot(sample_42$preds, sample_42$residuals)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PLR_determination.R \name{plr_yoy_regression} \alias{plr_yoy_regression} \title{Year-on-Year Regression} \usage{ plr_yoy_regression( data, power_var, time_var, model, per_year = 12, return_PLR = TRUE ) } \arguments{ \item{data}{Result of a power predictive model} \item{power_var}{String name of the variable used as power} \item{time_var}{String name of the variable used as time} \item{model}{String name of the model the data was passed through} \item{per_year}{Time step count per year based on model. Typically 12 for MbM, 365 for DbD.} \item{return_PLR}{boolean; option to return PLR value, rather than the raw regression data.} } \value{ Returns PLR value and error evaluated with YoY regression, if return_PLR is false it will return the individual YoY calculations } \description{ Automatically calculates Performance Loss Rate (PLR) using year on year regression. Note that it needs data from a power predictive model. } \examples{ # build var_list var_list <- plr_build_var_list(time_var = "timestamp", power_var = "power", irrad_var = "g_poa", temp_var = "mod_temp", wind_var = NA) # Clean Data test_dfc <- plr_cleaning(test_df, var_list, irrad_thresh = 100, low_power_thresh = 0.01, high_power_cutoff = NA) # Perform the power predictive modeling step test_xbx_wbw_res <- plr_xbx_model(test_dfc, var_list, by = "week", data_cutoff = 30, predict_data = NULL) # Calculate Performance Loss Rate xbx_wbw_plr <- plr_yoy_regression(test_xbx_wbw_res, power_var = 'power_var', time_var = 'time_var', model = "xbx", per_year = 52, return_PLR = TRUE) }
/man/plr_yoy_regression.Rd
no_license
romainfrancois/PVplr
R
false
true
2,163
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PLR_determination.R \name{plr_yoy_regression} \alias{plr_yoy_regression} \title{Year-on-Year Regression} \usage{ plr_yoy_regression( data, power_var, time_var, model, per_year = 12, return_PLR = TRUE ) } \arguments{ \item{data}{Result of a power predictive model} \item{power_var}{String name of the variable used as power} \item{time_var}{String name of the variable used as time} \item{model}{String name of the model the data was passed through} \item{per_year}{Time step count per year based on model. Typically 12 for MbM, 365 for DbD.} \item{return_PLR}{boolean; option to return PLR value, rather than the raw regression data.} } \value{ Returns PLR value and error evaluated with YoY regression, if return_PLR is false it will return the individual YoY calculations } \description{ Automatically calculates Performance Loss Rate (PLR) using year on year regression. Note that it needs data from a power predictive model. } \examples{ # build var_list var_list <- plr_build_var_list(time_var = "timestamp", power_var = "power", irrad_var = "g_poa", temp_var = "mod_temp", wind_var = NA) # Clean Data test_dfc <- plr_cleaning(test_df, var_list, irrad_thresh = 100, low_power_thresh = 0.01, high_power_cutoff = NA) # Perform the power predictive modeling step test_xbx_wbw_res <- plr_xbx_model(test_dfc, var_list, by = "week", data_cutoff = 30, predict_data = NULL) # Calculate Performance Loss Rate xbx_wbw_plr <- plr_yoy_regression(test_xbx_wbw_res, power_var = 'power_var', time_var = 'time_var', model = "xbx", per_year = 52, return_PLR = TRUE) }
% Please edit documentation in R/roxygen.R \name{split_filename} \alias{split_filename} \title{First line is title} \usage{ split_filename(x, ...) } \arguments{ \item{x}{explanation of \code{x}} \item{...}{explanation of \code{...}} } \value{ The value returned by this function. } \description{ This is a brief description. } \details{ Anything else after the description goes to the Details section. You can write several paragraphs. } \examples{ split_filename("foo.bar") } \author{ Who are you? } \references{ \url{https://github.com/yihui/rmini} } \seealso{ \code{\link[tools]{file_ext}}, \code{\link[tools]{file_path_sans_ext}} }
/man/split_filename.Rd
no_license
geertaarts/habitat.model
R
false
false
638
rd
% Please edit documentation in R/roxygen.R \name{split_filename} \alias{split_filename} \title{First line is title} \usage{ split_filename(x, ...) } \arguments{ \item{x}{explanation of \code{x}} \item{...}{explanation of \code{...}} } \value{ The value returned by this function. } \description{ This is a brief description. } \details{ Anything else after the description goes to the Details section. You can write several paragraphs. } \examples{ split_filename("foo.bar") } \author{ Who are you? } \references{ \url{https://github.com/yihui/rmini} } \seealso{ \code{\link[tools]{file_ext}}, \code{\link[tools]{file_path_sans_ext}} }
## app.R ##
# setwd('/home/johan/public/shiny-server/sample-apps/')
# shiny::runApp('dashboard')
library(shinydashboard)

uni_df <- readRDS('uni_df.rds')
uni_df <<- uni_df # promoted to the global env so ggmap's base_layer finds it

#############
# WORLD MAP #
#############
library(ggplot2)
library(ggmap)

# Sandhu's
library(rsconnect)
library(quantmod)
#library(googleway)
# google service:
# geocoding-backend.googleapis.com
# Google Maps Geocoding API

#' Draw a map of universities around a location.
#'
#' @param loc location string geocoded by ggmap::get_map()
#' @param z integer zoom level
#' @param updateProgress optional callback(value, detail) to report progress
#' @return a ggplot object: road map with one labelled blue point per row of
#'   `uni_df` (expects columns longitude, latitude, name)
university_map <- function(loc = 'Shanghai', z = 12, updateProgress = NULL) {
  # If we were passed a progress update function, call it
  if (is.function(updateProgress)) {
    updateProgress(detail = "Geolocalizing...")
  }

  # NOTE(review): `key` must exist in the calling environment; it is not
  # defined in this file — confirm where the API key is set.
  region <- get_map(location = loc,
                    maptype = 'roadmap',
                    zoom = z,
                    api_key = key)

  if (is.function(updateProgress)) {
    updateProgress(detail = "Fetching map...")
  }

  map_region <- ggmap(region, extent = 'panel',
                      base_layer = ggplot(uni_df,
                                          aes(x = longitude, y = latitude,
                                              label = name)),
                      na.rm = TRUE)
  # (label.size removed: it is not a geom_text parameter and was ignored
  # with a warning by ggplot2)
  map_region +
    geom_point(color = "blue", size = 3) +
    geom_text(hjust = 0, vjust = 0, angle = 45, colour = "blue")
}

# Map the UI's stock display names to Yahoo ticker symbols.
# NOTE(review): 'Bank of America' -> "BA" reproduces the original code, but
# "BA" is Boeing; Bank of America trades as "BAC". Confirm intent.
# 'Top Hundred Stocks' -> "NUS" is also kept as-is from the original.
stock_tickers <- c(
  'Apple'              = "AAPL",
  'Google'             = "GOOG",
  'Microsoft'          = "MSFT",
  'Tesla'              = "TSLA",
  'Amazon'             = "AMZN",
  'Accenture'          = "ACN",
  'Coca-Cola'          = "KO",
  'Exxon Mobil'        = "XOM",
  'Top Hundred Stocks' = "NUS",
  'Walt Disney'        = "DIS",
  'General Electric'   = "GE",
  'Bank of America'    = "BA",
  'Ford Motor'         = "F",
  'At&T'               = "T",
  'Pfizer'             = "PFE",
  'Morgan Stanley'     = "MS",
  'JP Morgan Chase'    = "JPM",
  'Alibaba'            = "BABA",
  'Twitter'            = "TWTR",
  'Verizon'            = "VZ"
)

# Fetch OHLC data for `symbol` over [start, end] and draw the requested
# technical-analysis chart. Replaces ~80 copy-pasted branches that differed
# only in the ticker symbol.
plot_stock_analysis <- function(symbol, ana, start, end, days) {
  # auto.assign = FALSE returns the xts object instead of assigning it by
  # ticker name into the workspace (the original created variables named
  # `T` and `F`, shadowing TRUE/FALSE).
  prices <- getSymbols(symbol, src = "yahoo", from = start, to = end,
                       auto.assign = FALSE)

  if (ana == 'Moving Average') {
    candleChart(prices, up.col = "black", dn.col = "red", theme = "white",
                subset = "2016-09-01/")
    addSMA(n = c(days))
  } else if (ana == 'Moving Average Convergence Divergence (MACD)') {
    candleChart(prices, up.col = "black", dn.col = "red", theme = "white",
                subset = "2016-09-01/")
    addMACD()
  } else if (ana == 'Rate of Change') {
    candleChart(prices, up.col = "black", dn.col = "red", theme = "white",
                subset = "2016-09-01/")
    addROC(n = 1, type = c("discrete", "continuous"), col = "red")
  } else if (ana == 'Price Envelop and Relative Strength Index') {
    candleChart(prices, up.col = "black", dn.col = "red", theme = "white",
                subset = "2016-09-01/", TA = "addVo(); addRSI()")
    addBBands()
  }
}

shinyServer(function(input, output, session) {
  # A temp file to save the output.
  # This file will be removed later by renderImage

  # Geolocalize
  observeEvent(input$show, {
    output$myImage <- renderImage({
      progress <- shiny::Progress$new(style = 'old')
      progress$set(message = "Computing data", value = 0)
      # Close the progress when this reactive exits (even if there's an error)
      on.exit(progress$close())

      # Create a closure to update progress.
      # Each time this is called:
      # - If `value` is NULL, it will move the progress bar 1/5 of the
      #   remaining distance. If non-NULL, it will set the progress to that
      #   value.
      # - It also accepts optional detail text.
      updateProgress <- function(value = NULL, detail = NULL) {
        if (is.null(value)) {
          value <- progress$getValue()
          value <- value + (progress$getMax() - value) / 5
        }
        progress$set(value = value, detail = detail)
      }

      outfile <- tempfile(fileext = '.png')
      png(outfile, width = 700, height = 800)
      print(university_map(input$region, input$zoom, updateProgress))
      dev.off()

      # Return a list containing the filename
      # modify the .img css
      list(src = outfile,
           contentType = 'image/png',
           width = '100%',
           height = '100%') # 'calc(width*8/7)')
    }, deleteFile = TRUE)
  })

  # Stocks: one code path for every ticker and analysis type.
  output$graph <- renderPlot({
    if (input$stock %in% names(stock_tickers)) {
      plot_stock_analysis(stock_tickers[[input$stock]],
                          input$ana,
                          input$startdate,
                          input$enddate,
                          input$days)
    }
  })
})
/server.r
no_license
arcnewuss/demo_dashboard
R
false
false
39,491
r
## app.R ##
# setwd('/home/johan/public/shiny-server/sample-apps/')
# shiny::runApp('dashboard')

library(shinydashboard)

# University data used as the base layer of the world map.
uni_df <- readRDS('uni_df.rds')
uni_df <<- uni_df  # promote to the global env so ggmap's base_layer can see it

#############
# WORLD MAP #
#############
library(ggplot2)
library(ggmap)

# Sandhu's
library(rsconnect)
library(quantmod)
# library(googleway)
# google service:
#   geocoding-backend.googleapis.com
#   Google Maps Geocoding API

#' Plot the universities in `uni_df` on a map centred on `loc`.
#'
#' @param loc Location string passed to ggmap::get_map() for geocoding.
#' @param z Zoom level for the fetched map tiles.
#' @param updateProgress Optional callback taking `detail = <text>`, used to
#'   report progress to the Shiny progress bar; ignored when not a function.
#' @return A ggplot object: the map with blue points and rotated labels.
#'
#' NOTE(review): `key` (the API key) is not defined anywhere in this file --
#' presumably it is supplied elsewhere in the app environment; verify.
university_map <- function(loc = 'Shanghai', z = 12, updateProgress = NULL) {
  if (is.function(updateProgress)) {
    updateProgress(detail = "Geolocalizing...")
  }
  region <- get_map(location = loc,
                    maptype = 'roadmap',  # source = 'stamen',
                    zoom = z,
                    api_key = key)
  if (is.function(updateProgress)) {
    updateProgress(detail = "Fetching map...")
  }
  base <- ggplot(uni_df, aes(x = longitude, y = latitude, label = name))
  ggmap(region, extent = 'panel', base_layer = base, na.rm = TRUE) +
    geom_point(color = "blue", size = 3) +
    geom_text(hjust = 0, vjust = 0, angle = 45, colour = "blue",
              label.size = .5)
}

# UI stock label -> Yahoo Finance ticker symbol.
# The original file repeated the whole charting block verbatim for every
# stock; the branches differed only in this symbol, so the duplication is
# factored into one lookup table plus a single code path below.
# NOTE(review): 'Bank of America' maps to "BA", which is Boeing's ticker
# (BofA is "BAC"), and 'Top Hundred Stocks' maps to "NUS" (Nu Skin). Both
# mappings are kept unchanged to preserve the original behaviour -- verify.
TICKERS <- c(
  'Apple'              = "AAPL",
  'Google'             = "GOOG",
  'Microsoft'          = "MSFT",
  'Tesla'              = "TSLA",
  'Amazon'             = "AMZN",
  'Accenture'          = "ACN",
  'Coca-Cola'          = "KO",
  'Exxon Mobil'        = "XOM",
  'Top Hundred Stocks' = "NUS",
  'Walt Disney'        = "DIS",
  'General Electric'   = "GE",
  'Bank of America'    = "BA",
  'Ford Motor'         = "F",
  'At&T'               = "T",
  'Pfizer'             = "PFE",
  'Morgan Stanley'     = "MS",
  'JP Morgan Chase'    = "JPM",
  'Alibaba'            = "BABA",
  'Twitter'            = "TWTR",
  'Verizon'            = "VZ"
)

shinyServer(function(input, output, session) {

  # Geolocalize: render the university map to a temp PNG when the user
  # presses the `show` button. The temp file is removed by renderImage.
  observeEvent(input$show, {
    output$myImage <- renderImage({
      progress <- shiny::Progress$new(style = 'old')
      progress$set(message = "Computing data", value = 0)
      # Close the progress bar when this reactive exits (even on error).
      on.exit(progress$close())

      # Create a closure to update progress. Each call either moves the bar
      # 1/5 of the remaining distance (value = NULL) or jumps to `value`;
      # it also accepts optional detail text.
      updateProgress <- function(value = NULL, detail = NULL) {
        if (is.null(value)) {
          value <- progress$getValue()
          value <- value + (progress$getMax() - value) / 5
        }
        progress$set(value = value, detail = detail)
      }

      outfile <- tempfile(fileext = '.png')
      png(outfile, width = 700, height = 800)
      print(university_map(input$region, input$zoom, updateProgress))
      dev.off()

      # Return a list describing the image; modify the .img css if the
      # aspect ratio needs changing.
      list(src = outfile,
           contentType = 'image/png',
           width = '100%',
           height = '100%')  # 'calc(width*8/7)'
    }, deleteFile = TRUE)
  })

  # Stocks: fetch the chosen symbol once and draw a candlestick chart with
  # the technical-analysis overlay selected in input$ana. The previously
  # duplicated-per-stock `title <- ...` assignments and no-op `class(SYM)`
  # calls were dead code and have been removed.
  output$graph <- renderPlot({
    sym <- unname(TICKERS[input$stock])
    if (is.na(sym)) {
      # Unknown stock label: the original if/else chain fell through and
      # plotted nothing; keep that behaviour.
      return(invisible(NULL))
    }

    # auto.assign = FALSE keeps getSymbols from writing into the calling
    # environment; name = sym preserves the ticker as the chart title.
    dat <- getSymbols(sym, src = "yahoo",
                      from = input$startdate, to = input$enddate,
                      auto.assign = FALSE)

    if (input$ana == 'Moving Average') {
      candleChart(dat, name = sym, up.col = "black", dn.col = "red",
                  theme = "white", subset = "2016-09-01/")
      addSMA(n = c(input$days))
    } else if (input$ana == 'Moving Average Convergence Divergence (MACD)') {
      candleChart(dat, name = sym, up.col = "black", dn.col = "red",
                  theme = "white", subset = "2016-09-01/")
      addMACD()
    } else if (input$ana == 'Rate of Change') {
      candleChart(dat, name = sym, up.col = "black", dn.col = "red",
                  theme = "white", subset = "2016-09-01/")
      addROC(n = 1, type = c("discrete", "continuous"), col = "red")
    } else if (input$ana == 'Price Envelop and Relative Strength Index') {
      candleChart(dat, name = sym, up.col = "black", dn.col = "red",
                  theme = "white", subset = "2016-09-01/",
                  TA = "addVo(); addRSI()")
      addBBands()
    }
  })
})
library(lcmm) ### Name: plot ### Title: Plot of a fitted model ### Aliases: plot plot.hlme plot.lcmm plot.multlcmm plot.Jointlcmm ### ** Examples ###################### fit, residuals and postprob # estimation of the model m<-lcmm(Y~Time*X1,mixture=~Time,random=~Time,classmb=~X2+X3, subject='ID',ng=2,data=data_hlme,B=c(0.41,0.55,-0.18,-0.41, -14.26,-0.34,1.33,13.51,24.65,2.98,1.18,26.26,0.97)) # fit plot(m,which="fit",marg=FALSE,var.time="Time",bty="n") # residuals plot plot(m) # postprob plot plot(m,which="postprob") ###################### fit, linkfunctions #### Estimation of homogeneous mixed models with different assumed link #### functions, a quadratic mean trajectory for the latent process with #### independent random intercept, slope and quadratic slope #### (comparison of linear, Beta and 3 and 5 splines link functions) ## Not run: ##D ##D # linear link function ##D m10<-lcmm(Ydep2~Time+I(Time^2),random=~Time+I(Time^2),subject='ID',ng=1, ##D data=data_lcmm,link="linear", ##D B=c(-0.7454, -0.2031, 0.2715, 0.2916 , 0.6114, -0.0064, 0.0545, ##D 0.0128, 25.3795, 2.2371)) ##D ##D # Beta link function ##D m11<-lcmm(Ydep2~Time+I(Time^2),random=~Time+I(Time^2),subject='ID',ng=1, ##D data=data_lcmm,link="beta",B=c(-0.9109, -0.0831, 0.5194, 0.1910 , ##D 0.8984, -0.0179, -0.0636, 0.0045, 0.5514, -0.7692, 0.7037, 0.0899)) ##D ##D # fit ##D par(mfrow=c(2,1),mar=c(4,4,1,1)) ##D plot(m11,which="fit",var.time="Time",bty="l",ylim=c(-3,0)) ##D plot(m11,which="fit",var.time="Time",marg=FALSE,bty="l",ylim=c(-3,0)) ##D ##D # I-splines with 3 equidistant nodes ##D m12<-lcmm(Ydep2~Time+I(Time^2),random=~Time+I(Time^2),subject='ID',ng=1, ##D data=data_lcmm,link="3-equi-splines",B=c(-0.9272, -0.0753 , 0.5304, ##D 0.1950, 0.9260, -0.0204, -0.0739 , 0.0059, -7.8369, 0.9228 ,-1.4689, ##D 2.0396, 1.8102)) ##D ##D # I-splines with 5 nodes, and interior nodes entered manually ##D m13<-lcmm(Ydep2~Time+I(Time^2),random=~Time+I(Time^2),subject='ID',ng=1, ##D 
data=data_lcmm,link="5-manual-splines",intnodes=c(10,20,25), ##D B=c(-0.9315, -0.0739 , 0.5254 , 0.1933, 0.9418, -0.0206, -0.0776, ##D 0.0064, -7.8645, 0.7470, 1.2080, 1.5537 , 1.7558 , 1.3386 , 1.0982)) ##D ##D # Plot of estimated different link functions: ##D # (applicable for models that only differ in the "link function" used. ##D # Otherwise, the latent process scale is different and a rescaling ##D # is necessary) ##D plot(m10,which="linkfunction",bty="l") ##D plot(m11,which="linkfunction",bty="l",add=TRUE,col=2) ##D plot(m12,which="linkfunction",bty="l",add=TRUE,col=3) ##D plot(m13,which="linkfunction",bty="l",add=TRUE,col=4) ##D legend("topleft",col=1:4,legend=c("linear","beta","3-Isplines","5-Isplines"),lty=1,bty='n') ## End(Not run) ###################### fit, baselinerisk and survival ## Not run: ##D #### estimation with 3 latent classes (ng=3) - see Jointlcmm ##D #### help for details on the model ##D m3 <- Jointlcmm(fixed= Ydep1~Time*X1,mixture=~Time,random=~Time, ##D classmb=~X3,subject='ID',survival = Surv(Tevent,Event)~ X1+mixture(X2), ##D hazard="3-quant-splines",hazardtype="PH",ng=3,data=data_lcmm, ##D B=c(0.7576, 0.4095, -0.8232, -0.2737, 0, 0, 0, 0.2838, -0.6338, ##D 2.6324, 5.3963, -0.0273, 1.3979, 0.8168, -15.041, 10.164, 10.2394, ##D 11.5109, -2.6219, -0.4553, -0.6055, 1.473, -0.0383, 0.8512, 0.0389, ##D 0.2624, 1.4982)) ##D ##D # fit ##D plot(m3,which="fit",var.time="Time",bty="l") ##D plot(m3,which="fit",var.time="Time",marg=FALSE,bty="l",ylim=c(0,15)) ##D ##D ##D # Class-specific predicted baseline risk & survival functions in the ##D # 3-class model retained (for the reference value of the covariates) ##D plot(m3,which="baselinerisk",bty="l") ##D plot(m3,which="baselinerisk",ylim=c(0,5),bty="l") ##D plot(m3,which="survival",bty="l") ## End(Not run)
/data/genthat_extracted_code/lcmm/examples/plot.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
3,967
r
library(lcmm) ### Name: plot ### Title: Plot of a fitted model ### Aliases: plot plot.hlme plot.lcmm plot.multlcmm plot.Jointlcmm ### ** Examples ###################### fit, residuals and postprob # estimation of the model m<-lcmm(Y~Time*X1,mixture=~Time,random=~Time,classmb=~X2+X3, subject='ID',ng=2,data=data_hlme,B=c(0.41,0.55,-0.18,-0.41, -14.26,-0.34,1.33,13.51,24.65,2.98,1.18,26.26,0.97)) # fit plot(m,which="fit",marg=FALSE,var.time="Time",bty="n") # residuals plot plot(m) # postprob plot plot(m,which="postprob") ###################### fit, linkfunctions #### Estimation of homogeneous mixed models with different assumed link #### functions, a quadratic mean trajectory for the latent process with #### independent random intercept, slope and quadratic slope #### (comparison of linear, Beta and 3 and 5 splines link functions) ## Not run: ##D ##D # linear link function ##D m10<-lcmm(Ydep2~Time+I(Time^2),random=~Time+I(Time^2),subject='ID',ng=1, ##D data=data_lcmm,link="linear", ##D B=c(-0.7454, -0.2031, 0.2715, 0.2916 , 0.6114, -0.0064, 0.0545, ##D 0.0128, 25.3795, 2.2371)) ##D ##D # Beta link function ##D m11<-lcmm(Ydep2~Time+I(Time^2),random=~Time+I(Time^2),subject='ID',ng=1, ##D data=data_lcmm,link="beta",B=c(-0.9109, -0.0831, 0.5194, 0.1910 , ##D 0.8984, -0.0179, -0.0636, 0.0045, 0.5514, -0.7692, 0.7037, 0.0899)) ##D ##D # fit ##D par(mfrow=c(2,1),mar=c(4,4,1,1)) ##D plot(m11,which="fit",var.time="Time",bty="l",ylim=c(-3,0)) ##D plot(m11,which="fit",var.time="Time",marg=FALSE,bty="l",ylim=c(-3,0)) ##D ##D # I-splines with 3 equidistant nodes ##D m12<-lcmm(Ydep2~Time+I(Time^2),random=~Time+I(Time^2),subject='ID',ng=1, ##D data=data_lcmm,link="3-equi-splines",B=c(-0.9272, -0.0753 , 0.5304, ##D 0.1950, 0.9260, -0.0204, -0.0739 , 0.0059, -7.8369, 0.9228 ,-1.4689, ##D 2.0396, 1.8102)) ##D ##D # I-splines with 5 nodes, and interior nodes entered manually ##D m13<-lcmm(Ydep2~Time+I(Time^2),random=~Time+I(Time^2),subject='ID',ng=1, ##D 
data=data_lcmm,link="5-manual-splines",intnodes=c(10,20,25), ##D B=c(-0.9315, -0.0739 , 0.5254 , 0.1933, 0.9418, -0.0206, -0.0776, ##D 0.0064, -7.8645, 0.7470, 1.2080, 1.5537 , 1.7558 , 1.3386 , 1.0982)) ##D ##D # Plot of estimated different link functions: ##D # (applicable for models that only differ in the "link function" used. ##D # Otherwise, the latent process scale is different and a rescaling ##D # is necessary) ##D plot(m10,which="linkfunction",bty="l") ##D plot(m11,which="linkfunction",bty="l",add=TRUE,col=2) ##D plot(m12,which="linkfunction",bty="l",add=TRUE,col=3) ##D plot(m13,which="linkfunction",bty="l",add=TRUE,col=4) ##D legend("topleft",col=1:4,legend=c("linear","beta","3-Isplines","5-Isplines"),lty=1,bty='n') ## End(Not run) ###################### fit, baselinerisk and survival ## Not run: ##D #### estimation with 3 latent classes (ng=3) - see Jointlcmm ##D #### help for details on the model ##D m3 <- Jointlcmm(fixed= Ydep1~Time*X1,mixture=~Time,random=~Time, ##D classmb=~X3,subject='ID',survival = Surv(Tevent,Event)~ X1+mixture(X2), ##D hazard="3-quant-splines",hazardtype="PH",ng=3,data=data_lcmm, ##D B=c(0.7576, 0.4095, -0.8232, -0.2737, 0, 0, 0, 0.2838, -0.6338, ##D 2.6324, 5.3963, -0.0273, 1.3979, 0.8168, -15.041, 10.164, 10.2394, ##D 11.5109, -2.6219, -0.4553, -0.6055, 1.473, -0.0383, 0.8512, 0.0389, ##D 0.2624, 1.4982)) ##D ##D # fit ##D plot(m3,which="fit",var.time="Time",bty="l") ##D plot(m3,which="fit",var.time="Time",marg=FALSE,bty="l",ylim=c(0,15)) ##D ##D ##D # Class-specific predicted baseline risk & survival functions in the ##D # 3-class model retained (for the reference value of the covariates) ##D plot(m3,which="baselinerisk",bty="l") ##D plot(m3,which="baselinerisk",ylim=c(0,5),bty="l") ##D plot(m3,which="survival",bty="l") ## End(Not run)
mutation_count = mutation_simulator(gene_number = 20000,window_number=100,feature_prob = 0.2,base_rate = 2e-6,sample_size = 314, p_risk = 1, effect_size = 1.5) head mutation_count = mutation_simulator_v2(gene_number = 500,window_number=1000,feature_prob = 0.2,base_rate = 2e-6,sample_size = 314, p_risk = 0.3, effect_size = 3) # This function estimates the relative risk from simulated data generated by mutation_simulator_v2 estimate_effect_size_for_simulation_data_mixture_with_categorization<-function(data,feature_start = 5, feature_end = 6, feature_number = 2, prior_prob = 0.3){ #[data] is the returned value from function{mutation_simulator} #[feature_start] is the column number of the first feature in data$data #[feature_end] is the column number of the last feature in data$data #[feature_number] is the number of features #[prior_prob] is the prior probability of each gene being a risk gene. For simplicity, set to be equal for all genes, and is eual to the prior probability used in mutation_simulator_v2 # first get the real mutation rate by multiplying 2 and sample size. 
data$data$base_mut_rate = data$data$base_mut_rate * data$sample_size *2 partition_by_gene <- split(data$data, data$data$gene) # function to get effective information of each element of partition_by_gene # These information are those necessary to compute log-likelihood in the optimization function partition_feature <- function(pbg){ # input is one element of the list of partition_by_gene pbg_split <- split(pbg, pbg[,feature_start:feature_end],drop = TRUE) info_for_each_feature <- function(feature_set){ list(feature_vector = as.numeric(feature_set[1,feature_start:feature_end]), sum_mut_rate_count = sum(feature_set[,3]*log(feature_set[,4])), sum_mut_rate = sum(feature_set[,4]), sum_mut_count = sum(feature_set[,3]), log_fcount = sum(log(factorial(feature_set[,3])))) } sapply(pbg_split, info_for_each_feature,simplify = FALSE) } data_partition <- sapply(partition_by_gene, partition_feature, simplify = FALSE) fr<-function(x){ all_rr = x cal_logP_Zg1 <- function(data_partition_element){ cal_logP_Zg1_level2 <-function(data_partition_element_level2){ data_partition_element_level2[[2]]+data_partition_element_level2[[4]]*data_partition_element_level2[[1]]%*%all_rr-data_partition_element_level2[[3]]*exp(data_partition_element_level2[[1]]%*%all_rr)-data_partition_element_level2[[5]] } sum(sapply(data_partition_element, cal_logP_Zg1_level2)) } cal_logP_Zg0 <- function(data_partition_element){ cal_logP_Zg0_level2 <-function(data_partition_element_level2){ data_partition_element_level2[[2]]-data_partition_element_level2[[3]]-data_partition_element_level2[[5]] } sum(sapply(data_partition_element, cal_logP_Zg0_level2)) } logP_Zg1 = sapply(data_partition, cal_logP_Zg1) logP_Zg0 = sapply(data_partition, cal_logP_Zg0) sum(log((prior_prob*exp(logP_Zg1)+(1-prior_prob)*exp(logP_Zg0)))) } optimization_time <- system.time(mle <- optim(rep(0.1, feature_number), fr,control=list("fnscale"=-1), hessian = TRUE)) list(optimization_time = optimization_time,mle = mle) } # This function estimates the 
relative risk from simulated data generated by mutation_simulator_v2 estimate_effect_size_for_simulation_data_mixture_without_categorization<-function(data, feature_start = 5, feature_end = 6, feature_number = 2, prior_prob = 0.3){ #[data] is the returned value from function{mutation_simulator} #[feature_start] is the column number of the first feature in data$data #[feature_end] is the column number of the last feature in data$data #[feature_number] is the number of features #[prior_prob] is the prior probability of each gene being a risk gene. For simplicity, set to be equal for all genes, and is eual to the prior probability used in mutation_simulator_v2 # first get the real mutation rate by multiplying 2 and sample size. data$data$base_mut_rate = data$data$base_mut_rate * data$sample_size *2 fr<-function(x){ all_rr <- x logP_Zg1 <- by(data$data, data$data[,"gene"], function(x) sum(x$mut_count*(log(x$base_mut_rate)+(as.matrix(x[,feature_start:feature_end])%*%all_rr))-x$base_mut_rate*exp((as.matrix(x[,feature_start:feature_end])%*%all_rr))-log(factorial(x$mut_count)))) logP_Zg0 <- by(data$data, data$data[,"gene"], function(x) sum(x$mut_count*log(x$base_mut_rate)-x$base_mut_rate-log(factorial(x$mut_count)))) sum(log((prior_prob*exp(logP_Zg1)+(1-prior_prob)*exp(logP_Zg0)))) # minimization } optimization_time <- system.time(mle <- optim(rep(0.1, feature_number), fr,control=list("fnscale"=-1), hessian = TRUE)) list(optimization_time = optimization_time,mle = mle) }
/lib/170124_temp.R
no_license
liu-genomics/ASD
R
false
false
4,817
r
mutation_count = mutation_simulator(gene_number = 20000,window_number=100,feature_prob = 0.2,base_rate = 2e-6,sample_size = 314, p_risk = 1, effect_size = 1.5) head mutation_count = mutation_simulator_v2(gene_number = 500,window_number=1000,feature_prob = 0.2,base_rate = 2e-6,sample_size = 314, p_risk = 0.3, effect_size = 3) # This function estimates the relative risk from simulated data generated by mutation_simulator_v2 estimate_effect_size_for_simulation_data_mixture_with_categorization<-function(data,feature_start = 5, feature_end = 6, feature_number = 2, prior_prob = 0.3){ #[data] is the returned value from function{mutation_simulator} #[feature_start] is the column number of the first feature in data$data #[feature_end] is the column number of the last feature in data$data #[feature_number] is the number of features #[prior_prob] is the prior probability of each gene being a risk gene. For simplicity, set to be equal for all genes, and is eual to the prior probability used in mutation_simulator_v2 # first get the real mutation rate by multiplying 2 and sample size. 
data$data$base_mut_rate = data$data$base_mut_rate * data$sample_size *2 partition_by_gene <- split(data$data, data$data$gene) # function to get effective information of each element of partition_by_gene # These information are those necessary to compute log-likelihood in the optimization function partition_feature <- function(pbg){ # input is one element of the list of partition_by_gene pbg_split <- split(pbg, pbg[,feature_start:feature_end],drop = TRUE) info_for_each_feature <- function(feature_set){ list(feature_vector = as.numeric(feature_set[1,feature_start:feature_end]), sum_mut_rate_count = sum(feature_set[,3]*log(feature_set[,4])), sum_mut_rate = sum(feature_set[,4]), sum_mut_count = sum(feature_set[,3]), log_fcount = sum(log(factorial(feature_set[,3])))) } sapply(pbg_split, info_for_each_feature,simplify = FALSE) } data_partition <- sapply(partition_by_gene, partition_feature, simplify = FALSE) fr<-function(x){ all_rr = x cal_logP_Zg1 <- function(data_partition_element){ cal_logP_Zg1_level2 <-function(data_partition_element_level2){ data_partition_element_level2[[2]]+data_partition_element_level2[[4]]*data_partition_element_level2[[1]]%*%all_rr-data_partition_element_level2[[3]]*exp(data_partition_element_level2[[1]]%*%all_rr)-data_partition_element_level2[[5]] } sum(sapply(data_partition_element, cal_logP_Zg1_level2)) } cal_logP_Zg0 <- function(data_partition_element){ cal_logP_Zg0_level2 <-function(data_partition_element_level2){ data_partition_element_level2[[2]]-data_partition_element_level2[[3]]-data_partition_element_level2[[5]] } sum(sapply(data_partition_element, cal_logP_Zg0_level2)) } logP_Zg1 = sapply(data_partition, cal_logP_Zg1) logP_Zg0 = sapply(data_partition, cal_logP_Zg0) sum(log((prior_prob*exp(logP_Zg1)+(1-prior_prob)*exp(logP_Zg0)))) } optimization_time <- system.time(mle <- optim(rep(0.1, feature_number), fr,control=list("fnscale"=-1), hessian = TRUE)) list(optimization_time = optimization_time,mle = mle) } # This function estimates the 
relative risk from simulated data generated by mutation_simulator_v2 estimate_effect_size_for_simulation_data_mixture_without_categorization<-function(data, feature_start = 5, feature_end = 6, feature_number = 2, prior_prob = 0.3){ #[data] is the returned value from function{mutation_simulator} #[feature_start] is the column number of the first feature in data$data #[feature_end] is the column number of the last feature in data$data #[feature_number] is the number of features #[prior_prob] is the prior probability of each gene being a risk gene. For simplicity, set to be equal for all genes, and is eual to the prior probability used in mutation_simulator_v2 # first get the real mutation rate by multiplying 2 and sample size. data$data$base_mut_rate = data$data$base_mut_rate * data$sample_size *2 fr<-function(x){ all_rr <- x logP_Zg1 <- by(data$data, data$data[,"gene"], function(x) sum(x$mut_count*(log(x$base_mut_rate)+(as.matrix(x[,feature_start:feature_end])%*%all_rr))-x$base_mut_rate*exp((as.matrix(x[,feature_start:feature_end])%*%all_rr))-log(factorial(x$mut_count)))) logP_Zg0 <- by(data$data, data$data[,"gene"], function(x) sum(x$mut_count*log(x$base_mut_rate)-x$base_mut_rate-log(factorial(x$mut_count)))) sum(log((prior_prob*exp(logP_Zg1)+(1-prior_prob)*exp(logP_Zg0)))) # minimization } optimization_time <- system.time(mle <- optim(rep(0.1, feature_number), fr,control=list("fnscale"=-1), hessian = TRUE)) list(optimization_time = optimization_time,mle = mle) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/varPercent.R \name{varPercent} \alias{varPercent} \title{Calculate percent variance of eigenvalues for plot_shiny.mfpca()} \usage{ varPercent(level, plotObj) } \arguments{ \item{level}{numeric, 1 or 2 for levels 1 or 2, respectively, 12 to calculate total variance.} \item{plotObj}{the mfpca object plotted in the plot_shiny.mfpca() function.} } \value{ a list of numbers that indicate percent variance for selected level. } \description{ Internal method that calculates percent variance of eigenvalues for specified level (1, 2, or total) for plot_shiny.mfpca(). The desired level is passed in as an argument (level = 12 for total) and a list of percent variances is returned. } \author{ Julia Wrobel \email{julia.wrobel@cuanschutz.edu} }
/man/varPercent.Rd
no_license
refunders/refund.shiny
R
false
true
819
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/varPercent.R \name{varPercent} \alias{varPercent} \title{Calculate percent variance of eigenvalues for plot_shiny.mfpca()} \usage{ varPercent(level, plotObj) } \arguments{ \item{level}{numeric, 1 or 2 for levels 1 or 2, respectively, 12 to calculate total variance.} \item{plotObj}{the mfpca object plotted in the plot_shiny.mfpca() function.} } \value{ a list of numbers that indicate percent variance for selected level. } \description{ Internal method that calculates percent variance of eigenvalues for specified level (1, 2, or total) for plot_shiny.mfpca(). The desired level is passed in as an argument (level = 12 for total) and a list of percent variances is returned. } \author{ Julia Wrobel \email{julia.wrobel@cuanschutz.edu} }
library(tidyverse) library(latex2exp) library(gridExtra) setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) dmse <- function(y, y.hat) -2*sum(y-y.hat) ddmse <- function(y, y.hat) 2*length(y) loss_gb_sum <- function(g,h,w) sum(g*w + 0.5*h*w^2) set.seed(12345) B <- 100000 n <- 1000 y <- rnorm(n) x <- sort(runif(n)) w1 <- mean(y) tr_loss_1 <- mean((y-w1)^2) tr_loss <- numeric(n-1) te_loss <- numeric(n-1) tr_R <- numeric(n-1) te_R <- numeric(n-1) # g, h g <- sapply(y, dmse, y.hat=w1) h <- sapply(y, ddmse, y.hat=w1) pb <- txtProgressBar(min=0,max=n-1, style=3) for(i in 1:(n-1)) { # ind Il <- (x <= x[i]) # w_l, w_r Gl <- sum(g[Il]) Gr <- sum(g[!Il]) Hl <- sum(h[Il]) Hr <- sum(h[!Il]) wl <- -Gl/Hl wr <- -Gr/Hr # train loss tr_loss[i] <- -1/n * 0.5 * ( Gl*Gl/Hl + Gr*Gr/Hr ) # Reduction tr_R[i] <- -1/n*0.5*(Gl+Gr)^2/(Hl+Hr) - tr_loss[i] # monte-carlo test loss te_loss_b <- numeric(B) te_R_b <- numeric(B) for(b in 1:B) { # test data yte <- rnorm(n) xte <- sort(runif(n)) # derivatives #gte <- sapply(yte, dmse, y.hat=w1) #hte <- sapply(yte, ddmse, y.hat=w1) # evaluate mse Ilte <- (xte <= x[i]) #score_l <- loss_gb_sum(gte[Ilte], hte[Ilte], wl) #score_r <- loss_gb_sum(gte[!Ilte], hte[!Ilte], wr) score_l <- sum((yte[Ilte] - (w1 + wl))^2) score_r <- sum((yte[!Ilte] - (w1 + wr))^2) te_loss_b[b] <- (score_l + score_r)/n # Reduction #te_R_b[b] <- -0.5/n*(sum(gte)^2)/sum(hte) - te_loss_b[b] te_R_b[b] <- mean((yte-w1)^2) - te_loss_b[b] } te_loss[i] <- mean(te_loss_b) te_R[i] <- mean(te_R_b) setTxtProgressBar(pb,i) } close(pb) plot(tr_loss_1 + tr_loss, type="l", ylim=range(c(tr_loss_1 + tr_loss,te_loss))) points(te_loss, type="l") plot(tr_R, type="l", ylim=range(c(tr_R, te_R))) points(te_R, type="l") # optimism delta <- 1/n u <- delta * (1:(n-1)) plot(u,te_loss - tr_loss, type="l") plot(u, tr_R - te_R, type="l") eps <- 1e-9 tau <- 0.5*log(u*(1-eps)/(eps*(1-u))) plot(tau, n/4*(te_loss - (tr_loss_1 + tr_loss)), type="l") mean(n/4*(te_loss - (tr_loss_1 + tr_loss))) plot(tau, 
n*(tr_R - te_R), type="l") plot(tau, n/mean((g+h*0)^2)*(tr_R - te_R), type="l") mean(n/mean(g^2)*(tr_R-te_R)) # df for plotting df <- data.frame(u=u, tau=tau, tr_l = tr_loss_1+tr_loss, te_l = te_loss, tr_R = tr_R, te_R = te_R, l0 = 1, C=4, n=n) if(F) { save(df, file="results/split_to_cir_mc.RData") load("results/split_to_cir_mc.RData") } p1 <- df %>% ggplot() + geom_line(aes(u, tr_l), colour="black", alpha=0.7, size=0.4, linetype = "solid") + geom_line(aes(u, te_l), colour="#56B4E9", alpha=0.8, size=0.4, linetype="solid") + geom_hline(yintercept = unique(df$l0), linetype="longdash", colour="#009E73", size=1) + #geom_text(data=data.frame( x = 0.4, y= unique(df$l0)), map=aes(x=x, y=y), label = "Asymptotic value", vjust=-1) + ylab("Loss") + xlab(TeX('$ u = \\frac{i}{n}')) + #xlab(TeX('$ u = p(x_j \\leq s ) $')) + ggtitle("Loss profiling") + theme_minimal() p1 p2 <- df %>% ggplot() + geom_line(aes(u, n/C*(te_l-tr_l)), colour="black", alpha=0.7, size=0.4, linetype="solid") + ylab("L.h. side of (22)") + #ylab(TeX('$ \\frac{n}{\\hat{C}_{stump}} (E_0\\[l_{te}\\]-l_{tr}) $')) + xlab(TeX('$ u = \\frac{i}{n}')) + ggtitle("Loss optimism") + theme_minimal() p2 p3 <- df %>% ggplot() + geom_line(aes(tau, n/C*(te_l-tr_l)), colour="black", alpha=0.7, size=0.4, linetype="solid") + ylab(TeX('$ 1+S(\\tau) $')) + xlab(TeX('$ \\tau = \\frac{1}{2}\\log \\frac{u(1-\\epsilon)}{\\epsilon (1-u)} $')) + #ylab(TeX('$\\alpha x^\\alpha$, where $\\alpha \\in 1\\ldots 5$')) + ggtitle("Time transform to CIR") + #ylim(0,max(0.5*s)) + theme_minimal() p3 grid.arrange(p1,p2,p3, ncol=3) if(F) { #setwd("~/Projects/Github repositories/gbtree_information/figures") #pdf("split_seq_to_cir3.pdf", width=8, height=3.5, paper="special") grid.arrange(p1,p2,p3, ncol=3) #gridExtra::grid.arrange(p1,p2,p3,p4,p5,p6, ncol=3) #dev.off() }
/article-supplementary-material/lunde2020information/Chapter3/C3_F2_loss_split_profiling.R
permissive
Barardo/agtboost
R
false
false
4,361
r
library(tidyverse) library(latex2exp) library(gridExtra) setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) dmse <- function(y, y.hat) -2*sum(y-y.hat) ddmse <- function(y, y.hat) 2*length(y) loss_gb_sum <- function(g,h,w) sum(g*w + 0.5*h*w^2) set.seed(12345) B <- 100000 n <- 1000 y <- rnorm(n) x <- sort(runif(n)) w1 <- mean(y) tr_loss_1 <- mean((y-w1)^2) tr_loss <- numeric(n-1) te_loss <- numeric(n-1) tr_R <- numeric(n-1) te_R <- numeric(n-1) # g, h g <- sapply(y, dmse, y.hat=w1) h <- sapply(y, ddmse, y.hat=w1) pb <- txtProgressBar(min=0,max=n-1, style=3) for(i in 1:(n-1)) { # ind Il <- (x <= x[i]) # w_l, w_r Gl <- sum(g[Il]) Gr <- sum(g[!Il]) Hl <- sum(h[Il]) Hr <- sum(h[!Il]) wl <- -Gl/Hl wr <- -Gr/Hr # train loss tr_loss[i] <- -1/n * 0.5 * ( Gl*Gl/Hl + Gr*Gr/Hr ) # Reduction tr_R[i] <- -1/n*0.5*(Gl+Gr)^2/(Hl+Hr) - tr_loss[i] # monte-carlo test loss te_loss_b <- numeric(B) te_R_b <- numeric(B) for(b in 1:B) { # test data yte <- rnorm(n) xte <- sort(runif(n)) # derivatives #gte <- sapply(yte, dmse, y.hat=w1) #hte <- sapply(yte, ddmse, y.hat=w1) # evaluate mse Ilte <- (xte <= x[i]) #score_l <- loss_gb_sum(gte[Ilte], hte[Ilte], wl) #score_r <- loss_gb_sum(gte[!Ilte], hte[!Ilte], wr) score_l <- sum((yte[Ilte] - (w1 + wl))^2) score_r <- sum((yte[!Ilte] - (w1 + wr))^2) te_loss_b[b] <- (score_l + score_r)/n # Reduction #te_R_b[b] <- -0.5/n*(sum(gte)^2)/sum(hte) - te_loss_b[b] te_R_b[b] <- mean((yte-w1)^2) - te_loss_b[b] } te_loss[i] <- mean(te_loss_b) te_R[i] <- mean(te_R_b) setTxtProgressBar(pb,i) } close(pb) plot(tr_loss_1 + tr_loss, type="l", ylim=range(c(tr_loss_1 + tr_loss,te_loss))) points(te_loss, type="l") plot(tr_R, type="l", ylim=range(c(tr_R, te_R))) points(te_R, type="l") # optimism delta <- 1/n u <- delta * (1:(n-1)) plot(u,te_loss - tr_loss, type="l") plot(u, tr_R - te_R, type="l") eps <- 1e-9 tau <- 0.5*log(u*(1-eps)/(eps*(1-u))) plot(tau, n/4*(te_loss - (tr_loss_1 + tr_loss)), type="l") mean(n/4*(te_loss - (tr_loss_1 + tr_loss))) plot(tau, 
n*(tr_R - te_R), type="l") plot(tau, n/mean((g+h*0)^2)*(tr_R - te_R), type="l") mean(n/mean(g^2)*(tr_R-te_R)) # df for plotting df <- data.frame(u=u, tau=tau, tr_l = tr_loss_1+tr_loss, te_l = te_loss, tr_R = tr_R, te_R = te_R, l0 = 1, C=4, n=n) if(F) { save(df, file="results/split_to_cir_mc.RData") load("results/split_to_cir_mc.RData") } p1 <- df %>% ggplot() + geom_line(aes(u, tr_l), colour="black", alpha=0.7, size=0.4, linetype = "solid") + geom_line(aes(u, te_l), colour="#56B4E9", alpha=0.8, size=0.4, linetype="solid") + geom_hline(yintercept = unique(df$l0), linetype="longdash", colour="#009E73", size=1) + #geom_text(data=data.frame( x = 0.4, y= unique(df$l0)), map=aes(x=x, y=y), label = "Asymptotic value", vjust=-1) + ylab("Loss") + xlab(TeX('$ u = \\frac{i}{n}')) + #xlab(TeX('$ u = p(x_j \\leq s ) $')) + ggtitle("Loss profiling") + theme_minimal() p1 p2 <- df %>% ggplot() + geom_line(aes(u, n/C*(te_l-tr_l)), colour="black", alpha=0.7, size=0.4, linetype="solid") + ylab("L.h. side of (22)") + #ylab(TeX('$ \\frac{n}{\\hat{C}_{stump}} (E_0\\[l_{te}\\]-l_{tr}) $')) + xlab(TeX('$ u = \\frac{i}{n}')) + ggtitle("Loss optimism") + theme_minimal() p2 p3 <- df %>% ggplot() + geom_line(aes(tau, n/C*(te_l-tr_l)), colour="black", alpha=0.7, size=0.4, linetype="solid") + ylab(TeX('$ 1+S(\\tau) $')) + xlab(TeX('$ \\tau = \\frac{1}{2}\\log \\frac{u(1-\\epsilon)}{\\epsilon (1-u)} $')) + #ylab(TeX('$\\alpha x^\\alpha$, where $\\alpha \\in 1\\ldots 5$')) + ggtitle("Time transform to CIR") + #ylim(0,max(0.5*s)) + theme_minimal() p3 grid.arrange(p1,p2,p3, ncol=3) if(F) { #setwd("~/Projects/Github repositories/gbtree_information/figures") #pdf("split_seq_to_cir3.pdf", width=8, height=3.5, paper="special") grid.arrange(p1,p2,p3, ncol=3) #gridExtra::grid.arrange(p1,p2,p3,p4,p5,p6, ncol=3) #dev.off() }
###### Initialize the operating model ###### library(TMB) library(dplyr) library(reshape2) library(ggplot2) library(r4ss) source('load_files_OM.R') seedz <- 125 set.seed(seedz) assessment <- read.csv('data/assessment_MLE.csv') assessment <- assessment[assessment$year > 1965,] # Get the stock assessment output from SS3 mod <- SS_output(paste(getwd(),'/data/', sep =''), printstats=FALSE, verbose = FALSE) plot.figures = FALSE # Set true for printing to file df <- load_data_seasons(nseason = 4, nspace = 2, bfuture = 0.5, movemaxinit = 0.5, movefiftyinit =8) # Prepare data for operating model simyears <- 25 # Project 30 years into the future (2048 that year) year.future <- c(df$years,(df$years[length(df$years)]+1):(df$years[length(df$years)]+simyears)) N0 <- NA sim.data <- run.agebased.true.catch(df) # Plot the biomass in ggplot df.plot <- data.frame(years = c(df$years,assessment$year), SSB = c(rowSums(sim.data$SSB.weight),assessment$SSB), source = c(rep('SSB OM', length(df$years)), rep('SSB assessment', length(assessment$year)))) p1 <- ggplot(data = df.plot, aes(x = years, y = SSB, color = source))+geom_line(size = 2)+theme_classic() p1 survey.ss <- data.frame(years = mod$cpue$Yr, survey =mod$cpue$Exp, source = 'SS', survsd = NA, kriegsd = NA) df.plot <- data.frame(years = rep(df$years[df$survey > 1],2), survey = c(df$survey[df$survey > 1],sim.data$survey[sim.data$survey > 1]), source = rep(c('Survey data','OM output'), each = length(df$years[df$survey > 1])), survsd= c(df$survey_err[df$flag_survey ==1], rep(NA,length(df$years[df$survey > 1]))), kriegsd = c(rep(exp(df$parms$logSDsurv),length(df$years[df$survey > 1])), rep(NA,length(df$years[df$survey > 1]))) ) df.plot <- rbind(df.plot,survey.ss) df.plot$survsd <- sqrt(df.plot$survey^2*exp(df.plot$survsd+df.plot$kriegsd-1)) p2 <- ggplot(data = df.plot, aes(x = years, y = survey/1e6, color = source))+ geom_point(data = df.plot[df.plot$source == 'Survey data',],size = 3)+ geom_line(data = df.plot[df.plot$source == 'OM 
output',], size =2)+ geom_line(data = df.plot[df.plot$source == 'SS',], size = 2)+ theme_classic()+ geom_errorbar(aes(ymin=(survey-survsd)/1e6, ymax=(survey+survsd)/1e6))+ scale_y_continuous(limit = c(0,5), name = 'survey biomass (million t)')+ scale_x_continuous(name = 'year') p2 source('calcMeanAge.R') age.comps <- sim.data$age_comps_OM[,1:df$nyear,,3] age.comps <- apply(age.comps,c(1,2),sum)/2 am <- calcMeanAge(age.comps,df$nage) age.comps.can <- sim.data$age_comps_OM[,1:df$nyear,1,3] am.can <-calcMeanAge(age.comps.can, df$nage) age.comps.US <- sim.data$age_comps_OM[,1:df$nyear,2,3] am.US <- calcMeanAge(age.comps.US, df$nage) plot(df$years,am, type ='l', lwd = 2, ylab = 'average age') lines(df$years,am.can, type ='l', col = 'red', lwd = 2) lines(df$years,am.US, type = 'l', col = 'blue', lwd = 2) # Put it in a data frame for ggplot df.am <- data.frame(year = rep(df$years,3), am = c(am,am.can,am.US), Country = rep(c('All','Can','US'), each = length(df$years))) age.catch <- sim.data$CatchN.save.age age.catch.all <- apply(age.catch,c(1,2),mean) am.catch <- matrix(NA, df$nyear) for(i in 1:(df$nyear-1)){ ac.tmp <- rep(NA,15) ac.tmp[1:14] <- age.catch.all[2:15,i]/sum(age.catch.all[,i]) ac.tmp[15] <- sum(age.catch.all[16:df$nage,i])/sum(age.catch.all[,i]) am.catch[i] <- sum(df$age[2:16]*ac.tmp) } age.catch.can <- apply(sim.data$CatchN.save.age[,,1,],c(1,2),mean) am.catch.can <- matrix(NA, df$nyear) for(i in 1:(df$nyear-1)){ ac.tmp <- rep(NA,15) ac.tmp[1:14] <- age.catch.can[2:15,i]/sum(age.catch.can[,i]) ac.tmp[15] <- sum(age.catch.can[16:df$nage,i])/sum(age.catch.can[,i]) am.catch.can[i] <- sum(df$age[2:16]*ac.tmp) } age.catch.US <- apply(sim.data$CatchN.save.age[,,2,],c(1,2),mean) am.catch.US <- matrix(NA, df$nyear) for(i in 1:(df$nyear-1)){ ac.tmp <- rep(NA,15) ac.tmp[1:14] <- age.catch.US[2:15,i]/sum(age.catch.US[,i]) ac.tmp[15] <- sum(age.catch.US[16:df$nage,i])/sum(age.catch.US[,i]) am.catch.US[i] <- sum(df$age[2:16]*ac.tmp) } df.am.catch <- data.frame(year = 
rep(df$years,3), am = c(am.catch,am.catch.can,am.catch.US), Country = rep(c('All','Can','US'), each = length(df$years))) p3 <- ggplot(df.am.catch, aes(x = year, y = am, color = Country))+geom_line(size = 1)+ theme_classic()+ scale_y_continuous(name = 'Average age in catch', limit = c(2,10))+scale_x_continuous() p3 # png('Age_comps_catch.png', width = 16, height = 12, res = 400,units = 'cm') # p3 # dev.off() ## See thcoe catch per country c.country <- read.csv('data/catch_per_country.csv') cps <- read.csv('data/catch_per_sector.csv') ## Calculate cps.s <- melt(cps, id.vars = c('year','nfish','nhauls','Country','Catch'), measure.vars = rep(paste('X',1:15, sep =''))) # Omit fleet from this df cps.s <- cps.s %>% group_by(year,Country, variable) %>% summarise(agecomp =weighted.mean(value,Catch)) # Make ages numbers rather than factors ages <- as.numeric(unlist(strsplit(as.character(cps.s$variable), split = "X"))) ages <- ages[is.na(ages) == 0] cps.s$age <- ages cps.am <- cps.s %>% group_by(year,Country) %>% summarise(am = sum((agecomp/100)*age)) cps.am.all <- matrix(NA, df$nyear) for(i in 1:df$nyear){ cps.am.all[i] <- sum(df$age[2:16]*df$age_catch[,i]) } df.am.all <- data.frame(year = df$years, Country = 'All',am = cps.am.all) p1 <- ggplot(cps.am, aes(x= year, y= am, color = Country, group = Country))+geom_line()+geom_point()+theme_classic()+ geom_line(data = df.am.catch, linetype = 2)+ scale_x_continuous(limit = c(1990,2017))+scale_y_continuous(limit = c(2.5,10),name = 'average age in catch')+ geom_line(data = df.am.all, col = 'red')+geom_point(data=df.am.all, col ='red') #png('age_comps.png', width = 16, height = 12, res = 400,units = 'cm') p1 #dev.off() # Is my weighted calculation correct? 
cps.all<- melt(cps, id.vars = c('year','nfish','nhauls','Country','Catch'),measure.vars = rep(paste('X',1:15, sep =''))) # Omit fleet from this df cps.all <- cps.all %>% group_by(year, variable) %>% summarise(agecomp =weighted.mean(value,Catch)) # Make ages numbers rather than factors ages <- as.numeric(unlist(strsplit(as.character(cps.all$variable), split = "X"))) ages <- ages[is.na(ages) == 0] cps.all$age <- ages cps.all.s <- cps.all %>% group_by(year) %>% summarise(am = sum((agecomp/100)*age))
/Spatial MSE/runOM_condtion.R
no_license
kristinmarshall-NOAA/PacifichakeMSE
R
false
false
6,771
r
###### Initialize the operating model ###### library(TMB) library(dplyr) library(reshape2) library(ggplot2) library(r4ss) source('load_files_OM.R') seedz <- 125 set.seed(seedz) assessment <- read.csv('data/assessment_MLE.csv') assessment <- assessment[assessment$year > 1965,] # Get the stock assessment output from SS3 mod <- SS_output(paste(getwd(),'/data/', sep =''), printstats=FALSE, verbose = FALSE) plot.figures = FALSE # Set true for printing to file df <- load_data_seasons(nseason = 4, nspace = 2, bfuture = 0.5, movemaxinit = 0.5, movefiftyinit =8) # Prepare data for operating model simyears <- 25 # Project 30 years into the future (2048 that year) year.future <- c(df$years,(df$years[length(df$years)]+1):(df$years[length(df$years)]+simyears)) N0 <- NA sim.data <- run.agebased.true.catch(df) # Plot the biomass in ggplot df.plot <- data.frame(years = c(df$years,assessment$year), SSB = c(rowSums(sim.data$SSB.weight),assessment$SSB), source = c(rep('SSB OM', length(df$years)), rep('SSB assessment', length(assessment$year)))) p1 <- ggplot(data = df.plot, aes(x = years, y = SSB, color = source))+geom_line(size = 2)+theme_classic() p1 survey.ss <- data.frame(years = mod$cpue$Yr, survey =mod$cpue$Exp, source = 'SS', survsd = NA, kriegsd = NA) df.plot <- data.frame(years = rep(df$years[df$survey > 1],2), survey = c(df$survey[df$survey > 1],sim.data$survey[sim.data$survey > 1]), source = rep(c('Survey data','OM output'), each = length(df$years[df$survey > 1])), survsd= c(df$survey_err[df$flag_survey ==1], rep(NA,length(df$years[df$survey > 1]))), kriegsd = c(rep(exp(df$parms$logSDsurv),length(df$years[df$survey > 1])), rep(NA,length(df$years[df$survey > 1]))) ) df.plot <- rbind(df.plot,survey.ss) df.plot$survsd <- sqrt(df.plot$survey^2*exp(df.plot$survsd+df.plot$kriegsd-1)) p2 <- ggplot(data = df.plot, aes(x = years, y = survey/1e6, color = source))+ geom_point(data = df.plot[df.plot$source == 'Survey data',],size = 3)+ geom_line(data = df.plot[df.plot$source == 'OM 
output',], size =2)+ geom_line(data = df.plot[df.plot$source == 'SS',], size = 2)+ theme_classic()+ geom_errorbar(aes(ymin=(survey-survsd)/1e6, ymax=(survey+survsd)/1e6))+ scale_y_continuous(limit = c(0,5), name = 'survey biomass (million t)')+ scale_x_continuous(name = 'year') p2 source('calcMeanAge.R') age.comps <- sim.data$age_comps_OM[,1:df$nyear,,3] age.comps <- apply(age.comps,c(1,2),sum)/2 am <- calcMeanAge(age.comps,df$nage) age.comps.can <- sim.data$age_comps_OM[,1:df$nyear,1,3] am.can <-calcMeanAge(age.comps.can, df$nage) age.comps.US <- sim.data$age_comps_OM[,1:df$nyear,2,3] am.US <- calcMeanAge(age.comps.US, df$nage) plot(df$years,am, type ='l', lwd = 2, ylab = 'average age') lines(df$years,am.can, type ='l', col = 'red', lwd = 2) lines(df$years,am.US, type = 'l', col = 'blue', lwd = 2) # Put it in a data frame for ggplot df.am <- data.frame(year = rep(df$years,3), am = c(am,am.can,am.US), Country = rep(c('All','Can','US'), each = length(df$years))) age.catch <- sim.data$CatchN.save.age age.catch.all <- apply(age.catch,c(1,2),mean) am.catch <- matrix(NA, df$nyear) for(i in 1:(df$nyear-1)){ ac.tmp <- rep(NA,15) ac.tmp[1:14] <- age.catch.all[2:15,i]/sum(age.catch.all[,i]) ac.tmp[15] <- sum(age.catch.all[16:df$nage,i])/sum(age.catch.all[,i]) am.catch[i] <- sum(df$age[2:16]*ac.tmp) } age.catch.can <- apply(sim.data$CatchN.save.age[,,1,],c(1,2),mean) am.catch.can <- matrix(NA, df$nyear) for(i in 1:(df$nyear-1)){ ac.tmp <- rep(NA,15) ac.tmp[1:14] <- age.catch.can[2:15,i]/sum(age.catch.can[,i]) ac.tmp[15] <- sum(age.catch.can[16:df$nage,i])/sum(age.catch.can[,i]) am.catch.can[i] <- sum(df$age[2:16]*ac.tmp) } age.catch.US <- apply(sim.data$CatchN.save.age[,,2,],c(1,2),mean) am.catch.US <- matrix(NA, df$nyear) for(i in 1:(df$nyear-1)){ ac.tmp <- rep(NA,15) ac.tmp[1:14] <- age.catch.US[2:15,i]/sum(age.catch.US[,i]) ac.tmp[15] <- sum(age.catch.US[16:df$nage,i])/sum(age.catch.US[,i]) am.catch.US[i] <- sum(df$age[2:16]*ac.tmp) } df.am.catch <- data.frame(year = 
rep(df$years,3), am = c(am.catch,am.catch.can,am.catch.US), Country = rep(c('All','Can','US'), each = length(df$years))) p3 <- ggplot(df.am.catch, aes(x = year, y = am, color = Country))+geom_line(size = 1)+ theme_classic()+ scale_y_continuous(name = 'Average age in catch', limit = c(2,10))+scale_x_continuous() p3 # png('Age_comps_catch.png', width = 16, height = 12, res = 400,units = 'cm') # p3 # dev.off() ## See thcoe catch per country c.country <- read.csv('data/catch_per_country.csv') cps <- read.csv('data/catch_per_sector.csv') ## Calculate cps.s <- melt(cps, id.vars = c('year','nfish','nhauls','Country','Catch'), measure.vars = rep(paste('X',1:15, sep =''))) # Omit fleet from this df cps.s <- cps.s %>% group_by(year,Country, variable) %>% summarise(agecomp =weighted.mean(value,Catch)) # Make ages numbers rather than factors ages <- as.numeric(unlist(strsplit(as.character(cps.s$variable), split = "X"))) ages <- ages[is.na(ages) == 0] cps.s$age <- ages cps.am <- cps.s %>% group_by(year,Country) %>% summarise(am = sum((agecomp/100)*age)) cps.am.all <- matrix(NA, df$nyear) for(i in 1:df$nyear){ cps.am.all[i] <- sum(df$age[2:16]*df$age_catch[,i]) } df.am.all <- data.frame(year = df$years, Country = 'All',am = cps.am.all) p1 <- ggplot(cps.am, aes(x= year, y= am, color = Country, group = Country))+geom_line()+geom_point()+theme_classic()+ geom_line(data = df.am.catch, linetype = 2)+ scale_x_continuous(limit = c(1990,2017))+scale_y_continuous(limit = c(2.5,10),name = 'average age in catch')+ geom_line(data = df.am.all, col = 'red')+geom_point(data=df.am.all, col ='red') #png('age_comps.png', width = 16, height = 12, res = 400,units = 'cm') p1 #dev.off() # Is my weighted calculation correct? 
cps.all<- melt(cps, id.vars = c('year','nfish','nhauls','Country','Catch'),measure.vars = rep(paste('X',1:15, sep =''))) # Omit fleet from this df cps.all <- cps.all %>% group_by(year, variable) %>% summarise(agecomp =weighted.mean(value,Catch)) # Make ages numbers rather than factors ages <- as.numeric(unlist(strsplit(as.character(cps.all$variable), split = "X"))) ages <- ages[is.na(ages) == 0] cps.all$age <- ages cps.all.s <- cps.all %>% group_by(year) %>% summarise(am = sum((agecomp/100)*age))
\name{horizon} \alias{horizon} \title{Distance to the horizon} \description{ Empirical function to compute the distance to the horizon from a given altitude. The earth is assumed to be smooth, i.e. mountains and other obstacles are ignored. } \usage{ horizon(h, r=6378137) } \arguments{ \item{h}{altitude, numeric >= 0. Should have the same unit as r} \item{r}{radius of the earth; default value is 6378137 m} } \value{ Distance in units of \code{h} (default is meters) } \references{ \url{https://www.edwilliams.org/avform147.htm#Horizon} Bowditch, 1995. American Practical Navigator. Table 12. } \author{ Robert J. Hijmans } \examples{ horizon(1.80) # me horizon(324) # Eiffel tower } \keyword{ spatial }
/man/horizon.Rd
no_license
cran/geosphere
R
false
false
765
rd
\name{horizon} \alias{horizon} \title{Distance to the horizon} \description{ Empirical function to compute the distance to the horizon from a given altitude. The earth is assumed to be smooth, i.e. mountains and other obstacles are ignored. } \usage{ horizon(h, r=6378137) } \arguments{ \item{h}{altitude, numeric >= 0. Should have the same unit as r} \item{r}{radius of the earth; default value is 6378137 m} } \value{ Distance in units of \code{h} (default is meters) } \references{ \url{https://www.edwilliams.org/avform147.htm#Horizon} Bowditch, 1995. American Practical Navigator. Table 12. } \author{ Robert J. Hijmans } \examples{ horizon(1.80) # me horizon(324) # Eiffel tower } \keyword{ spatial }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/effect_size.R \name{effect_size} \alias{effect_size} \title{Calculate effect sizes in a model} \usage{ effect_size(model, formula, step = NULL, bootstrap = FALSE, to = step, data = NULL, at = NULL, ...) } \arguments{ \item{model}{the model from which the effect size is to be calculated} \item{formula}{a formula whose right-hand side is the variable with respect to which the effect size is to be calculated.} \item{step}{the numerical stepsize for the change var, or a comparison category for a categorical change var. This will be either a character string or a number, depending on the type of variable specified in the formula.} \item{bootstrap}{If \code{TRUE}, calculate a standard error using bootstrapping. Alternatively, you can specify the number of bootstrap replications (default:100).} \item{to}{a synonym for step. (In English, "to" is more appropriate for a categorical input, "step" for a quantitative. But you can use either.)} \item{data}{Specifies exactly the cases at which you want to calculate the effect size. Unlike \code{...} or \code{at}, no new combinations will be created.} \item{at}{similar to \code{...} but expects a list or dataframe of the values you want to set. Like \code{...}, all combinations of the values specified will be used as inputs.} \item{...}{additional arguments for evaluation levels of explanatory variables or to be passed to \code{predict()}. For instance, for a glm, perhaps you want \code{type = "response"}.} } \description{ Like a derivative or finite-difference } \details{ When you want to force or restrict the effect size calculation to specific values for explanatory variables, list those variables and levels as a vector in ... For example, \code{educ = c(10, 12, 16)} will cause the effect size to be calculated at each of those three levels of education. Any variables whose levels are not specified in ... will have values selected automatically. 
} \examples{ mod1 <- lm(wage ~ age * sex * educ + sector, data = mosaicData::CPS85) effect_size(mod1, ~ sex) effect_size(mod1, ~ sector) effect_size(mod1, ~ age, sex = "M", educ = c(10, 12, 16), age = c(30, 40)) effect_size(mod1, ~ age, sex = "F", age = 34, step = 1) effect_size(mod1, ~ sex, age = 35, sex = "M", to = "F" ) }
/man/effect_size.Rd
no_license
rpruim/statisticalModeling
R
false
true
2,335
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/effect_size.R \name{effect_size} \alias{effect_size} \title{Calculate effect sizes in a model} \usage{ effect_size(model, formula, step = NULL, bootstrap = FALSE, to = step, data = NULL, at = NULL, ...) } \arguments{ \item{model}{the model from which the effect size is to be calculated} \item{formula}{a formula whose right-hand side is the variable with respect to which the effect size is to be calculated.} \item{step}{the numerical stepsize for the change var, or a comparison category for a categorical change var. This will be either a character string or a number, depending on the type of variable specified in the formula.} \item{bootstrap}{If \code{TRUE}, calculate a standard error using bootstrapping. Alternatively, you can specify the number of bootstrap replications (default:100).} \item{to}{a synonym for step. (In English, "to" is more appropriate for a categorical input, "step" for a quantitative. But you can use either.)} \item{data}{Specifies exactly the cases at which you want to calculate the effect size. Unlike \code{...} or \code{at}, no new combinations will be created.} \item{at}{similar to \code{...} but expects a list or dataframe of the values you want to set. Like \code{...}, all combinations of the values specified will be used as inputs.} \item{...}{additional arguments for evaluation levels of explanatory variables or to be passed to \code{predict()}. For instance, for a glm, perhaps you want \code{type = "response"}.} } \description{ Like a derivative or finite-difference } \details{ When you want to force or restrict the effect size calculation to specific values for explanatory variables, list those variables and levels as a vector in ... For example, \code{educ = c(10, 12, 16)} will cause the effect size to be calculated at each of those three levels of education. Any variables whose levels are not specified in ... will have values selected automatically. 
} \examples{ mod1 <- lm(wage ~ age * sex * educ + sector, data = mosaicData::CPS85) effect_size(mod1, ~ sex) effect_size(mod1, ~ sector) effect_size(mod1, ~ age, sex = "M", educ = c(10, 12, 16), age = c(30, 40)) effect_size(mod1, ~ age, sex = "F", age = 34, step = 1) effect_size(mod1, ~ sex, age = 35, sex = "M", to = "F" ) }
#' Bin tables of environmental conditions in M and for occurrences from objects #' #' @description bin_tables helps in creating bin tables of environmental #' conditions in accessible areas (M) and species occurrence records #' (i.e., table of characters). This is done using results from previous #' analyses, and can be applied to various species and multiple variables. #' #' @param ranges list of ranges of environmental values in M and in species #' occurrences derived from using the function \code{\link{histograms_env}}. #' @param percentage_out (numeric) percentage of extreme environmental data in M #' to be excluded in bin creation for further analyses. See details. Default = 5. #' @param n_bins (numeric) number of bins to be created from the range of #' environmental values considered when creating each character in bin tables. #' Default = 20. See details. #' @param bin_size (numeric) argument deprecated, use n_bins instead. #' @param save (logical) whether or not to save the results in working directory. #' Default = FALSE. #' @param output_directory (character) name of the folder in which results will be #' written. #' @param overwrite (logical) whether or not to overwrite existing results #' in \code{output_directory}. Default = FALSE. #' @param verbose (logical) whether messages should be printed. Default = TRUE. #' #' @details #' The percentage to be defined in \code{percentage_out} must correspond with #' one of the confidence limits defined in \code{\link{histograms_env}} #' (argument \code{CL_lines}). For instance, if \code{CL_lines} = 95, then #' \code{percentage_out} can only be either 5 (keeping data inside the 95 CL) or #' 0 (to avoid exclusion of extreme values in M). #' #' Excluding a certain percentage of extreme environmental values prevents the #' algorithm from considering extremely rare environmental values in the #' accessible area for the species (M). 
Being too rare, these values may have #' never been explored by the species; therefore, including them in the process #' of preparation of the table of characters (bin table) is risky. #' #' The argument \code{n_bins} helps to define how many characters (bins) will be #' considered for the range of values in each variable. This is, a value of 20 #' determines that a range of temperature (5-25) will be split approximately #' every 1 degree. The argument \code{bin_size} has been deprecated. #' #' @return #' A list named as in \code{ranges} containing the table(s) of characters. #' A folder named as in \code{output_directory} containing all resulting csv #' files with the tables of characters will be created if \code{save} is set as #' TRUE. #' #' Potential values for characters are: #' - "1" = the species is present in those environmental conditions. #' - "0" = the species is not present in those environmental conditions. This is, #' those environmental conditions inside the accessible area (M) are more extreme #' than the ones used for the species. #' - "?" = there is no certainty about the species presence in those environmental #' conditions. This happens if environmental combinations are more extreme than #' the ones found in the accessible area (M), when environmental conditions in #' species records are as extreme as the most extreme ones in M. 
#' #' @importFrom utils write.csv #' #' @export #' #' @usage #' bin_tables(ranges, percentage_out = 5, n_bins = 20, bin_size, save = FALSE, #' output_directory, overwrite = FALSE, verbose = TRUE) #' #' @examples #' # simple list of ranges #' ranges <- list(temp = data.frame(Species = c("sp1", "sp2", "sp3"), #' Species_lower = c(120, 56, 59.75), #' Species_upper = c(265, 333, 333), #' M_lower = c(93, 39, 56), #' M_upper = c(302, 333, 333), #' M_95_lowerCL = c(158, 91, 143), #' M_95_upperCL = c(292, 290, 326)), #' prec = data.frame(Species = c("sp1", "sp2", "sp3"), #' Species_lower = c(597, 3, 3), #' Species_upper = c(3492, 2673, 6171), #' M_lower = c(228, 3, 3), #' M_upper = c(6369, 7290, 6606), #' M_95_lowerCL = c(228, 3, 3), #' M_95_upperCL = c(3114, 2376, 2568))) #' #' # bin preparation #' bins <- bin_tables(ranges, percentage_out = 5, n_bins = 20) #' #' # see arguments save and output_directory to write results in local directory bin_tables <- function(ranges, percentage_out = 5, n_bins = 20, bin_size, save = FALSE, output_directory, overwrite = FALSE, verbose = TRUE) { # checking for potential errors if (missing(ranges)) {stop("Argument 'ranges' is missing.")} if (!missing(bin_size)) { warning("Argument 'bin_size' is deprecated, using 'n_bins'.") } if (save == TRUE) { if (missing(output_directory)) { stop("Argument 'output_directory' is missing.") } else { if (overwrite == FALSE & dir.exists(output_directory)) { stop("'output_directory' already exists, to replace it use 'overwrite' = TRUE.") } if (overwrite == TRUE & dir.exists(output_directory)) { unlink(x = output_directory, recursive = TRUE, force = TRUE) } } } if (verbose == TRUE) { message("\nPreparing bin tables using ranges:") } # directory for results if (save == TRUE) {dir.create(output_directory)} bin_tabs <- lapply(1:length(ranges), function(i) { # preparing ranges spnames <- ranges[[1]][, 1] cl <- paste0("M_", 100 - percentage_out, c("_lowerCL", "_upperCL")) sp_r <- paste0("Species_", c("lower", 
"upper")) overall_range <- range(c(ranges[[i]][, c(sp_r, cl)])) M_range <- ranges[[i]][, cl] sp_range <- ranges[[i]][, 2:3] # bin tables bin_size <- diff(overall_range) / n_bins bin_table <- bin_env(overall_range, M_range, sp_range, bin_size) rownames(bin_table) <- gsub("_", " ", spnames) # write table if (save == TRUE) { write.csv(bin_table, paste0(output_directory, "/", names(ranges)[i], "_bin_table.csv"), row.names = TRUE) } if (verbose == TRUE) { message(i, " of ", length(ranges), " variables processed") } return(bin_table) }) names(bin_tabs) <- names(ranges) return(bin_tabs) }
/R/bin_tables.R
no_license
marlonecobos/nichevol
R
false
false
6,552
r
#' Bin tables of environmental conditions in M and for occurrences from objects #' #' @description bin_tables helps in creating bin tables of environmental #' conditions in accessible areas (M) and species occurrence records #' (i.e., table of characters). This is done using results from previous #' analyses, and can be applied to various species and multiple variables. #' #' @param ranges list of ranges of environmental values in M and in species #' occurrences derived from using the function \code{\link{histograms_env}}. #' @param percentage_out (numeric) percentage of extreme environmental data in M #' to be excluded in bin creation for further analyses. See details. Default = 5. #' @param n_bins (numeric) number of bins to be created from the range of #' environmental values considered when creating each character in bin tables. #' Default = 20. See details. #' @param bin_size (numeric) argument deprecated, use n_bins instead. #' @param save (logical) whether or not to save the results in working directory. #' Default = FALSE. #' @param output_directory (character) name of the folder in which results will be #' written. #' @param overwrite (logical) whether or not to overwrite existing results #' in \code{output_directory}. Default = FALSE. #' @param verbose (logical) whether messages should be printed. Default = TRUE. #' #' @details #' The percentage to be defined in \code{percentage_out} must correspond with #' one of the confidence limits defined in \code{\link{histograms_env}} #' (argument \code{CL_lines}). For instance, if \code{CL_lines} = 95, then #' \code{percentage_out} can only be either 5 (keeping data inside the 95 CL) or #' 0 (to avoid exclusion of extreme values in M). #' #' Excluding a certain percentage of extreme environmental values prevents the #' algorithm from considering extremely rare environmental values in the #' accessible area for the species (M). 
Being too rare, these values may have #' never been explored by the species; therefore, including them in the process #' of preparation of the table of characters (bin table) is risky. #' #' The argument \code{n_bins} helps to define how many characters (bins) will be #' considered for the range of values in each variable. This is, a value of 20 #' determines that a range of temperature (5-25) will be split approximately #' every 1 degree. The argument \code{bin_size} has been deprecated. #' #' @return #' A list named as in \code{ranges} containing the table(s) of characters. #' A folder named as in \code{output_directory} containing all resulting csv #' files with the tables of characters will be created if \code{save} is set as #' TRUE. #' #' Potential values for characters are: #' - "1" = the species is present in those environmental conditions. #' - "0" = the species is not present in those environmental conditions. This is, #' those environmental conditions inside the accessible area (M) are more extreme #' than the ones used for the species. #' - "?" = there is no certainty about the species presence in those environmental #' conditions. This happens if environmental combinations are more extreme than #' the ones found in the accessible area (M), when environmental conditions in #' species records are as extreme as the most extreme ones in M. 
#' #' @importFrom utils write.csv #' #' @export #' #' @usage #' bin_tables(ranges, percentage_out = 5, n_bins = 20, bin_size, save = FALSE, #' output_directory, overwrite = FALSE, verbose = TRUE) #' #' @examples #' # simple list of ranges #' ranges <- list(temp = data.frame(Species = c("sp1", "sp2", "sp3"), #' Species_lower = c(120, 56, 59.75), #' Species_upper = c(265, 333, 333), #' M_lower = c(93, 39, 56), #' M_upper = c(302, 333, 333), #' M_95_lowerCL = c(158, 91, 143), #' M_95_upperCL = c(292, 290, 326)), #' prec = data.frame(Species = c("sp1", "sp2", "sp3"), #' Species_lower = c(597, 3, 3), #' Species_upper = c(3492, 2673, 6171), #' M_lower = c(228, 3, 3), #' M_upper = c(6369, 7290, 6606), #' M_95_lowerCL = c(228, 3, 3), #' M_95_upperCL = c(3114, 2376, 2568))) #' #' # bin preparation #' bins <- bin_tables(ranges, percentage_out = 5, n_bins = 20) #' #' # see arguments save and output_directory to write results in local directory bin_tables <- function(ranges, percentage_out = 5, n_bins = 20, bin_size, save = FALSE, output_directory, overwrite = FALSE, verbose = TRUE) { # checking for potential errors if (missing(ranges)) {stop("Argument 'ranges' is missing.")} if (!missing(bin_size)) { warning("Argument 'bin_size' is deprecated, using 'n_bins'.") } if (save == TRUE) { if (missing(output_directory)) { stop("Argument 'output_directory' is missing.") } else { if (overwrite == FALSE & dir.exists(output_directory)) { stop("'output_directory' already exists, to replace it use 'overwrite' = TRUE.") } if (overwrite == TRUE & dir.exists(output_directory)) { unlink(x = output_directory, recursive = TRUE, force = TRUE) } } } if (verbose == TRUE) { message("\nPreparing bin tables using ranges:") } # directory for results if (save == TRUE) {dir.create(output_directory)} bin_tabs <- lapply(1:length(ranges), function(i) { # preparing ranges spnames <- ranges[[1]][, 1] cl <- paste0("M_", 100 - percentage_out, c("_lowerCL", "_upperCL")) sp_r <- paste0("Species_", c("lower", 
"upper")) overall_range <- range(c(ranges[[i]][, c(sp_r, cl)])) M_range <- ranges[[i]][, cl] sp_range <- ranges[[i]][, 2:3] # bin tables bin_size <- diff(overall_range) / n_bins bin_table <- bin_env(overall_range, M_range, sp_range, bin_size) rownames(bin_table) <- gsub("_", " ", spnames) # write table if (save == TRUE) { write.csv(bin_table, paste0(output_directory, "/", names(ranges)[i], "_bin_table.csv"), row.names = TRUE) } if (verbose == TRUE) { message(i, " of ", length(ranges), " variables processed") } return(bin_table) }) names(bin_tabs) <- names(ranges) return(bin_tabs) }
library(shiny) require(mangoTraining) require(ggplot2) # Define server logic required to generate and plot a random distribution shinyServer(function(input, output) { data <- reactive({ if(input$subsetCheck) demoData[demoData$Age <= input$subset, ] else demoData }) xaxis <- reactive({ input$xaxis }) yaxis <- reactive({ input$yaxis }) colouring <- reactive({ switch(input$colouring, "NA"= NA, "sex" = "Sex", "smokes" = "Smokes") }) panel <- reactive({ switch(input$panel, "NA"= NA, "sex" = "Sex", "smokes" = "Smokes") }) smoother <- reactive({ input$smoothMethod }) output$demoData <- renderTable({ data() }) myPlot <- reactive({ xaxis <- xaxis() yaxis <- yaxis() title <- paste("Plot of", xaxis, "against", yaxis, sep = " ") colour <- colouring p <-qplot(data = data(), x = eval(parse(text=xaxis)), y = eval(parse(text=yaxis)), xlab = xaxis, ylab = yaxis, main = title, size = I(3)) if(!is.na(colouring())) { colour <- colouring() p <- p + geom_point(aes(col = eval(parse(text = colour ) ) ) ) + scale_colour_discrete(colour) } if(!is.na(panel())) { panelBy <- panel() p <- p + facet_grid(eval(parse(text = paste(".~", panelBy)))) } if(input$smooth){ p <- p + stat_smooth(method = smoother()) } p }) output$myPlot <- renderPlot({ print(myPlot()) }) fileName <- reactive({ paste(input$plotName, ".png", sep = "") }) dataName <- reactive({ paste(input$dataName, ".csv", sep = "") }) output$downloadPlot <- downloadHandler( filename = function() { fileName()}, content = function(file) { png(file) print(myPlot()) dev.off() }) output$downloadData <- downloadHandler( filename = function() { dataName()}, content = function(file) { write.csv(data(), file) }) })
/Example3/server.R
no_license
bip20/shinyChippenhamTechChatJune2013
R
false
false
2,214
r
library(shiny) require(mangoTraining) require(ggplot2) # Define server logic required to generate and plot a random distribution shinyServer(function(input, output) { data <- reactive({ if(input$subsetCheck) demoData[demoData$Age <= input$subset, ] else demoData }) xaxis <- reactive({ input$xaxis }) yaxis <- reactive({ input$yaxis }) colouring <- reactive({ switch(input$colouring, "NA"= NA, "sex" = "Sex", "smokes" = "Smokes") }) panel <- reactive({ switch(input$panel, "NA"= NA, "sex" = "Sex", "smokes" = "Smokes") }) smoother <- reactive({ input$smoothMethod }) output$demoData <- renderTable({ data() }) myPlot <- reactive({ xaxis <- xaxis() yaxis <- yaxis() title <- paste("Plot of", xaxis, "against", yaxis, sep = " ") colour <- colouring p <-qplot(data = data(), x = eval(parse(text=xaxis)), y = eval(parse(text=yaxis)), xlab = xaxis, ylab = yaxis, main = title, size = I(3)) if(!is.na(colouring())) { colour <- colouring() p <- p + geom_point(aes(col = eval(parse(text = colour ) ) ) ) + scale_colour_discrete(colour) } if(!is.na(panel())) { panelBy <- panel() p <- p + facet_grid(eval(parse(text = paste(".~", panelBy)))) } if(input$smooth){ p <- p + stat_smooth(method = smoother()) } p }) output$myPlot <- renderPlot({ print(myPlot()) }) fileName <- reactive({ paste(input$plotName, ".png", sep = "") }) dataName <- reactive({ paste(input$dataName, ".csv", sep = "") }) output$downloadPlot <- downloadHandler( filename = function() { fileName()}, content = function(file) { png(file) print(myPlot()) dev.off() }) output$downloadData <- downloadHandler( filename = function() { dataName()}, content = function(file) { write.csv(data(), file) }) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rogerroot.R \name{rogerroot} \alias{rogerroot} \title{Roger Root Name Coding Procedure} \usage{ rogerroot(word, maxCodeLen = 5) } \arguments{ \item{word}{string or vector of strings to encode} \item{maxCodeLen}{maximum length of the resulting encodings, in characters} } \value{ the Roger Root encoded character vector } \description{ Provides the Roger Root name coding system } \details{ The \code{rogerroot} function phentically encodes the given string using the Roger Root algorithm. The variable \code{word} is a string or vector of strings to encode. The variable \code{maxCodeLen} is the limit on how long the returned code should be. The default is 5. } \examples{ rogerroot("William") rogerroot(c("Peter", "Peady")) rogerroot("Stevenson") } \references{ Robert L. Taft, \emph{Name search techniques}, Bureau of Systems Development, Albany, New York, 1970. } \seealso{ Other phonics: \code{\link{caverphone}}, \code{\link{cologne}}, \code{\link{lein}}, \code{\link{metaphone}}, \code{\link{mra_encode}}, \code{\link{nysiis}}, \code{\link{soundex}}, \code{\link{statcan}} }
/man/rogerroot.Rd
no_license
ipea/phonics
R
false
true
1,174
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rogerroot.R \name{rogerroot} \alias{rogerroot} \title{Roger Root Name Coding Procedure} \usage{ rogerroot(word, maxCodeLen = 5) } \arguments{ \item{word}{string or vector of strings to encode} \item{maxCodeLen}{maximum length of the resulting encodings, in characters} } \value{ the Roger Root encoded character vector } \description{ Provides the Roger Root name coding system } \details{ The \code{rogerroot} function phentically encodes the given string using the Roger Root algorithm. The variable \code{word} is a string or vector of strings to encode. The variable \code{maxCodeLen} is the limit on how long the returned code should be. The default is 5. } \examples{ rogerroot("William") rogerroot(c("Peter", "Peady")) rogerroot("Stevenson") } \references{ Robert L. Taft, \emph{Name search techniques}, Bureau of Systems Development, Albany, New York, 1970. } \seealso{ Other phonics: \code{\link{caverphone}}, \code{\link{cologne}}, \code{\link{lein}}, \code{\link{metaphone}}, \code{\link{mra_encode}}, \code{\link{nysiis}}, \code{\link{soundex}}, \code{\link{statcan}} }
# Define numbers of genes and samples nGenes = ncol(datExpr); nSamples = nrow(datExpr); # Recalculate MEs with color labels MEs0 = moduleEigengenes(datExpr, moduleColors)$eigengenes MEs = orderMEs(MEs0) moduleTraitCor = cor(MEs, datTraits, use = "p"); moduleTraitPvalue = corPvalueStudent(moduleTraitCor, nSamples); sizeGrWindow(20,20) # Will display correlations and their p-values textMatrix = paste(signif(moduleTraitCor, 2), "\n(", signif(moduleTraitPvalue, 1), ")", sep = ""); dim(textMatrix) = dim(moduleTraitCor) par(mar = c(6, 8.5, 3, 3)); # Display the correlation values within a heatmap plot labeledHeatmap(Matrix = moduleTraitCor, xLabels = names(datTraits), yLabels = names(MEs), ySymbols = names(MEs), colorLabels = FALSE, colors = greenWhiteRed(50), textMatrix = textMatrix, setStdMargins = FALSE, cex.text = 0.5, zlim = c(-1,1), main = paste("Module-trait relationships"))
/Lect10/WGCNA-10.R
no_license
suknamgoong1970/CBNU-Bioinformatics
R
false
false
1,064
r
# Define numbers of genes and samples nGenes = ncol(datExpr); nSamples = nrow(datExpr); # Recalculate MEs with color labels MEs0 = moduleEigengenes(datExpr, moduleColors)$eigengenes MEs = orderMEs(MEs0) moduleTraitCor = cor(MEs, datTraits, use = "p"); moduleTraitPvalue = corPvalueStudent(moduleTraitCor, nSamples); sizeGrWindow(20,20) # Will display correlations and their p-values textMatrix = paste(signif(moduleTraitCor, 2), "\n(", signif(moduleTraitPvalue, 1), ")", sep = ""); dim(textMatrix) = dim(moduleTraitCor) par(mar = c(6, 8.5, 3, 3)); # Display the correlation values within a heatmap plot labeledHeatmap(Matrix = moduleTraitCor, xLabels = names(datTraits), yLabels = names(MEs), ySymbols = names(MEs), colorLabels = FALSE, colors = greenWhiteRed(50), textMatrix = textMatrix, setStdMargins = FALSE, cex.text = 0.5, zlim = c(-1,1), main = paste("Module-trait relationships"))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# INSTALL PACKAGES
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CRAN dependencies for the SDM comparison pipeline.
# FIX: "spaMM" was listed twice in the original vector; deduplicated.
# FIX: "parallel" ships with base R and cannot be installed from CRAN, so it
#      is excluded here.
install.packages(c("pROC", "mvabund", "randomForest", "caret", "e1071", "gbm",
                   "dismo", "yaImpute", "earth", "devtools", "glmnet", "boral",
                   "gjam", "spaMM", "nlme", "MASS", "vegan", "BayesComm",
                   "mvtnorm", "kmed", "xgboost"))

# library() errors immediately when devtools is missing; the original
# require() would only warn and then fail on install_github() below.
library(devtools)

# GitHub-only packages.
install_github("davharris/mistnet2")
install_github("goldingn/BayesComm")

# Local source packages; WD is expected to be defined by the calling script.
install.packages(file.path(WD, "sdmCom_0.1.tar.gz"), repos = NULL, type = "source")
# FIX: the original concatenated WD and "MODELS" with no separator (unlike the
# line above); file.path() makes the separator explicit.
install.packages(file.path(WD, "MODELS/mvpart_pkg/mvpart_1.6-2.tar"),
                 repos = NULL, type = "source")

# OS-specific parallel backends; OS is expected to be defined upstream.
# Scalar condition, so short-circuiting || is the correct operator.
if (OS == "osx" || OS == "unix") {
  install.packages("doMC")
}
if (OS == "win") {
  install.packages("doParallel")
}
/SCRIPTS/pipe/pkgs.r
no_license
davan690/SDM-comparison
R
false
false
1,068
r
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# INSTALL PACKAGES
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CRAN dependencies for the SDM comparison pipeline.
# NOTE(review): "spaMM" is listed twice; harmless but the duplicate could go.
# NOTE(review): "parallel" ships with base R and is not installable from CRAN.
install.packages(c("pROC", "mvabund", "randomForest", "caret", "e1071", "gbm", "dismo", "yaImpute", "earth", "devtools", "glmnet", "boral", "gjam", "spaMM", "nlme", "MASS", "spaMM", "vegan", "BayesComm", "mvtnorm", "parallel", "kmed", "xgboost"))
# Load devtools for install_github(); require() only warns (returns FALSE)
# when devtools is missing, so the next lines would still fail in that case.
require(devtools)
# GitHub-only packages.
install_github('davharris/mistnet2')
install_github('goldingn/BayesComm')
# Local source packages; WD must be defined by the calling script.
install.packages(paste0(WD,"/sdmCom_0.1.tar.gz"), repos = NULL, type = "source")
# NOTE(review): no "/" between WD and "MODELS" here, unlike the line above —
# presumably WD ends with a slash; verify against the caller.
install.packages(paste0(WD,"MODELS/mvpart_pkg/mvpart_1.6-2.tar"), repos = NULL, type = "source")
# OS-specific parallel backends; OS must be defined by the calling script.
if (OS == "osx" | OS == "unix") { install.packages("doMC") }
if (OS == "win") { install.packages("doParallel") }
# Summarise the number of completely observed cases per pollution monitor.
source("readcsvs.R")

# complete: returns a data.frame with one row per monitor id and the count
# ("nobs") of rows in which both pollutant readings are non-missing.
#   directory - folder containing the monitor CSV files (passed to readCSVs)
#   id        - integer vector of monitor ids to summarise (default 1:332)
complete <- function(directory, id = 1:332) {
  data <- readCSVs(directory, id)
  # For each monitor's data frame: x[1,4] assumes column 4 holds the monitor
  # id — TODO confirm against the readCSVs column layout. The sum counts rows
  # where neither the sulfate nor the nitrate measurement is NA.
  data <- t(sapply(data, function(x) c(x[1,4], sum(!is.na(x[,"sulfate"]) & !is.na(x[,"nitrate"])))))
  colnames(data) <- c("id", "nobs")
  data.frame(data)
}
/rprog/assignment1/complete.R
no_license
Sardtok/datasciencecoursera
R
false
false
334
r
# Summarise the number of completely observed cases per pollution monitor.
source("readcsvs.R")

# complete: one row per monitor id with the count ("nobs") of rows in which
# both the sulfate and nitrate measurements are present.
#   directory - folder containing the monitor CSV files (passed to readCSVs)
#   id        - integer vector of monitor ids to summarise (default 1:332)
complete <- function(directory, id = 1:332) {
  frames <- readCSVs(directory, id)
  # vapply with a fixed numeric(2) template: first slot is the monitor id
  # taken from row 1 / column 4, second is the complete-case count.
  per_monitor <- vapply(frames, function(df) {
    both_present <- !is.na(df[, "sulfate"]) & !is.na(df[, "nitrate"])
    c(df[1, 4], sum(both_present))
  }, numeric(2))
  # vapply yields a 2 x n matrix; transpose to one row per monitor.
  result <- t(per_monitor)
  colnames(result) <- c("id", "nobs")
  data.frame(result)
}
##### Required packages #####
# data manipulation
library(dplyr)
library(tidyr)
# data import
library(readr)
# data mining
library(arules)
library(C50)
library(e1071)

#### Directories #####
dir_root <- getwd()
dir_dados <- paste0(dir_root,"/DATA")
dir_dados_pre_processados <- paste0(dir_dados,"/pre_processados")
dir_dados_minerados <- paste0(dir_dados,"/minerados")
dir_plot <- paste0(dir_root,"/PLOT")
dir_plot_mineracao_dados <- paste0(dir_plot,"/mineracao_dados")

# Create the output directories if they do not exist yet
dir.create(dir_plot_mineracao_dados, recursive = TRUE)
dir.create(dir_dados_minerados, recursive = TRUE)

##### Load data frames #####
# Semicolon-separated CSVs produced by the pre-processing step; factors are
# needed by C5.0 / apriori / naiveBayes below.
df_alunos_2015_melhores_cidades <- read.csv2(file = paste0(dir_dados_pre_processados, "/df_alunos_2015_melhores_cidades.csv"), sep = ";", stringsAsFactors = TRUE)
df_alunos_2015_piores_cidades <- read.csv2(file = paste0(dir_dados_pre_processados, "/df_alunos_2015_piores_cidades.csv"), sep = ";", stringsAsFactors = TRUE)

# C5.0 (Classification) =========================================================
set.seed(500)

# Algorithm configuration
## noGlobalPruning = FALSE keeps global pruning enabled
c5_0_config <- C5.0Control(noGlobalPruning = FALSE)

## Best cities ------------------------------------------------------------
# Decision tree: predictor columns are the questionnaire answers
vars <- df_alunos_2015_melhores_cidades %>% select(TX_RESP_Q013_alunos:TX_RESP_Q057_alunos) %>% names

# Removing entries that hurt/confuse the algorithm
## "TX_RESP_Q013_alunos" -> Is there a computer in your home?
## "TX_RESP_Q026_alunos" -> How often do your parents or guardians attend
### parent-teacher meetings?
## "TX_RESP_Q054_alunos" -> Do you do your Math homework?
vars <- vars[c(-1, -5, -15)]
tree_mod_melhores <- C5.0(x = df_alunos_2015_melhores_cidades[,vars], y = df_alunos_2015_melhores_cidades$nivel_proficiencia, control = c5_0_config)
summary(tree_mod_melhores)
# Persist the textual tree description and a high-resolution plot
write(tree_mod_melhores$output, file = paste0(dir_dados_minerados, "/C50_arvore_descricao_melhores"))
jpeg(paste0(dir_plot_mineracao_dados, "/melhores_cidades_c50.jpeg"), quality = 100, width = 6000, height = 2500)
plot(tree_mod_melhores)
dev.off()

# Rules (same model, rule-based representation)
rules_mod_melhores <- C5.0(x = df_alunos_2015_melhores_cidades[,vars], y = df_alunos_2015_melhores_cidades$nivel_proficiencia, control = c5_0_config, rules = TRUE)
summary(rules_mod_melhores)
write(rules_mod_melhores$rules, file = paste0(dir_dados_minerados, "/C50_regras_melhores.csv"))

## Worst cities --------------------------------------------------------------
# Decision tree
vars <- df_alunos_2015_piores_cidades %>% select(TX_RESP_Q013_alunos:TX_RESP_Q057_alunos) %>% names
# Removing the entry that hurts/confuses the algorithm
## "TX_RESP_Q043_alunos" -> On school days, how much time do you spend
## watching TV, browsing the internet or playing video games?
vars <- vars[-10]
tree_mod_piores <- C5.0(x = df_alunos_2015_piores_cidades[,vars], y = df_alunos_2015_piores_cidades$nivel_proficiencia, control = c5_0_config)
summary(tree_mod_piores)
write(tree_mod_piores$output, file = paste0(dir_dados_minerados, "/C50_arvore_descricao_piores"))
jpeg(paste0(dir_plot_mineracao_dados, "/piores_cidades_c50.jpeg"), quality = 100, width = 6000, height = 2500)
plot(tree_mod_piores)
dev.off()

# Rules
rules_mod_piores <- C5.0(x = df_alunos_2015_piores_cidades[,vars], y = df_alunos_2015_piores_cidades$nivel_proficiencia, control = c5_0_config, rules = TRUE)
summary(rules_mod_piores)
write(rules_mod_piores$rules, file = paste0(dir_dados_minerados, "/C50_regras_piores.csv"))

# Apriori (Association) =========================================================
#### Best cities ----------------------------------------------------------
df_melhores_questionario_nivel_prof <- df_alunos_2015_melhores_cidades %>% select(nivel_proficiencia, TX_RESP_Q013_alunos:TX_RESP_Q057_alunos)

# Mine rules with at least 2 items, support >= 0.6, confidence >= 0.8
rules_melhores <- apriori(df_melhores_questionario_nivel_prof, parameter = list(minlen=2, supp = 0.6, conf = 0.8, target = "rules") )
df_rules_melhores <- as(rules_melhores, "data.frame")
# Split each "LHS => RHS" rule string into separate columns
df_rules_melhores2 <- df_rules_melhores %>% separate(col = rules, into = c("rules_LHS", "rules_RHS"), sep = " => ")
write.table(x = df_rules_melhores, file = paste0(dir_dados_minerados, "/apriori_regras_melhores_cidades.csv"), row.names = FALSE, sep = ";")

#### Worst cities ------------------------------------------------------------
df_piores_questionario_nivel_prof <- df_alunos_2015_piores_cidades %>% select(nivel_proficiencia, TX_RESP_Q013_alunos:TX_RESP_Q057_alunos)
# Lower support threshold (0.5) than for the best cities
rules_piores <- apriori(df_piores_questionario_nivel_prof, parameter = list(minlen=2, supp = 0.5, conf = 0.8, target = "rules") )
df_rules_piores <- as(rules_piores, "data.frame")
df_rules_piores2 <- df_rules_piores %>% separate(col = rules, into = c("rules_LHS", "rules_RHS"), sep = " => ")
write.table(x = df_rules_piores, file = paste0(dir_dados_minerados, "/apriori_regras_piores_cidades.csv"), row.names = FALSE, sep = ";")

# Naive Bayes (Classification) =================================================
## Best cities ------------------------------------------------------------
df_alunos_2015_melhores_cidades_nb <- df_alunos_2015_melhores_cidades %>% select(nivel_proficiencia, TX_RESP_Q013_alunos:TX_RESP_Q057_alunos)
melhores_cidades_nb_model <- naiveBayes(nivel_proficiencia ~ ., data = df_alunos_2015_melhores_cidades_nb)
# Predictions on the training data itself; confusion matrix of
# predicted vs. observed proficiency level.
melhores_cidades_NB_Predictions <- predict(melhores_cidades_nb_model, df_alunos_2015_melhores_cidades_nb)
melhores_cidades_confusion <- table(melhores_cidades_NB_Predictions, df_alunos_2015_melhores_cidades_nb$nivel_proficiencia)

## Worst cities --------------------------------------------------------------
df_alunos_2015_piores_cidades_nb <- df_alunos_2015_piores_cidades %>% select(nivel_proficiencia, TX_RESP_Q013_alunos:TX_RESP_Q057_alunos)
piores_cidades_nb_model <- naiveBayes(nivel_proficiencia ~ ., data = df_alunos_2015_piores_cidades_nb)
piores_cidades_NB_Predictions <- predict(piores_cidades_nb_model, df_alunos_2015_piores_cidades_nb)
piores_cidades_confusion <- table(piores_cidades_NB_Predictions, df_alunos_2015_piores_cidades_nb$nivel_proficiencia)
/2_mineração_dados.R
no_license
aismaniotto/KDD_PROVA_BRASIL
R
false
false
7,304
r
##### Required packages #####
# data manipulation
library(dplyr)
library(tidyr)
# data import
library(readr)
# data mining
library(arules)
library(C50)
library(e1071)

#### Directories #####
dir_root <- getwd()
dir_dados <- paste0(dir_root,"/DATA")
dir_dados_pre_processados <- paste0(dir_dados,"/pre_processados")
dir_dados_minerados <- paste0(dir_dados,"/minerados")
dir_plot <- paste0(dir_root,"/PLOT")
dir_plot_mineracao_dados <- paste0(dir_plot,"/mineracao_dados")

# Create the output directories if they do not exist yet
dir.create(dir_plot_mineracao_dados, recursive = TRUE)
dir.create(dir_dados_minerados, recursive = TRUE)

##### Load data frames #####
# Semicolon-separated CSVs produced by the pre-processing step; factors are
# needed by C5.0 / apriori / naiveBayes below.
df_alunos_2015_melhores_cidades <- read.csv2(file = paste0(dir_dados_pre_processados, "/df_alunos_2015_melhores_cidades.csv"), sep = ";", stringsAsFactors = TRUE)
df_alunos_2015_piores_cidades <- read.csv2(file = paste0(dir_dados_pre_processados, "/df_alunos_2015_piores_cidades.csv"), sep = ";", stringsAsFactors = TRUE)

# C5.0 (Classification) =========================================================
set.seed(500)

# Algorithm configuration
## noGlobalPruning = FALSE keeps global pruning enabled
c5_0_config <- C5.0Control(noGlobalPruning = FALSE)

## Best cities ------------------------------------------------------------
# Decision tree: predictor columns are the questionnaire answers
vars <- df_alunos_2015_melhores_cidades %>% select(TX_RESP_Q013_alunos:TX_RESP_Q057_alunos) %>% names

# Removing entries that hurt/confuse the algorithm
## "TX_RESP_Q013_alunos" -> Is there a computer in your home?
## "TX_RESP_Q026_alunos" -> How often do your parents or guardians attend
### parent-teacher meetings?
## "TX_RESP_Q054_alunos" -> Do you do your Math homework?
vars <- vars[c(-1, -5, -15)]
tree_mod_melhores <- C5.0(x = df_alunos_2015_melhores_cidades[,vars], y = df_alunos_2015_melhores_cidades$nivel_proficiencia, control = c5_0_config)
summary(tree_mod_melhores)
# Persist the textual tree description and a high-resolution plot
write(tree_mod_melhores$output, file = paste0(dir_dados_minerados, "/C50_arvore_descricao_melhores"))
jpeg(paste0(dir_plot_mineracao_dados, "/melhores_cidades_c50.jpeg"), quality = 100, width = 6000, height = 2500)
plot(tree_mod_melhores)
dev.off()

# Rules (same model, rule-based representation)
rules_mod_melhores <- C5.0(x = df_alunos_2015_melhores_cidades[,vars], y = df_alunos_2015_melhores_cidades$nivel_proficiencia, control = c5_0_config, rules = TRUE)
summary(rules_mod_melhores)
write(rules_mod_melhores$rules, file = paste0(dir_dados_minerados, "/C50_regras_melhores.csv"))

## Worst cities --------------------------------------------------------------
# Decision tree
vars <- df_alunos_2015_piores_cidades %>% select(TX_RESP_Q013_alunos:TX_RESP_Q057_alunos) %>% names
# Removing the entry that hurts/confuses the algorithm
## "TX_RESP_Q043_alunos" -> On school days, how much time do you spend
## watching TV, browsing the internet or playing video games?
vars <- vars[-10]
tree_mod_piores <- C5.0(x = df_alunos_2015_piores_cidades[,vars], y = df_alunos_2015_piores_cidades$nivel_proficiencia, control = c5_0_config)
summary(tree_mod_piores)
write(tree_mod_piores$output, file = paste0(dir_dados_minerados, "/C50_arvore_descricao_piores"))
jpeg(paste0(dir_plot_mineracao_dados, "/piores_cidades_c50.jpeg"), quality = 100, width = 6000, height = 2500)
plot(tree_mod_piores)
dev.off()

# Rules
rules_mod_piores <- C5.0(x = df_alunos_2015_piores_cidades[,vars], y = df_alunos_2015_piores_cidades$nivel_proficiencia, control = c5_0_config, rules = TRUE)
summary(rules_mod_piores)
write(rules_mod_piores$rules, file = paste0(dir_dados_minerados, "/C50_regras_piores.csv"))

# Apriori (Association) =========================================================
#### Best cities ----------------------------------------------------------
df_melhores_questionario_nivel_prof <- df_alunos_2015_melhores_cidades %>% select(nivel_proficiencia, TX_RESP_Q013_alunos:TX_RESP_Q057_alunos)

# Mine rules with at least 2 items, support >= 0.6, confidence >= 0.8
rules_melhores <- apriori(df_melhores_questionario_nivel_prof, parameter = list(minlen=2, supp = 0.6, conf = 0.8, target = "rules") )
df_rules_melhores <- as(rules_melhores, "data.frame")
# Split each "LHS => RHS" rule string into separate columns
df_rules_melhores2 <- df_rules_melhores %>% separate(col = rules, into = c("rules_LHS", "rules_RHS"), sep = " => ")
write.table(x = df_rules_melhores, file = paste0(dir_dados_minerados, "/apriori_regras_melhores_cidades.csv"), row.names = FALSE, sep = ";")

#### Worst cities ------------------------------------------------------------
df_piores_questionario_nivel_prof <- df_alunos_2015_piores_cidades %>% select(nivel_proficiencia, TX_RESP_Q013_alunos:TX_RESP_Q057_alunos)
# Lower support threshold (0.5) than for the best cities
rules_piores <- apriori(df_piores_questionario_nivel_prof, parameter = list(minlen=2, supp = 0.5, conf = 0.8, target = "rules") )
df_rules_piores <- as(rules_piores, "data.frame")
df_rules_piores2 <- df_rules_piores %>% separate(col = rules, into = c("rules_LHS", "rules_RHS"), sep = " => ")
write.table(x = df_rules_piores, file = paste0(dir_dados_minerados, "/apriori_regras_piores_cidades.csv"), row.names = FALSE, sep = ";")

# Naive Bayes (Classification) =================================================
## Best cities ------------------------------------------------------------
df_alunos_2015_melhores_cidades_nb <- df_alunos_2015_melhores_cidades %>% select(nivel_proficiencia, TX_RESP_Q013_alunos:TX_RESP_Q057_alunos)
melhores_cidades_nb_model <- naiveBayes(nivel_proficiencia ~ ., data = df_alunos_2015_melhores_cidades_nb)
# Predictions on the training data itself; confusion matrix of
# predicted vs. observed proficiency level.
melhores_cidades_NB_Predictions <- predict(melhores_cidades_nb_model, df_alunos_2015_melhores_cidades_nb)
melhores_cidades_confusion <- table(melhores_cidades_NB_Predictions, df_alunos_2015_melhores_cidades_nb$nivel_proficiencia)

## Worst cities --------------------------------------------------------------
df_alunos_2015_piores_cidades_nb <- df_alunos_2015_piores_cidades %>% select(nivel_proficiencia, TX_RESP_Q013_alunos:TX_RESP_Q057_alunos)
piores_cidades_nb_model <- naiveBayes(nivel_proficiencia ~ ., data = df_alunos_2015_piores_cidades_nb)
piores_cidades_NB_Predictions <- predict(piores_cidades_nb_model, df_alunos_2015_piores_cidades_nb)
piores_cidades_confusion <- table(piores_cidades_NB_Predictions, df_alunos_2015_piores_cidades_nb$nivel_proficiencia)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pooled_cov.R
\name{pooled_cov}
\alias{pooled_cov}
\title{Pooled Covariance Estimator}
\usage{
pooled_cov(x, ..., covEst = stats::cov)
}
\arguments{
\item{x}{data as a \code{data.frame}, \code{grouped_df}, or \code{resample} object}

\item{...}{other options passed to the estimation method}

\item{covEst}{covariance or precision matrix estimation method}
}
\value{
The pooled covariance or precision matrix with class \code{covariance} and
total degrees of freedom attribute \code{df} as a formula.
}
\description{
Pooled Covariance Estimator
}
\details{
This function returns the weighted average of a collection of group- or
class-specific covariance or precision matrices. The weights are
proportional to the degrees of freedom of each matrix. This matrix has the
total degrees of freedom stored within the \code{df} attribute as a formula
for simple evaluation by the \code{\link{degreesFreedom}} function.
}
\examples{
pooled_cov(iris, group = Species)
}
/man/pooled_cov.Rd
no_license
BenBarnard/covEstR
R
false
true
1,051
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pooled_cov.R
\name{pooled_cov}
\alias{pooled_cov}
\title{Pooled Covariance Estimator}
\usage{
pooled_cov(x, ..., covEst = stats::cov)
}
\arguments{
\item{x}{data as a \code{data.frame}, \code{grouped_df}, or \code{resample} object}

\item{...}{other options passed to the estimation method}

\item{covEst}{covariance or precision matrix estimation method}
}
\value{
The pooled covariance or precision matrix with class \code{covariance} and
total degrees of freedom attribute \code{df} as a formula.
}
\description{
Pooled Covariance Estimator
}
\details{
This function returns the weighted average of a collection of group- or
class-specific covariance or precision matrices. The weights are
proportional to the degrees of freedom of each matrix. This matrix has the
total degrees of freedom stored within the \code{df} attribute as a formula
for simple evaluation by the \code{\link{degreesFreedom}} function.
}
\examples{
pooled_cov(iris, group = Species)
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/8_mdtFunctions.R \name{Axn.mdt} \alias{Axn.mdt} \title{Multiple decrement life insurance} \usage{ Axn.mdt(object, x, n, i, decrement) } \arguments{ \item{object}{an \code{mdt} or \code{actuarialtable} object} \item{x}{policyholder's age} \item{n}{contract duration} \item{i}{interest rate} \item{decrement}{decrement category} } \value{ The scalar representing APV of the insurance } \description{ Function to evaluate multiple decrement insurances } \section{Warning}{ The function is experimental and very basic. Testing is still needed. Use at own risk! } \examples{ #creates a temporary mdt myTable<-data.frame(x=41:43,lx=c(800,776,752),d1=rep(8,3),d2=rep(16,3)) myMdt<-new("mdt",table=myTable,name="ciao") Axn.mdt(myMdt, x=41,n=2,i=.05,decrement="d2") }
/man/multidecrins.Rd
permissive
spedygiorgio/lifecontingencies
R
false
true
843
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/8_mdtFunctions.R \name{Axn.mdt} \alias{Axn.mdt} \title{Multiple decrement life insurance} \usage{ Axn.mdt(object, x, n, i, decrement) } \arguments{ \item{object}{an \code{mdt} or \code{actuarialtable} object} \item{x}{policyholder's age} \item{n}{contract duration} \item{i}{interest rate} \item{decrement}{decrement category} } \value{ The scalar representing APV of the insurance } \description{ Function to evaluate multiple decrement insurances } \section{Warning}{ The function is experimental and very basic. Testing is still needed. Use at own risk! } \examples{ #creates a temporary mdt myTable<-data.frame(x=41:43,lx=c(800,776,752),d1=rep(8,3),d2=rep(16,3)) myMdt<-new("mdt",table=myTable,name="ciao") Axn.mdt(myMdt, x=41,n=2,i=.05,decrement="d2") }
# Auto-extracted example script for EML::is_standardUnit.
library(EML)

### Name: is_standardUnit
### Title: is_standardUnit
### Aliases: is_standardUnit

### ** Examples

# Check whether a unit name appears in the EML standard unit dictionary.
is_standardUnit("amperePerMeter") # TRUE
is_standardUnit("speciesPerSquareMeter") # FALSE
/data/genthat_extracted_code/EML/examples/is_standardUnit.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
208
r
# Auto-extracted example script for EML::is_standardUnit.
library(EML)

### Name: is_standardUnit
### Title: is_standardUnit
### Aliases: is_standardUnit

### ** Examples

# Check whether a unit name appears in the EML standard unit dictionary.
is_standardUnit("amperePerMeter") # TRUE
is_standardUnit("speciesPerSquareMeter") # FALSE
#=====TarotreadR App=====# #Library Load-in. library(shiny) library(shinythemes) library(shiny) library(shinyjs) library(shinyanimate) library(tableHTML) #Loading the cards/data into the environment. source(here::here("scripts","tarot_process.R")) #Defining the UI for the TarotreadR==== ui <- fluidPage(title ="TarotreadR", #Using Darkly bootstrap theme as a base. theme = bslib::bs_theme(bootswatch = "darkly"), #Adding additional CSS onto darkly themed foundation. Parallax background won't display properly if CSS is sourced externally. Believe this is because I'm using the Darkly theme as a base. not sure. tags$head(HTML("<div style='position: absolute; overflow:hidden; width: 100%;'> <style>html { height: 100%; /* max-height: 100%; */ width: 100%; background-image: url('Bckgd4.png'), url('Bckgd3.png'), url('Bckgd2.png'), url('Bckgd1.png'); background-repeat: repeat-x 0 bottom; background-position: 0 100%, 0 100%, 0 100%, 0 100%, 0 0; animation: 100s loop infinite linear; } @keyframes loop { 100% { background-position: 60%, -800px 95%, 500px 50%, 1000px 100%, 400px 0; } } #keywordsone1{ text-align:center; border: 10px solid #000000; border-radius: 15px 15px 15px 15px; background-color:#1f1f1f; opacity: 0.95; margin: %50 auto; } #keywordstwo1{ text-align:center; border: 10px solid #000000; border-radius: 15px 0px 0px 15px; background-color:#1f1f1f; opacity: 0.95; } #keywordstwo2{ text-align:center; border: 10px solid #000000; border-radius: 0px 15px 15px 0px; background-color:#1f1f1f; opacity: 0.95; } #keywordsthree1{ text-align:center; border: 10px solid #000000; border-radius: 0px 15px 15px 0px; background-color:#1f1f1f; opacity: 0.95; } #keywordsthree2{ text-align:center; border: 10px solid #000000; border-radius: 0px 15px 15px 0px; background-color:#1f1f1f; opacity: 0.95; } #keywordsthree3{ text-align:center; border: 10px solid #000000; border-radius: 0px 15px 15px 0px; background-color:#1f1f1f; opacity: 0.95; } #keywordslookup{ text-align:center; border: 10px 
solid #000000; border-radius: 15px 15px 15px 15px; background-color:#1f1f1f; opacity: 0.95; margin: %50 auto; } </style>")), #Addding shinyjs to toggle info divs to remain hidden until action button is pressed useShinyjs(), #App title/header with custom font. HTML("<center>"), img(src = "tarotreadr.svg", width = "50%"), HTML("</center>"), tags$script(src = "mobile_detect.js"), #Creation of the app's tabs. tabsetPanel( #About panel==== tabPanel(style = "background-color: #222;", "About", HTML("<br>"), HTML('<center>'), "This Shiny app was created for the", HTML("<a href= 'https://blog.rstudio.com/2021/03/11/time-to-shiny/'>2021 Shiny Contest hosted by R Studio</a>"), "by", HTML("<a href = 'https://twitter.com/meghansharris'>Meghan Harris.</a>"), HTML("</center>"), HTML('&emsp;'), HTML("<h4>"),"General Information",HTML("</h4>"), "Tarot cards have existed for centuries. The tarot card deck consists of 78 total cards divided into two main categories. The major arcana and the minor arcana. The major arcana cards consist of 22 trump cards that are numbered from 0 to 21, starting with 'The Fool' and ending with 'The World' respectively. These cards are typically said to symbolize major life themes.",HTML("<br><br>"),"The other 56 cards are the minor arcana. The minor arcana consists of four suits (Pentacles, Swords, Cups, and Wands). The minor arcana cards are said to depict everyday themes that can change more frequently day-to-day. It is also believed that each court symbolizes a different domain of themes. For example, pentacles are associated with finances and material possessions. Cups are emotions and feelings. Wands are energy, motivation, and passion, and swords are thoughts and logic.",HTML("<br><br>"),"Although originally designed as a standard playing card game, tarot has involved into a practice of divination, self-help guidance, and general entertainment. That being said, this app should be used for entertainment purposes only. 
The thought behind this app was a means to practice reactivity in Shiny in an alternative and fun way.", HTML("<br>"), HTML("<h4>"),"How to Use This App",HTML("</h4><br>"), "This app provides four different tabs of interactivity:", HTML("<br><br>"), HTML("<dl><strong><dt>One Card Draw:</dt></strong> <dd>Typically used for simple 'Yes/No' questions but could also be used for simple meditation and a daily focus.</dd><br> <strong><dt>Two Card Draw:</dt></strong> <dd>Typically used for 'decision-making' questions but could also be used for instances where two perspectives are desired like 'mental and physical', 'yes and no', or 'situation and outcome'.</dd><br> <strong><dt>Three Card Draw:</dt></strong> <dd>Can be used for more involved spreads like 'past, present, future', 'situation, action required, outcome', and thinking through three separate choices to a decision that needs to be made.</dd><br> <strong><dt>Card Lookup:</dt></strong> <dd>Can be used to look up all tarot cards and keywords in the deck without drawing the cards for a reading.</dd></dl>")), #One Card Draw panel==== tabPanel("One Card Draw", actionButton("button1","Draw One Card",icon("hand-sparkles"), style="color: #fff; background-color: #000000; border-color: #2e6da4"), fixedRow(id = "row", withAnim(), column(width = 12, align = "center", div(style ="display: inline-block; center-align;",id="imageone1",tags$img(imageOutput("imageone1", inline = TRUE))), HTML("<br><br>"), hidden(div(style = "display: inline-block; center-align; width: 35%;", id="kwone1",tableOutput("keywordsone1")))))), #Two Card Draw panel==== tabPanel("Two Card Draw", actionButton("button2","Draw Two Cards",icon("hand-sparkles"), style="color: #fff; background-color: #000000; border-color: #2e6da4"), fixedRow(id = "row", withAnim(), column(width = 6, align = "right", div(style ="display: inline-block; right-align;",id="imagetwo1",tags$img(imageOutput("imagetwo1", inline = TRUE))), hidden(div(style = "display: inline-block; margin: 
3%; width: 35% ", id="kwtwo1",tableOutput("keywordstwo1")))), column(width = 6, align = "left", div(style ="display: inline-block; left-align;",id="imagetwo2",tags$img(imageOutput("imagetwo2", inline = TRUE))), hidden(div(style = "display: inline-block; margin: 3%; width: 35% ", id="kwtwo2",tableOutput("keywordstwo2")))))), #Three Card Draw panel==== tabPanel("Three Card Draw", actionButton("button3","Draw Three Cards", icon("hand-sparkles"), style="color: #fff; background-color: #000000; border-color: #2e6da4"), fixedRow(id = "row", withAnim(), column(width = 4, align = "right", div(style ="display: inline-block; center-align;", id="imagethree1", tags$img(imageOutput("imagethree1", inline = TRUE))), hidden(div(style = "display: inline-block; margin: 3%; width: 55% ", id="kwthree1",tableOutput("keywordsthree1")))), column(width = 4, align = "center", div(style ="display: inline-block; center-align;", id="imagethree2", tags$img(imageOutput("imagethree2", inline = TRUE))), hidden(div(style = "display: inline-block; margin: 3%; width: 55% ", id="kwthree2",tableOutput("keywordsthree2")))), column(width = 4, align = "left", div(style ="display: inline-block; center-align;", id="imagethree3", tags$img(imageOutput("imagethree3", inline = TRUE))), hidden(div(style = "display: inline-block; margin: 3%; width: 55% ", id="kwthree3",tableOutput("keywordsthree3")))))), # Card Lookup Tab==== tabPanel("Card Lookup", sidebarLayout(sidebarPanel(style = "background-color: #222; opacity: .90; height: '100%';", selectInput("cardlookup", "Tarot Card Lookup:", choices = mastercardset), selectInput("reversed", "Reversed Position?", choices = c("Yes","No")), actionButton("cardsearch", " Search", icon("search-plus"), style="color: #fff; background-color: #000000; border-color: #2e6da4"), ), mainPanel(fixedRow(id = "row", withAnim(), column(width = 12, align = "center", div(style ="display: inline-block; center-align;",id="lookupimage",tags$img(imageOutput("lookupimage", inline = TRUE))), 
HTML("<br><br>"), hidden(div(style = "display: inline-block; center-align; width: 35%;", id="kwlookup",tableOutput("keywordslookup"))))))) ))) #Defining the server logic to construct card/info randomizer==== server <- function(input, output, session) { #One Card Draw Server Logic==== #Defining reactivity for "button 2" = "two card draw". observeEvent(input$button1, { #Setting a randomized seed number for card pulls. seednum <- runif(1,0,10000) set.seed(seednum) #Randomly selecting one card from the deck. Pulling it's relative file path... onecardfile <- unlist(lapply(sample(tarotdeck$Card,1), function(x) tarotdeck$Path[tarotdeck$Card == x])) onecardfile1 <- onecardfile[1] #...and title for alt text. onecardalt1 <- tarotdeck$Card[tarotdeck$Path == onecardfile1] #Staging the first image for rendering. output$imageone1 <- renderImage({ # Return a list list(src = onecardfile1, contentType = "image/png", width = "20%", align = "center", alt = onecardalt1)}, deleteFile = FALSE) #Staging the keywords for the first card for rendering. output$keywordsone1 <- renderTable(tarotdeck %>% filter(Path == onecardfile1) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) }) #Two Card Draw Server Logic==== #Defining reactivity for "button 2" = "two card draw". observeEvent(input$button2, { #Setting a randomized seed number for card pulls. seednum <- runif(1,0,10000) set.seed(seednum) #Randomly selecting two cards from the deck and their positions... twocardpositions <- sample(reversalset,2, replace = TRUE) twocards <- sample(mastercardset,2, replace = FALSE) #Combining the cards and positions. for (i in seq_along(twocards)){ twocards[i] <- str_trim(paste(twocards[i],twocardpositions[i])) } #Pulling their relative file paths twocardfile <- unique(unlist(lapply(twocards, function(x) tarotdeck$Path[tarotdeck$Card == x]))) twocardfile1 <- twocardfile[1] twocardfile2 <- twocardfile[2] #...and titles for alt text. 
twocardalt1 <- tarotdeck$Card[tarotdeck$Path == twocardfile1] twocardalt2 <- tarotdeck$Card[tarotdeck$Path == twocardfile2] #Staging the first image for rendering. output$imagetwo1 <- renderImage({ # Return a list list(src = twocardfile1, contentType = "image/png", width = "40%", align = "center", alt = twocardalt1)}, deleteFile = FALSE) #Staging the second image for rendering. output$imagetwo2 <- renderImage({ # Return a list list(src = twocardfile2, contentType = "image/png", width = "40%", align = "center", alt = twocardalt2)}, deleteFile = FALSE) #Staging the keywords for the first card for rendering. output$keywordstwo1 <- renderTable(tarotdeck %>% filter(Path == twocardfile1) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) #Staging the keywords for the second card for rendering. output$keywordstwo2 <- renderTable(tarotdeck %>% filter(Path == twocardfile2) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) }) #Three Card Draw Server Logic==== #Defining reactivity for "button 3" = "three card draw". observeEvent(input$button3, { #Setting a randomized seed number for card pulls. seednum <- runif(1,0,10000) set.seed(seednum) #Randomly selecting two cards from the deck and their positions... threecardpositions <- sample(reversalset,3, replace = TRUE) threecards <- sample(mastercardset,3, replace = FALSE) #Combining the cards and positions. for (i in seq_along(threecards)){ threecards[i] <- str_trim(paste(threecards[i],threecardpositions[i])) } #Pulling their relative file paths threecardfile <- unique(unlist(lapply(threecards, function(x) tarotdeck$Path[tarotdeck$Card == x]))) threecardfile1 <- threecardfile[1] threecardfile2 <- threecardfile[2] threecardfile3 <- threecardfile[3] #...and titles for alt text. 
threecardalt1 <- tarotdeck$Card[tarotdeck$Path == threecardfile1] threecardalt2 <- tarotdeck$Card[tarotdeck$Path == threecardfile2] threecardalt3 <- tarotdeck$Card[tarotdeck$Path == threecardfile3] #Staging the first image for rendering. output$imagethree1 <- renderImage({ # Return a list list(src = threecardfile1, contentType = "image/png", width = "60%", align = "center", alt = threecardalt1)}, deleteFile = FALSE) #Staging the second image for rendering. output$imagethree2 <- renderImage({ # Return a list list(src = threecardfile2, contentType = "image/png", width = "60%", align = "center", alt = threecardalt2)}, deleteFile = FALSE) #Staging the third image for rendering. output$imagethree3 <- renderImage({ # Return a list list(src = threecardfile3, contentType = "image/png", width = "60%", align = "center", alt = threecardalt3)}, deleteFile = FALSE) #Staging the keywords for the first card for rendering. output$keywordsthree1 <- renderTable(tarotdeck %>% filter(Path == threecardfile1) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) #Staging the keywords for the second card for rendering. output$keywordsthree2 <- renderTable(tarotdeck %>% filter(Path == threecardfile2) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) #Staging the keywords for the third card for rendering. output$keywordsthree3 <- renderTable(tarotdeck %>% filter(Path == threecardfile3) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) }) #Staging for Cardlookup tab==== #Defining reactivity for "cardsearch" = "Card Lookup Button". observeEvent(input$cardsearch, { #Setting logic for card position. cardlookupname <- ifelse(input$reversed == "Yes",paste0(input$cardlookup," Reversed"),input$cardlookup) #Pulling the file path with card name. cardlookuppath <- tarotdeck$Path[tarotdeck$Card == cardlookupname] #Pulling alt text. 
cardlookupalt <- tarotdeck$Card[tarotdeck$Path == cardlookuppath] #Staging the first image for rendering. output$lookupimage <- renderImage({ # Return a list list(src = cardlookuppath, contentType = "image/png", width = "30%", align = "center", alt = cardlookupalt)}, deleteFile = FALSE) #Staging the keywords for the first card for rendering. output$keywordslookup <- renderTable(tarotdeck %>% filter(Path == cardlookuppath) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) }) #Adding the toggle for the keyword divs==== #This allows the information to only show AFTER the action button is clicked. observe({ toggle(id = "kwone1", condition = (input$button1 > 0)) toggle(id = "kwtwo1", condition = (input$button2 > 0)) toggle(id = "kwtwo2", condition = (input$button2 > 0)) toggle(id = "kwthree1", condition = (input$button3 > 0)) toggle(id = "kwthree2", condition = (input$button3 > 0)) toggle(id = "kwthree3", condition = (input$button3 > 0)) toggle(id = "kwlookup", condition = (input$cardsearch > 0)) }) # Card/info animations==== #One Card Pull=== observeEvent(input$button1, {startAnim(session, "imageone1", "fadeInDown") startAnim(session, "kwone1", "flipInX") delay(500, insertUI(selector = "#button1", where = "afterEnd", ui = tags$audio(src = "fairyglitter.wav", type = "audio/wav", autoplay = F, controls = NA, style="display:none;") ))}) #Two Card Pull=== observeEvent(input$button2, {startAnim(session, "imagetwo1", "fadeInLeft") startAnim(session, "imagetwo2", "fadeInRight") startAnim(session, "kwtwo1", "flipInX") startAnim(session, "kwtwo2", "flipInX") delay(500, insertUI(selector = "#button1", where = "afterEnd", ui = tags$audio(src = "fairyglitter.wav", type = "audio/wav", autoplay = F, controls = NA, style="display:none;") ))}) #Three Card Pull=== observeEvent(input$button3, {startAnim(session, "imagethree1", "fadeInLeft") startAnim(session, "imagethree2", "fadeInDown") startAnim(session, "imagethree3", "fadeInRight") 
startAnim(session, "kwthree1", "flipInX") startAnim(session, "kwthree2", "flipInX") startAnim(session, "kwthree3", "flipInX") delay(500, insertUI(selector = "#button1", where = "afterEnd", ui = tags$audio(src = "fairyglitter.wav", type = "audio/wav", autoplay = F, controls = NA, style="display:none;")))}) #Card Lookup Pull=== observeEvent(input$cardsearch, {startAnim(session, "lookupimage", "fadeInDown") startAnim(session, "kwlookup", "flipInX") delay(500, insertUI(selector = "#cardsearch", where = "afterEnd", ui = tags$audio(src = "ding.wav", type = "audio/wav", autoplay = F, controls = NA, style="display:none;")))}) } shinyApp(ui = ui, server = server)
/app.R
no_license
Meghansaha/TarotreadR
R
false
false
21,684
r
#=====TarotreadR App=====# #Library Load-in. library(shiny) library(shinythemes) library(shiny) library(shinyjs) library(shinyanimate) library(tableHTML) #Loading the cards/data into the environment. source(here::here("scripts","tarot_process.R")) #Defining the UI for the TarotreadR==== ui <- fluidPage(title ="TarotreadR", #Using Darkly bootstrap theme as a base. theme = bslib::bs_theme(bootswatch = "darkly"), #Adding additional CSS onto darkly themed foundation. Parallax background won't display properly if CSS is sourced externally. Believe this is because I'm using the Darkly theme as a base. not sure. tags$head(HTML("<div style='position: absolute; overflow:hidden; width: 100%;'> <style>html { height: 100%; /* max-height: 100%; */ width: 100%; background-image: url('Bckgd4.png'), url('Bckgd3.png'), url('Bckgd2.png'), url('Bckgd1.png'); background-repeat: repeat-x 0 bottom; background-position: 0 100%, 0 100%, 0 100%, 0 100%, 0 0; animation: 100s loop infinite linear; } @keyframes loop { 100% { background-position: 60%, -800px 95%, 500px 50%, 1000px 100%, 400px 0; } } #keywordsone1{ text-align:center; border: 10px solid #000000; border-radius: 15px 15px 15px 15px; background-color:#1f1f1f; opacity: 0.95; margin: %50 auto; } #keywordstwo1{ text-align:center; border: 10px solid #000000; border-radius: 15px 0px 0px 15px; background-color:#1f1f1f; opacity: 0.95; } #keywordstwo2{ text-align:center; border: 10px solid #000000; border-radius: 0px 15px 15px 0px; background-color:#1f1f1f; opacity: 0.95; } #keywordsthree1{ text-align:center; border: 10px solid #000000; border-radius: 0px 15px 15px 0px; background-color:#1f1f1f; opacity: 0.95; } #keywordsthree2{ text-align:center; border: 10px solid #000000; border-radius: 0px 15px 15px 0px; background-color:#1f1f1f; opacity: 0.95; } #keywordsthree3{ text-align:center; border: 10px solid #000000; border-radius: 0px 15px 15px 0px; background-color:#1f1f1f; opacity: 0.95; } #keywordslookup{ text-align:center; border: 10px 
solid #000000; border-radius: 15px 15px 15px 15px; background-color:#1f1f1f; opacity: 0.95; margin: %50 auto; } </style>")), #Addding shinyjs to toggle info divs to remain hidden until action button is pressed useShinyjs(), #App title/header with custom font. HTML("<center>"), img(src = "tarotreadr.svg", width = "50%"), HTML("</center>"), tags$script(src = "mobile_detect.js"), #Creation of the app's tabs. tabsetPanel( #About panel==== tabPanel(style = "background-color: #222;", "About", HTML("<br>"), HTML('<center>'), "This Shiny app was created for the", HTML("<a href= 'https://blog.rstudio.com/2021/03/11/time-to-shiny/'>2021 Shiny Contest hosted by R Studio</a>"), "by", HTML("<a href = 'https://twitter.com/meghansharris'>Meghan Harris.</a>"), HTML("</center>"), HTML('&emsp;'), HTML("<h4>"),"General Information",HTML("</h4>"), "Tarot cards have existed for centuries. The tarot card deck consists of 78 total cards divided into two main categories. The major arcana and the minor arcana. The major arcana cards consist of 22 trump cards that are numbered from 0 to 21, starting with 'The Fool' and ending with 'The World' respectively. These cards are typically said to symbolize major life themes.",HTML("<br><br>"),"The other 56 cards are the minor arcana. The minor arcana consists of four suits (Pentacles, Swords, Cups, and Wands). The minor arcana cards are said to depict everyday themes that can change more frequently day-to-day. It is also believed that each court symbolizes a different domain of themes. For example, pentacles are associated with finances and material possessions. Cups are emotions and feelings. Wands are energy, motivation, and passion, and swords are thoughts and logic.",HTML("<br><br>"),"Although originally designed as a standard playing card game, tarot has involved into a practice of divination, self-help guidance, and general entertainment. That being said, this app should be used for entertainment purposes only. 
The thought behind this app was a means to practice reactivity in Shiny in an alternative and fun way.", HTML("<br>"), HTML("<h4>"),"How to Use This App",HTML("</h4><br>"), "This app provides four different tabs of interactivity:", HTML("<br><br>"), HTML("<dl><strong><dt>One Card Draw:</dt></strong> <dd>Typically used for simple 'Yes/No' questions but could also be used for simple meditation and a daily focus.</dd><br> <strong><dt>Two Card Draw:</dt></strong> <dd>Typically used for 'decision-making' questions but could also be used for instances where two perspectives are desired like 'mental and physical', 'yes and no', or 'situation and outcome'.</dd><br> <strong><dt>Three Card Draw:</dt></strong> <dd>Can be used for more involved spreads like 'past, present, future', 'situation, action required, outcome', and thinking through three separate choices to a decision that needs to be made.</dd><br> <strong><dt>Card Lookup:</dt></strong> <dd>Can be used to look up all tarot cards and keywords in the deck without drawing the cards for a reading.</dd></dl>")), #One Card Draw panel==== tabPanel("One Card Draw", actionButton("button1","Draw One Card",icon("hand-sparkles"), style="color: #fff; background-color: #000000; border-color: #2e6da4"), fixedRow(id = "row", withAnim(), column(width = 12, align = "center", div(style ="display: inline-block; center-align;",id="imageone1",tags$img(imageOutput("imageone1", inline = TRUE))), HTML("<br><br>"), hidden(div(style = "display: inline-block; center-align; width: 35%;", id="kwone1",tableOutput("keywordsone1")))))), #Two Card Draw panel==== tabPanel("Two Card Draw", actionButton("button2","Draw Two Cards",icon("hand-sparkles"), style="color: #fff; background-color: #000000; border-color: #2e6da4"), fixedRow(id = "row", withAnim(), column(width = 6, align = "right", div(style ="display: inline-block; right-align;",id="imagetwo1",tags$img(imageOutput("imagetwo1", inline = TRUE))), hidden(div(style = "display: inline-block; margin: 
3%; width: 35% ", id="kwtwo1",tableOutput("keywordstwo1")))), column(width = 6, align = "left", div(style ="display: inline-block; left-align;",id="imagetwo2",tags$img(imageOutput("imagetwo2", inline = TRUE))), hidden(div(style = "display: inline-block; margin: 3%; width: 35% ", id="kwtwo2",tableOutput("keywordstwo2")))))), #Three Card Draw panel==== tabPanel("Three Card Draw", actionButton("button3","Draw Three Cards", icon("hand-sparkles"), style="color: #fff; background-color: #000000; border-color: #2e6da4"), fixedRow(id = "row", withAnim(), column(width = 4, align = "right", div(style ="display: inline-block; center-align;", id="imagethree1", tags$img(imageOutput("imagethree1", inline = TRUE))), hidden(div(style = "display: inline-block; margin: 3%; width: 55% ", id="kwthree1",tableOutput("keywordsthree1")))), column(width = 4, align = "center", div(style ="display: inline-block; center-align;", id="imagethree2", tags$img(imageOutput("imagethree2", inline = TRUE))), hidden(div(style = "display: inline-block; margin: 3%; width: 55% ", id="kwthree2",tableOutput("keywordsthree2")))), column(width = 4, align = "left", div(style ="display: inline-block; center-align;", id="imagethree3", tags$img(imageOutput("imagethree3", inline = TRUE))), hidden(div(style = "display: inline-block; margin: 3%; width: 55% ", id="kwthree3",tableOutput("keywordsthree3")))))), # Card Lookup Tab==== tabPanel("Card Lookup", sidebarLayout(sidebarPanel(style = "background-color: #222; opacity: .90; height: '100%';", selectInput("cardlookup", "Tarot Card Lookup:", choices = mastercardset), selectInput("reversed", "Reversed Position?", choices = c("Yes","No")), actionButton("cardsearch", " Search", icon("search-plus"), style="color: #fff; background-color: #000000; border-color: #2e6da4"), ), mainPanel(fixedRow(id = "row", withAnim(), column(width = 12, align = "center", div(style ="display: inline-block; center-align;",id="lookupimage",tags$img(imageOutput("lookupimage", inline = TRUE))), 
HTML("<br><br>"), hidden(div(style = "display: inline-block; center-align; width: 35%;", id="kwlookup",tableOutput("keywordslookup"))))))) ))) #Defining the server logic to construct card/info randomizer==== server <- function(input, output, session) { #One Card Draw Server Logic==== #Defining reactivity for "button 2" = "two card draw". observeEvent(input$button1, { #Setting a randomized seed number for card pulls. seednum <- runif(1,0,10000) set.seed(seednum) #Randomly selecting one card from the deck. Pulling it's relative file path... onecardfile <- unlist(lapply(sample(tarotdeck$Card,1), function(x) tarotdeck$Path[tarotdeck$Card == x])) onecardfile1 <- onecardfile[1] #...and title for alt text. onecardalt1 <- tarotdeck$Card[tarotdeck$Path == onecardfile1] #Staging the first image for rendering. output$imageone1 <- renderImage({ # Return a list list(src = onecardfile1, contentType = "image/png", width = "20%", align = "center", alt = onecardalt1)}, deleteFile = FALSE) #Staging the keywords for the first card for rendering. output$keywordsone1 <- renderTable(tarotdeck %>% filter(Path == onecardfile1) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) }) #Two Card Draw Server Logic==== #Defining reactivity for "button 2" = "two card draw". observeEvent(input$button2, { #Setting a randomized seed number for card pulls. seednum <- runif(1,0,10000) set.seed(seednum) #Randomly selecting two cards from the deck and their positions... twocardpositions <- sample(reversalset,2, replace = TRUE) twocards <- sample(mastercardset,2, replace = FALSE) #Combining the cards and positions. for (i in seq_along(twocards)){ twocards[i] <- str_trim(paste(twocards[i],twocardpositions[i])) } #Pulling their relative file paths twocardfile <- unique(unlist(lapply(twocards, function(x) tarotdeck$Path[tarotdeck$Card == x]))) twocardfile1 <- twocardfile[1] twocardfile2 <- twocardfile[2] #...and titles for alt text. 
twocardalt1 <- tarotdeck$Card[tarotdeck$Path == twocardfile1] twocardalt2 <- tarotdeck$Card[tarotdeck$Path == twocardfile2] #Staging the first image for rendering. output$imagetwo1 <- renderImage({ # Return a list list(src = twocardfile1, contentType = "image/png", width = "40%", align = "center", alt = twocardalt1)}, deleteFile = FALSE) #Staging the second image for rendering. output$imagetwo2 <- renderImage({ # Return a list list(src = twocardfile2, contentType = "image/png", width = "40%", align = "center", alt = twocardalt2)}, deleteFile = FALSE) #Staging the keywords for the first card for rendering. output$keywordstwo1 <- renderTable(tarotdeck %>% filter(Path == twocardfile1) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) #Staging the keywords for the second card for rendering. output$keywordstwo2 <- renderTable(tarotdeck %>% filter(Path == twocardfile2) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) }) #Three Card Draw Server Logic==== #Defining reactivity for "button 3" = "three card draw". observeEvent(input$button3, { #Setting a randomized seed number for card pulls. seednum <- runif(1,0,10000) set.seed(seednum) #Randomly selecting two cards from the deck and their positions... threecardpositions <- sample(reversalset,3, replace = TRUE) threecards <- sample(mastercardset,3, replace = FALSE) #Combining the cards and positions. for (i in seq_along(threecards)){ threecards[i] <- str_trim(paste(threecards[i],threecardpositions[i])) } #Pulling their relative file paths threecardfile <- unique(unlist(lapply(threecards, function(x) tarotdeck$Path[tarotdeck$Card == x]))) threecardfile1 <- threecardfile[1] threecardfile2 <- threecardfile[2] threecardfile3 <- threecardfile[3] #...and titles for alt text. 
threecardalt1 <- tarotdeck$Card[tarotdeck$Path == threecardfile1] threecardalt2 <- tarotdeck$Card[tarotdeck$Path == threecardfile2] threecardalt3 <- tarotdeck$Card[tarotdeck$Path == threecardfile3] #Staging the first image for rendering. output$imagethree1 <- renderImage({ # Return a list list(src = threecardfile1, contentType = "image/png", width = "60%", align = "center", alt = threecardalt1)}, deleteFile = FALSE) #Staging the second image for rendering. output$imagethree2 <- renderImage({ # Return a list list(src = threecardfile2, contentType = "image/png", width = "60%", align = "center", alt = threecardalt2)}, deleteFile = FALSE) #Staging the third image for rendering. output$imagethree3 <- renderImage({ # Return a list list(src = threecardfile3, contentType = "image/png", width = "60%", align = "center", alt = threecardalt3)}, deleteFile = FALSE) #Staging the keywords for the first card for rendering. output$keywordsthree1 <- renderTable(tarotdeck %>% filter(Path == threecardfile1) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) #Staging the keywords for the second card for rendering. output$keywordsthree2 <- renderTable(tarotdeck %>% filter(Path == threecardfile2) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) #Staging the keywords for the third card for rendering. output$keywordsthree3 <- renderTable(tarotdeck %>% filter(Path == threecardfile3) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) }) #Staging for Cardlookup tab==== #Defining reactivity for "cardsearch" = "Card Lookup Button". observeEvent(input$cardsearch, { #Setting logic for card position. cardlookupname <- ifelse(input$reversed == "Yes",paste0(input$cardlookup," Reversed"),input$cardlookup) #Pulling the file path with card name. cardlookuppath <- tarotdeck$Path[tarotdeck$Card == cardlookupname] #Pulling alt text. 
cardlookupalt <- tarotdeck$Card[tarotdeck$Path == cardlookuppath] #Staging the first image for rendering. output$lookupimage <- renderImage({ # Return a list list(src = cardlookuppath, contentType = "image/png", width = "30%", align = "center", alt = cardlookupalt)}, deleteFile = FALSE) #Staging the keywords for the first card for rendering. output$keywordslookup <- renderTable(tarotdeck %>% filter(Path == cardlookuppath) %>% select(Keywords), width = "100%", align ="c", sanitize.text.function=identity, bordered = FALSE) }) #Adding the toggle for the keyword divs==== #This allows the information to only show AFTER the action button is clicked. observe({ toggle(id = "kwone1", condition = (input$button1 > 0)) toggle(id = "kwtwo1", condition = (input$button2 > 0)) toggle(id = "kwtwo2", condition = (input$button2 > 0)) toggle(id = "kwthree1", condition = (input$button3 > 0)) toggle(id = "kwthree2", condition = (input$button3 > 0)) toggle(id = "kwthree3", condition = (input$button3 > 0)) toggle(id = "kwlookup", condition = (input$cardsearch > 0)) }) # Card/info animations==== #One Card Pull=== observeEvent(input$button1, {startAnim(session, "imageone1", "fadeInDown") startAnim(session, "kwone1", "flipInX") delay(500, insertUI(selector = "#button1", where = "afterEnd", ui = tags$audio(src = "fairyglitter.wav", type = "audio/wav", autoplay = F, controls = NA, style="display:none;") ))}) #Two Card Pull=== observeEvent(input$button2, {startAnim(session, "imagetwo1", "fadeInLeft") startAnim(session, "imagetwo2", "fadeInRight") startAnim(session, "kwtwo1", "flipInX") startAnim(session, "kwtwo2", "flipInX") delay(500, insertUI(selector = "#button1", where = "afterEnd", ui = tags$audio(src = "fairyglitter.wav", type = "audio/wav", autoplay = F, controls = NA, style="display:none;") ))}) #Three Card Pull=== observeEvent(input$button3, {startAnim(session, "imagethree1", "fadeInLeft") startAnim(session, "imagethree2", "fadeInDown") startAnim(session, "imagethree3", "fadeInRight") 
startAnim(session, "kwthree1", "flipInX") startAnim(session, "kwthree2", "flipInX") startAnim(session, "kwthree3", "flipInX") delay(500, insertUI(selector = "#button1", where = "afterEnd", ui = tags$audio(src = "fairyglitter.wav", type = "audio/wav", autoplay = F, controls = NA, style="display:none;")))}) #Card Lookup Pull=== observeEvent(input$cardsearch, {startAnim(session, "lookupimage", "fadeInDown") startAnim(session, "kwlookup", "flipInX") delay(500, insertUI(selector = "#cardsearch", where = "afterEnd", ui = tags$audio(src = "ding.wav", type = "audio/wav", autoplay = F, controls = NA, style="display:none;")))}) } shinyApp(ui = ui, server = server)
source('FinModels.R')
source('FundVariables.R')

# Load Data ---------------------------------------------------------------

#' Download fundamental data for a set of tickers from Yahoo Finance.
#'
#' @param stockCodes Character vector of ticker symbols,
#'   e.g. c("MSFT", "BAC").
#' @param varStr Yahoo "f=" format string selecting the fields to return
#'   (defaults to 'snghw': symbol, name, day's low, day's high,
#'   52-week range).
#' @return A data.frame with one row per symbol and one column per field.
LoadFundData <- function(stockCodes, varStr = 'snghw') {
  prefix <- "http://finance.yahoo.com/d/quotes.csv?s="
  varfix <- "&f="
  # BUG FIX: the original grew the URL with paste() in a loop; paste()'s
  # default sep = " " inserted a space before every symbol, yielding an
  # invalid URL. Yahoo expects the symbols joined by "+".
  url <- paste0(prefix, paste(stockCodes, collapse = "+"), varfix, varStr)
  # BUG FIX: the original read env$data.varStr, but no object `env` is
  # defined anywhere in this file; the field string is now an explicit
  # argument with the same default the test section used.
  read.csv(url, header = FALSE)
}

# Testing -----------------------------------------------------------------

varSymbols <- LoadVarSymbols()
stockCodes <- c('MSFT', 'BAC', 'CAT', 'AMZN'); stockCodes
varStr <- 'snghw'
data <- LoadFundData(stockCodes, varStr); data
/Finance/Fundamental.R
no_license
miltonluaces/data_science_in_R
R
false
false
742
r
source('FinModels.R') source('FundVariables.R') # Load Data --------------------------------------------------------------------------------------------------------- LoadFundData <- function(stockCodes) { prefix = "http://finance.yahoo.com/d/quotes.csv?s=" varfix = "&f=" url = prefix for(i in 1:length(stockCodes)) { url = paste(url, stockCodes[i]) } url = paste(url, varfix, env$data.varStr) data = read.csv(url, header=FALSE) data } # Testing --------------------------------------------------------------------------------------------------------- varSymbols = LoadVarSymbols() stockCodes = c('MSFT', 'BAC', 'CAT', 'AMZN' ); stockCodes varStr = 'snghw' data = LoadFundData(stockCodes); data
# Read the Newick tree for alignment 9268_0, strip its root, and write
# the unrooted topology back out for downstream codeml processing.
library(ape)

rooted_tree <- read.tree("9268_0.txt")
write.tree(unroot(rooted_tree), file = "9268_0_unrooted.txt")
/codeml_files/newick_trees_processed/9268_0/rinput.R
no_license
DaniBoo/cyanobacteria_project
R
false
false
135
r
library(ape) testtree <- read.tree("9268_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="9268_0_unrooted.txt")
\name{mk_codons.R}
\alias{mk_codons.R}
\title{
Function to separate DNA sequences into codons
}
\description{
The function transforms a DNA sequence into a series of codons.
}
\usage{
mk_codons.R(dna)
}
\arguments{
  \item{dna}{
input your DNA sequence
}
}
\details{
%%  ~~ If necessary, more details than the description above ~~
}
\value{
The return value of the function is a set of strings.
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Guangya Shen
}
\note{
%%  ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## input sequence should not be less than 3 nucleotides
mk_codons(dna = "ATG")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
/man/mk_codons.Rd
permissive
rforbiodatascience21/2021_group_5_rpackage
R
false
false
1,080
rd
\name{mk_codons.R}
\alias{mk_codons.R}
\title{
Function to separate DNA sequences into codons
}
\description{
The function transforms a DNA sequence into a series of codons.
}
\usage{
mk_codons.R(dna)
}
\arguments{
  \item{dna}{
input your DNA sequence
}
}
\details{
%%  ~~ If necessary, more details than the description above ~~
}
\value{
The return value of the function is a set of strings.
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Guangya Shen
}
\note{
%%  ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## input sequence should not be less than 3 nucleotides
mk_codons(dna = "ATG")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
# first we need to read and clean up the data a little # this inclues altering the structure of data matrix that SAS returns # since every probability is a response, each player for each hole, year, and round # has six probabilities. We need each row to contain all six probs for each hole, # round and year. Going to use package tidyr, super simple data shape managing. setwd("~/Documents/masters") temp <- read.csv("data/Player probabilities - model 6.csv") names <- names(temp) names(temp) <- c(names[-(39:40)], "pwith", "pwithout") rm(names) masters2016 <- read.csv("~/Documents/masters/data/ranks.2016.csv") masters2016 <- masters2016[order(masters2016$name), ] modtest <- numeric() # all these variables prevent us from turning the table # from "long" to "wide". So they will need to be NULL # remove the columns that cause a problem with tidyr::spread temp$rtp_score <- NULL; temp$score <- NULL; temp$pwithout <- NULL df <- tidyr::spread(temp, Level, pwith) # now we have a "wide" table ### calculate the probs for the individual score on the hole # there are some holes where we want prob = 0 for triple bogey ### calculate the probs for the individual score on the hole df$P.triple <- ifelse(is.na(df$`3`), 0, df$`3`) df$P.double <- ifelse(is.na(df$`3`), df$`2`, df$`2` - df$`3`) df$P.bogey <- df$`1` - df$`2` df$P.par <- df$`0` - df$`1` df$P.birdie <- ifelse(is.na(df$`-1`), 1 - df$`0`, df$`-1` - df$`0`) df$P.eagle <- ifelse(!is.na(df$`-1`), 1 - df$`-1`, 0) # get rid of some columns we don't need anymore df[, c("-1", "0", "1", "2", "3")] <- NULL df.sim <- df; rm(df) rm(temp) # done with this now # create 2 matrices with the probs for each hole for rounds 1 and 2 probs1 <- df.sim[df.sim$round == 1, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs2 <- df.sim[df.sim$round == 2, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] finish <- data.frame() topar <- c(3, 2, 1, 0, -1, -2) ########################### START of SIMULATIONS 
################## # here is the beginning of the loop. Each loop is one masters. n <- 1000 # 1 - 1000 print(system.time(for(i in 1:n) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = 
"min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. if(!file.exists("data/sims")) dir.create("data/sims") write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 1001 - 2000 print(system.time(for(i in (n+1):(n*2)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. 
Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. 
write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 2001 - 3000 print(system.time(for(i in ((n*2) + 1):(n*3)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] 
round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 3001 - 4000 print(system.time(for(i in ((n*3) + 1):(n*4)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. 
Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. 
write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 4001 - 5000 print(system.time(for(i in ((n*4) + 1):(n*5)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] 
round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 5001 - 6000 print(system.time(for(i in ((n*5) + 1):(n*6)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. 
Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. 
write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 6001 - 7000 print(system.time(for(i in ((n*6) + 1):(n*7)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] 
round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 7001 - 8000 print(system.time(for(i in ((n*7) + 1):(n*8)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. 
Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. 
write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 8001 - 9000 print(system.time(for(i in ((n*8) + 1):(n*9)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] 
round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 9001 - 10000 print(system.time(for(i in ((n*9) + 1):(n*10)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. 
Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) #rm(list = ls()) ########################### END of SIMULATIONS ###################### hist(modtest, col = "green", main = "10000 Spearman's Rho", xlab = "Model 5 With Player Effect") print(quantile(modtest, c(0.025, 0.975))) print(summary(modtest))
/R/obsolete/model6WITH.R
no_license
codyfrisby/masters
R
false
false
31,896
r
# first we need to read and clean up the data a little # this inclues altering the structure of data matrix that SAS returns # since every probability is a response, each player for each hole, year, and round # has six probabilities. We need each row to contain all six probs for each hole, # round and year. Going to use package tidyr, super simple data shape managing. setwd("~/Documents/masters") temp <- read.csv("data/Player probabilities - model 6.csv") names <- names(temp) names(temp) <- c(names[-(39:40)], "pwith", "pwithout") rm(names) masters2016 <- read.csv("~/Documents/masters/data/ranks.2016.csv") masters2016 <- masters2016[order(masters2016$name), ] modtest <- numeric() # all these variables prevent us from turning the table # from "long" to "wide". So they will need to be NULL # remove the columns that cause a problem with tidyr::spread temp$rtp_score <- NULL; temp$score <- NULL; temp$pwithout <- NULL df <- tidyr::spread(temp, Level, pwith) # now we have a "wide" table ### calculate the probs for the individual score on the hole # there are some holes where we want prob = 0 for triple bogey ### calculate the probs for the individual score on the hole df$P.triple <- ifelse(is.na(df$`3`), 0, df$`3`) df$P.double <- ifelse(is.na(df$`3`), df$`2`, df$`2` - df$`3`) df$P.bogey <- df$`1` - df$`2` df$P.par <- df$`0` - df$`1` df$P.birdie <- ifelse(is.na(df$`-1`), 1 - df$`0`, df$`-1` - df$`0`) df$P.eagle <- ifelse(!is.na(df$`-1`), 1 - df$`-1`, 0) # get rid of some columns we don't need anymore df[, c("-1", "0", "1", "2", "3")] <- NULL df.sim <- df; rm(df) rm(temp) # done with this now # create 2 matrices with the probs for each hole for rounds 1 and 2 probs1 <- df.sim[df.sim$round == 1, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs2 <- df.sim[df.sim$round == 2, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] finish <- data.frame() topar <- c(3, 2, 1, 0, -1, -2) ########################### START of SIMULATIONS 
################## # here is the beginning of the loop. Each loop is one masters. n <- 1000 # 1 - 1000 print(system.time(for(i in 1:n) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = 
"min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. if(!file.exists("data/sims")) dir.create("data/sims") write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 1001 - 2000 print(system.time(for(i in (n+1):(n*2)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. 
Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. 
write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 2001 - 3000 print(system.time(for(i in ((n*2) + 1):(n*3)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] 
round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 3001 - 4000 print(system.time(for(i in ((n*3) + 1):(n*4)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. 
Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. 
write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 4001 - 5000 print(system.time(for(i in ((n*4) + 1):(n*5)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] 
round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 5001 - 6000 print(system.time(for(i in ((n*5) + 1):(n*6)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. 
Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. 
write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 6001 - 7000 print(system.time(for(i in ((n*6) + 1):(n*7)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] 
round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 7001 - 8000 print(system.time(for(i in ((n*7) + 1):(n*8)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. 
Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. 
write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 8001 - 9000 print(system.time(for(i in ((n*8) + 1):(n*9)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] 
round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) finish <- data.frame() # 9001 - 10000 print(system.time(for(i in ((n*9) + 1):(n*10)) { # simulate rounds 1 and 2 score1 <- apply(probs1, 1, function(x) # simulate round 1 sample(topar, 1, prob = x)) score2 <- apply(probs2, 1, function(x) # simulate round 2 sample(topar, 1, prob = x)) df.sim$score1[df.sim$round == 1] <- score1 + df.sim$par[df.sim$round == 1] df.sim$score1[df.sim$round == 2] <- score2 + df.sim$par[df.sim$round == 2] # summarising the first two rounds df.sim$friday <- ave(df.sim$score1, df.sim$name, df.sim$year, FUN = function(x) sum(x, na.rm = TRUE)) # need to create the cut. Here we will cut all players who are not # within 10 shots of the lead. cut <- df.sim[!duplicated(df.sim$name), c("name", "friday")] cut$rank <- ave(cut$friday, FUN = function(x) rank(x, ties.method = "min")) # the masters likes to cut players who aren't within 10 shots of the # leader and/or top 50. 
Here we do that notcut <- cut[cut$friday <= min(cut$friday) + 10 | cut$rank <= 50, ] cut <- cut[!(cut$friday <= min(cut$friday) + 10 | cut$rank <= 50), ] dfcut <- df.sim[df.sim$name %in% cut$name, ] dfcut <- merge(dfcut, cut) dfnotcut <- df.sim[df.sim$name %in% notcut$name, ] # simulate rounds 3 and 4 probs3 <- dfnotcut[dfnotcut$round == 3, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] probs4 <- dfnotcut[dfnotcut$round == 4, c("P.triple", "P.double", "P.bogey", "P.par", "P.birdie", "P.eagle")] score3 <- apply(probs3, 1, function(x) sample(topar, 1, prob = x)) score4 <- apply(probs4, 1, function(x) sample(topar, 1, prob = x)) dfnotcut$score1[dfnotcut$round == 3] <- score3 + dfnotcut$par[dfnotcut$round == 3] dfnotcut$score1[dfnotcut$round == 4] <- score4 + dfnotcut$par[dfnotcut$round == 4] # summarising the next two rounds dfnotcut$sunday <- ave(dfnotcut$score1, dfnotcut$name, FUN = function(x) sum(x, na.rm = TRUE)) round4 <- dfnotcut[!duplicated(dfnotcut$name), ] round4$rank <- rank(round4$sunday, ties.method = "min") dfnotcut <- merge(dfnotcut, round4[, c("name", "rank")], by = "name", all.x = TRUE) cut$sunday <- NA finish1 <- rbind(round4[, c("name", "rank", "friday", "sunday")], cut[, c("name", "rank", "friday", "sunday")]) finish1$simulationID <- as.factor(i) finish1 <- finish1[order(finish1$name), ] modtest[i] <- cor(X$rank, finish1$rank, method = "spearman") finish <- rbind(finish, finish1) rm(finish1); rm(dfnotcut); rm(dfcut); rm(cut); rm(notcut) rm(probs3); rm(probs4); rm(round4); rm(score1); rm(score2) rm(score3); rm(score4) })) # writing to file so I can remove large frame from RAM. write.csv(finish, paste("~/Documents/masters/data/sims/finish", i, ".csv", sep = "")) rm(finish) #rm(list = ls()) ########################### END of SIMULATIONS ###################### hist(modtest, col = "green", main = "10000 Spearman's Rho", xlab = "Model 5 With Player Effect") print(quantile(modtest, c(0.025, 0.975))) print(summary(modtest))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/impl-idf.R \name{assign_idf_value_default} \alias{assign_idf_value_default} \title{Assign default field values} \usage{ assign_idf_value_default(idd_env, idf_env, dt_value) } \arguments{ \item{idd_env}{An environment or list contains IDD tables including class, field, and reference.} \item{idf_env}{An environment or list contains IDF tables including object, value, and reference.} \item{dt_value}{A \code{\link[data.table:data.table]{data.table::data.table()}} that contains object value data.} } \value{ The updated version of \code{\link[data.table:data.table]{data.table::data.table()}}. } \description{ Assign default field values } \keyword{internal}
/man/assign_idf_value_default.Rd
permissive
hongyuanjia/eplusr
R
false
true
739
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/impl-idf.R \name{assign_idf_value_default} \alias{assign_idf_value_default} \title{Assign default field values} \usage{ assign_idf_value_default(idd_env, idf_env, dt_value) } \arguments{ \item{idd_env}{An environment or list contains IDD tables including class, field, and reference.} \item{idf_env}{An environment or list contains IDF tables including object, value, and reference.} \item{dt_value}{A \code{\link[data.table:data.table]{data.table::data.table()}} that contains object value data.} } \value{ The updated version of \code{\link[data.table:data.table]{data.table::data.table()}}. } \description{ Assign default field values } \keyword{internal}
/ElasticNet예제.R
no_license
Eeun-ju/DataAnalysis-Basic
R
false
false
813
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compare.R \name{frob} \alias{frob} \title{A function to} \usage{ frob(CS, NS) } \value{ val } \description{ A function to }
/man/frob.Rd
no_license
sahatava/MicNet
R
false
true
202
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compare.R \name{frob} \alias{frob} \title{A function to} \usage{ frob(CS, NS) } \value{ val } \description{ A function to }
library(NLP) library(tm) library(RColorBrewer) library(wordcloud) library(memoise) library(tibble) library(data.table) library(NLP) library(tm) library(jiebaRD) library(jiebaR) library(magrittr) library(RColorBrewer) library(wordcloud) news_data = read.csv("all_news.csv", sep = "") # Using "memoise" to automatically cache the results getTermMatrix <- memoise(function(Media,Candi,Month) { # Careful not to let just any name slip in here; news <- news_data[(news_data$media == Media)&(news_data$candi == Candi)&(news_data$month == Month),] myCorpus = Corpus(VectorSource(news$content)) toSpace <- content_transformer(function(x,pattern){ return(gsub(pattern," ",x)) }) clean_doc <- function(docs){ clean_words <- c("[A-Za-z0-9]","、","《","『","』","【","】","/",",","。","!","「","(","」",")","\n",";",">","<","<",">","分享","記者","攝影","提及","表示","報導","我們","他們","的","也","都","就","與","但","是","在","和","及","為","或","且","有","含","為達最佳瀏覽效果,建議使用 Chrome、Firefox 或 Internet Explorer 10","新聞送上來!快加入自由電子報APP、LINE好友 2018年6月13日‧星期三‧戊戌年四月卅 注目新聞 1 中國卡車罷工延燒 司機高喊「打倒共產黨」! 2 新版身分證票選出爐 設計師魯少綸奪首獎! 3 洋腸又來!夜店外帶18歲妹 網站取名「台女很容易」... 
4 侯友宜妻1塊土地99個門牌 蘇貞昌:太荒唐 5 連俞涵超正弟媳曝光…根本巨乳林志玲,","以上版本的瀏覽器。 爆") for(i in 1:length(clean_words)){ docs <- tm_map(docs,toSpace, clean_words[i]) } return(docs) } myCorpus <- clean_doc(myCorpus) mixseg = worker() segment <- c("柯文哲","姚文智","丁守中","台北市長","選舉","候選人","台灣","選票","柯市長","民進黨","國民黨","台北市民","市民") new_user_word(mixseg,segment) # 有詞頻之後就可以去畫文字雲 jieba_tokenizer=function(d){ unlist(segment(d[[1]],mixseg)) } seg = lapply(myCorpus, jieba_tokenizer) freqFrame = as.data.frame(table(unlist(seg))) # 清除單字 for(i in c(1:length(freqFrame$Var1))){ if((freqFrame$Var1[i] %>% as.character %>% nchar) == 1){ freqFrame[i,] <- NA } } freqFrame <- na.omit(freqFrame) write.table(freqFrame, paste(Media,"_",Candi,"_",Month,".csv"), sep = ",", row.names = FALSE) }) # apple getTermMatrix("apple","Ko",1) getTermMatrix("apple","Ko",2) getTermMatrix("apple","Ko",3) getTermMatrix("apple","Ko",4) getTermMatrix("apple","Ko",5) getTermMatrix("apple","Di",1) getTermMatrix("apple","Di",2) getTermMatrix("apple","Di",3) getTermMatrix("apple","Di",4) getTermMatrix("apple","Di",5) getTermMatrix("apple","Yao",1) getTermMatrix("apple","Yao",2) getTermMatrix("apple","Yao",3) getTermMatrix("apple","Yao",4) getTermMatrix("apple","Yao",5) # ct getTermMatrix("ct","Ko",1) getTermMatrix("ct","Ko",2) getTermMatrix("ct","Ko",3) getTermMatrix("ct","Ko",4) getTermMatrix("ct","Ko",5) getTermMatrix("ct","Di",1) getTermMatrix("ct","Di",2) getTermMatrix("ct","Di",3) getTermMatrix("ct","Di",4) getTermMatrix("ct","Di",5) getTermMatrix("ct","Yao",1) getTermMatrix("ct","Yao",2) getTermMatrix("ct","Yao",3) getTermMatrix("ct","Yao",4) getTermMatrix("ct","Yao",5) # ltn getTermMatrix("ltn","Ko",1) getTermMatrix("ltn","Ko",2) getTermMatrix("ltn","Ko",3) getTermMatrix("ltn","Ko",4) getTermMatrix("ltn","Ko",5) getTermMatrix("ltn","Di",1) getTermMatrix("ltn","Di",2) getTermMatrix("ltn","Di",3) getTermMatrix("ltn","Di",4) getTermMatrix("ltn","Di",5) getTermMatrix("ltn","Yao",1) getTermMatrix("ltn","Yao",2) getTermMatrix("ltn","Yao",3) 
getTermMatrix("ltn","Yao",4) getTermMatrix("ltn","Yao",5) # udn getTermMatrix("udn","Ko",1) getTermMatrix("udn","Ko",2) getTermMatrix("udn","Ko",3) getTermMatrix("udn","Ko",4) getTermMatrix("udn","Ko",5) getTermMatrix("udn","Di",1) getTermMatrix("udn","Di",2) getTermMatrix("udn","Di",3) getTermMatrix("udn","Di",4) getTermMatrix("udn","Di",5) getTermMatrix("udn","Yao",1) getTermMatrix("udn","Yao",2) getTermMatrix("udn","Yao",3) getTermMatrix("udn","Yao",4) getTermMatrix("udn","Yao",5)
/Final_Project/Wordcloud/wordcloud_all_news-shiny/get_freq_function.R
no_license
Suuuuny/NTU-CSX-DataScience-group5
R
false
false
4,207
r
library(NLP) library(tm) library(RColorBrewer) library(wordcloud) library(memoise) library(tibble) library(data.table) library(NLP) library(tm) library(jiebaRD) library(jiebaR) library(magrittr) library(RColorBrewer) library(wordcloud) news_data = read.csv("all_news.csv", sep = "") # Using "memoise" to automatically cache the results getTermMatrix <- memoise(function(Media,Candi,Month) { # Careful not to let just any name slip in here; news <- news_data[(news_data$media == Media)&(news_data$candi == Candi)&(news_data$month == Month),] myCorpus = Corpus(VectorSource(news$content)) toSpace <- content_transformer(function(x,pattern){ return(gsub(pattern," ",x)) }) clean_doc <- function(docs){ clean_words <- c("[A-Za-z0-9]","、","《","『","』","【","】","/",",","。","!","「","(","」",")","\n",";",">","<","<",">","分享","記者","攝影","提及","表示","報導","我們","他們","的","也","都","就","與","但","是","在","和","及","為","或","且","有","含","為達最佳瀏覽效果,建議使用 Chrome、Firefox 或 Internet Explorer 10","新聞送上來!快加入自由電子報APP、LINE好友 2018年6月13日‧星期三‧戊戌年四月卅 注目新聞 1 中國卡車罷工延燒 司機高喊「打倒共產黨」! 2 新版身分證票選出爐 設計師魯少綸奪首獎! 3 洋腸又來!夜店外帶18歲妹 網站取名「台女很容易」... 
4 侯友宜妻1塊土地99個門牌 蘇貞昌:太荒唐 5 連俞涵超正弟媳曝光…根本巨乳林志玲,","以上版本的瀏覽器。 爆") for(i in 1:length(clean_words)){ docs <- tm_map(docs,toSpace, clean_words[i]) } return(docs) } myCorpus <- clean_doc(myCorpus) mixseg = worker() segment <- c("柯文哲","姚文智","丁守中","台北市長","選舉","候選人","台灣","選票","柯市長","民進黨","國民黨","台北市民","市民") new_user_word(mixseg,segment) # 有詞頻之後就可以去畫文字雲 jieba_tokenizer=function(d){ unlist(segment(d[[1]],mixseg)) } seg = lapply(myCorpus, jieba_tokenizer) freqFrame = as.data.frame(table(unlist(seg))) # 清除單字 for(i in c(1:length(freqFrame$Var1))){ if((freqFrame$Var1[i] %>% as.character %>% nchar) == 1){ freqFrame[i,] <- NA } } freqFrame <- na.omit(freqFrame) write.table(freqFrame, paste(Media,"_",Candi,"_",Month,".csv"), sep = ",", row.names = FALSE) }) # apple getTermMatrix("apple","Ko",1) getTermMatrix("apple","Ko",2) getTermMatrix("apple","Ko",3) getTermMatrix("apple","Ko",4) getTermMatrix("apple","Ko",5) getTermMatrix("apple","Di",1) getTermMatrix("apple","Di",2) getTermMatrix("apple","Di",3) getTermMatrix("apple","Di",4) getTermMatrix("apple","Di",5) getTermMatrix("apple","Yao",1) getTermMatrix("apple","Yao",2) getTermMatrix("apple","Yao",3) getTermMatrix("apple","Yao",4) getTermMatrix("apple","Yao",5) # ct getTermMatrix("ct","Ko",1) getTermMatrix("ct","Ko",2) getTermMatrix("ct","Ko",3) getTermMatrix("ct","Ko",4) getTermMatrix("ct","Ko",5) getTermMatrix("ct","Di",1) getTermMatrix("ct","Di",2) getTermMatrix("ct","Di",3) getTermMatrix("ct","Di",4) getTermMatrix("ct","Di",5) getTermMatrix("ct","Yao",1) getTermMatrix("ct","Yao",2) getTermMatrix("ct","Yao",3) getTermMatrix("ct","Yao",4) getTermMatrix("ct","Yao",5) # ltn getTermMatrix("ltn","Ko",1) getTermMatrix("ltn","Ko",2) getTermMatrix("ltn","Ko",3) getTermMatrix("ltn","Ko",4) getTermMatrix("ltn","Ko",5) getTermMatrix("ltn","Di",1) getTermMatrix("ltn","Di",2) getTermMatrix("ltn","Di",3) getTermMatrix("ltn","Di",4) getTermMatrix("ltn","Di",5) getTermMatrix("ltn","Yao",1) getTermMatrix("ltn","Yao",2) getTermMatrix("ltn","Yao",3) 
getTermMatrix("ltn","Yao",4) getTermMatrix("ltn","Yao",5) # udn getTermMatrix("udn","Ko",1) getTermMatrix("udn","Ko",2) getTermMatrix("udn","Ko",3) getTermMatrix("udn","Ko",4) getTermMatrix("udn","Ko",5) getTermMatrix("udn","Di",1) getTermMatrix("udn","Di",2) getTermMatrix("udn","Di",3) getTermMatrix("udn","Di",4) getTermMatrix("udn","Di",5) getTermMatrix("udn","Yao",1) getTermMatrix("udn","Yao",2) getTermMatrix("udn","Yao",3) getTermMatrix("udn","Yao",4) getTermMatrix("udn","Yao",5)
x = 1;x if(x==2){print("x=2")};x if(x>=0){x="A"}else{x="B"};x for(i in 1:5){print (i)} for(i in letters[1:5]){print(i)} a=numeric(20) for(i in 1:20) {a[i]=i} a letters [1:26] exp(10) log10(2) a=1:3 b=2:4 c(a,b) c(1,a) array(1,3) seq(1:5) seq(from=1, to=3,lenght.out = 4) AA = letters[1:3];AA K=c(3,2,1,3,2) lenght(K) K[2] K[1:3] K[-1] Twice = c("Nayeon","Jeongyeon","Momo","Sana","Jihyo","Mina","Dahyun","Chaeyoung","Tzuyu");Twice x = c(95,96,96,96,97,97,98,99,99);x y = c(1,2,3,4,5,6,7,8,9);y data1=data.frame(Twice,x,y);data1 colnames(data1)=c("Member","Tahun Lahir","Urutan");data1 data = read.csv("E:/kuliah/Semester 4/DMC/59677_titanicdata/titanicdata.csv",sep=";") View(data) library(gtable) class=table(data$Survived) classdata=data.frame(class) colnames(classdata)=c("class","Count") bp=ggplot(clasdata,aes(x="",y=Count,fill=class))+geom_bar(width=1,stat="identify") Pie=bp+coord_polar("y",start=0) Pie
/DMC(1).R
no_license
evamarella48/DMC-SCC
R
false
false
961
r
x = 1;x if(x==2){print("x=2")};x if(x>=0){x="A"}else{x="B"};x for(i in 1:5){print (i)} for(i in letters[1:5]){print(i)} a=numeric(20) for(i in 1:20) {a[i]=i} a letters [1:26] exp(10) log10(2) a=1:3 b=2:4 c(a,b) c(1,a) array(1,3) seq(1:5) seq(from=1, to=3,lenght.out = 4) AA = letters[1:3];AA K=c(3,2,1,3,2) lenght(K) K[2] K[1:3] K[-1] Twice = c("Nayeon","Jeongyeon","Momo","Sana","Jihyo","Mina","Dahyun","Chaeyoung","Tzuyu");Twice x = c(95,96,96,96,97,97,98,99,99);x y = c(1,2,3,4,5,6,7,8,9);y data1=data.frame(Twice,x,y);data1 colnames(data1)=c("Member","Tahun Lahir","Urutan");data1 data = read.csv("E:/kuliah/Semester 4/DMC/59677_titanicdata/titanicdata.csv",sep=";") View(data) library(gtable) class=table(data$Survived) classdata=data.frame(class) colnames(classdata)=c("class","Count") bp=ggplot(clasdata,aes(x="",y=Count,fill=class))+geom_bar(width=1,stat="identify") Pie=bp+coord_polar("y",start=0) Pie
library(ropensecretsapi) ### Name: SetAPIKey ### Title: Allows the user to set the OpenSecrets.org API key once thereby ### removing the need to pass it in for each function call in this ### package. ### Aliases: SetAPIKey ### ** Examples SetAPIKey ("Example API Key")
/data/genthat_extracted_code/ropensecretsapi/examples/SetAPIKey.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
280
r
library(ropensecretsapi) ### Name: SetAPIKey ### Title: Allows the user to set the OpenSecrets.org API key once thereby ### removing the need to pass it in for each function call in this ### package. ### Aliases: SetAPIKey ### ** Examples SetAPIKey ("Example API Key")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/healthlake_operations.R \name{healthlake_start_fhir_import_job} \alias{healthlake_start_fhir_import_job} \title{Begins a FHIR Import job} \usage{ healthlake_start_fhir_import_job( JobName = NULL, InputDataConfig, JobOutputDataConfig, DatastoreId, DataAccessRoleArn, ClientToken ) } \arguments{ \item{JobName}{The name of the FHIR Import job in the StartFHIRImport job request.} \item{InputDataConfig}{[required] The input properties of the FHIR Import job in the StartFHIRImport job request.} \item{JobOutputDataConfig}{[required]} \item{DatastoreId}{[required] The AWS-generated data store ID.} \item{DataAccessRoleArn}{[required] The Amazon Resource Name (ARN) that gives AWS HealthLake access permission.} \item{ClientToken}{[required] Optional user provided token used for ensuring idempotency.} } \description{ Begins a FHIR Import job. See \url{https://www.paws-r-sdk.com/docs/healthlake_start_fhir_import_job/} for full documentation. } \keyword{internal}
/cran/paws.analytics/man/healthlake_start_fhir_import_job.Rd
permissive
paws-r/paws
R
false
true
1,059
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/healthlake_operations.R \name{healthlake_start_fhir_import_job} \alias{healthlake_start_fhir_import_job} \title{Begins a FHIR Import job} \usage{ healthlake_start_fhir_import_job( JobName = NULL, InputDataConfig, JobOutputDataConfig, DatastoreId, DataAccessRoleArn, ClientToken ) } \arguments{ \item{JobName}{The name of the FHIR Import job in the StartFHIRImport job request.} \item{InputDataConfig}{[required] The input properties of the FHIR Import job in the StartFHIRImport job request.} \item{JobOutputDataConfig}{[required]} \item{DatastoreId}{[required] The AWS-generated data store ID.} \item{DataAccessRoleArn}{[required] The Amazon Resource Name (ARN) that gives AWS HealthLake access permission.} \item{ClientToken}{[required] Optional user provided token used for ensuring idempotency.} } \description{ Begins a FHIR Import job. See \url{https://www.paws-r-sdk.com/docs/healthlake_start_fhir_import_job/} for full documentation. } \keyword{internal}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mllaplace.R \name{mllaplace} \alias{mllaplace} \title{Laplace distribution maximum likelihood estimation} \usage{ mllaplace(x, na.rm = FALSE) } \arguments{ \item{x}{a (non-empty) numeric vector of data values.} \item{na.rm}{logical. Should missing values be removed?} } \value{ \code{mllaplace} returns an object of \link[base]{class} \code{univariateML}. This is a named numeric vector with maximum likelihood estimates for \code{mu} and \code{sigma} and the following attributes: \item{\code{model}}{The name of the model.} \item{\code{density}}{The density associated with the estimates.} \item{\code{logLik}}{The loglikelihood at the maximum.} \item{\code{support}}{The support of the density.} \item{\code{n}}{The number of observations.} \item{\code{call}}{The call as captured my \code{match.call}} } \description{ The maximum likelihood estimate of \code{mu} is the sample median while the maximum likelihood estimate of \code{sigma} is mean absolute deviation from the median. } \details{ For the density function of the Laplace distribution see \link[extraDistr]{Laplace}. } \examples{ mllaplace(precip) } \references{ Johnson, N. L., Kotz, S. and Balakrishnan, N. (1995) Continuous Univariate Distributions, Volume 2, Chapter 24. Wiley, New York. } \seealso{ \link[extraDistr]{Laplace} for the Laplace density. }
/man/mllaplace.Rd
permissive
vbaliga/univariateML
R
false
true
1,436
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mllaplace.R \name{mllaplace} \alias{mllaplace} \title{Laplace distribution maximum likelihood estimation} \usage{ mllaplace(x, na.rm = FALSE) } \arguments{ \item{x}{a (non-empty) numeric vector of data values.} \item{na.rm}{logical. Should missing values be removed?} } \value{ \code{mllaplace} returns an object of \link[base]{class} \code{univariateML}. This is a named numeric vector with maximum likelihood estimates for \code{mu} and \code{sigma} and the following attributes: \item{\code{model}}{The name of the model.} \item{\code{density}}{The density associated with the estimates.} \item{\code{logLik}}{The loglikelihood at the maximum.} \item{\code{support}}{The support of the density.} \item{\code{n}}{The number of observations.} \item{\code{call}}{The call as captured my \code{match.call}} } \description{ The maximum likelihood estimate of \code{mu} is the sample median while the maximum likelihood estimate of \code{sigma} is mean absolute deviation from the median. } \details{ For the density function of the Laplace distribution see \link[extraDistr]{Laplace}. } \examples{ mllaplace(precip) } \references{ Johnson, N. L., Kotz, S. and Balakrishnan, N. (1995) Continuous Univariate Distributions, Volume 2, Chapter 24. Wiley, New York. } \seealso{ \link[extraDistr]{Laplace} for the Laplace density. }
# Model species richness across Australia: test for spatial autocorrelation
# (Moran's I), select a spatial correlation structure by AICc, fit the best
# GLS model per species group, and export coefficient tables.
#
# BUG FIXES relative to the original:
#  * corExp(form = ~long + lat, T) passed T positionally to 'value' (the
#    range parameter) and silently left nugget = FALSE; the comments and the
#    model-selection step both intended nugget = TRUE. Fixed in models 2 & 4.
#  * library(raster) was never loaded although stack()/getValues() are used.
#  * fields:::rdist used ':::' on an exported function.

# library ------------------------------------------------------------------
library(tidyverse)
library(broom)
library(magrittr)
library(MuMIn)
library(ape)
library(nlme)
library(car)
library(raster) # stack()/getValues() below are raster functions
library(rgdal)
library(lmtest)
library(gvlma)

rm(list = ls())

# data ---------------------------------------------------------------------
# Aus raster names
setwd("C:/Users/s436862/Dropbox/NZ/Results/rasters/log")
files_list <- list.files(pattern = ".grd")
aus_list <- Filter(function(x) grepl("Aus_", x), files_list)
aus_stack <- stack(aus_list)
spp <- gsub(pattern = "\\.grd$", "", aus_list)
setwd("C:/Users/s436862/Dropbox/NZ")

# predictor variables
pv <- read.csv("Results/csv/predictor variables/Australia predictor variables 2538.csv")

# species richness (log of iNEXT)
spp_df <- data.frame(getValues(aus_stack))
names(spp_df) <- spp

# species predictor variable data frame (spv); keep land cells only
spv <- bind_cols(spp_df, pv) %>%
  filter(cell_category == "land")
head(spv)

# outputs -------------------------------------------------------------------
# store all results for supplementary materials
moran_l <- list()                     # Moran's I spatial autocorrelation tests
gls_l <- list()                       # GLS model-selection tables
cor_str <- matrix(nrow = length(spp)) # best correlation structure per species
model_list <- list()                  # stores best models
ci_list <- list()                     # stores gls confidence intervals

# identify spatial auto-correlation ------------------------------------------
# Moran's I test for one species column; returns the ape::Moran.I list
# (observed, expected, sd, p.value)
moran_fun <- function(spp_col, col_no) {
  xy <- spv %>%
    filter(!is.na(spp_col)) %>%
    dplyr::select(all_of(col_no), long, lat)
  coords <- cbind(xy$long, xy$lat)
  w <- fields::rdist(coords) # rdist is exported; ':::' was unnecessary
  m_i <- Moran.I(x = xy[, 1], w = w, na.rm = TRUE)
  return(m_i)
}

# run
for (i in seq_along(spp)) {
  moran_l[[i]] <- moran_fun(spv[, i], i)
}
names(moran_l) <- spp
moran_l

# Moran's I data frame for saving
m_mat <- round(matrix(unlist(moran_l), byrow = TRUE, nrow = length(spp)), 2)
row.names(m_mat) <- spp
colnames(m_mat) <- c("observed", "expected", "sd", "p.value")
m_mat # all spp have signif. autocorrelation

# model selection ------------------------------------------------------------
# fit GLS models with four spatial correlation structures plus an ordinary
# lm, then rank them by AICc
model_sel_fun <- function(spp_col) {
  model_e <- gls(spp_col ~ proportion_cover + hii + th + pcoldq + pwarmq +
                   ts + arid + amt,
                 data = spv,
                 correlation = corExp(form = ~long + lat, nugget = TRUE),
                 na.action = na.omit, method = "ML")
  model_g <- gls(spp_col ~ proportion_cover + hii + th + pcoldq + pwarmq +
                   ts + arid + amt,
                 data = spv,
                 correlation = corGaus(form = ~long + lat, nugget = TRUE),
                 na.action = na.omit, method = "ML")
  model_s <- gls(spp_col ~ proportion_cover + hii + th + pcoldq + pwarmq +
                   ts + arid + amt,
                 data = spv,
                 correlation = corSpher(form = ~long + lat, nugget = TRUE),
                 na.action = na.omit, method = "ML")
  model_r <- gls(spp_col ~ proportion_cover + hii + th + pcoldq + pwarmq +
                   ts + arid + amt,
                 data = spv,
                 correlation = corRatio(form = ~long + lat, nugget = TRUE),
                 na.action = na.omit, method = "ML")
  model_lm <- lm(spp_col ~ proportion_cover + hii + th + pcoldq + pwarmq +
                   ts + arid + amt,
                 data = spv, na.action = na.omit)

  # compare models using AICc
  model_sel <- model.sel(model_e, model_g, model_s, model_r, model_lm)
  return(model_sel)
}

# run and choose best model by AICc ------------------------------------------
for (i in seq_along(spp)) {
  spp_col <- spv[, i]
  gls_l[[i]] <- model_sel_fun(spp_col)
  cor_str[i] <- gls_l[[i]]$correlation[1] # best correlation structure
}
cor_str

# save all gls models
gls_mat <- matrix(nrow = 5, ncol = 72) # ncol = 18 * no. of species
gls_mat[, ] <- unlist(gls_l, recursive = TRUE)
gls_mat2 <- rbind(gls_mat[, 1:18],
                  gls_mat[, 19:36],
                  gls_mat[, 37:54],
                  gls_mat[, 55:72])
colnames(gls_mat2) <- colnames(gls_l[[1]])
rownames(gls_mat2) <- rep(spp, each = 5)

# run lowest-AIC models ------------------------------------------------------
# correlation structures identified in cor_str above

# native C3 = corSpher(form = ~long + lat, nugget = TRUE)
model_list[[1]] <- gls(Aus_native_C3 ~ proportion_cover + hii + th + pcoldq +
                         pwarmq + ts + arid + amt,
                       data = spv,
                       correlation = corSpher(form = ~long + lat, nugget = TRUE),
                       na.action = na.omit, method = "ML")
ci_list[[1]] <- data.frame(intervals(model_list[[1]], 0.95, which = "coef")$coef)

# native C4 = corExp(form = ~long + lat, nugget = TRUE)
# (fixed: original passed T positionally to 'value', not to 'nugget')
model_list[[2]] <- gls(Aus_native_C4 ~ proportion_cover + hii + th + pcoldq +
                         pwarmq + ts + arid + amt,
                       data = spv,
                       correlation = corExp(form = ~long + lat, nugget = TRUE),
                       na.action = na.omit, method = "ML")
ci_list[[2]] <- data.frame(intervals(model_list[[2]], 0.95, which = "coef")$coef)

# nonnative C3 = corExp(form = ~long + lat, nugget = TRUE)
model_list[[3]] <- gls(Aus_nonnative_C3 ~ proportion_cover + hii + th + pcoldq +
                         pwarmq + ts + arid + amt,
                       data = spv,
                       correlation = corExp(form = ~long + lat, nugget = TRUE),
                       na.action = na.omit, method = "ML")
ci_list[[3]] <- data.frame(intervals(model_list[[3]], 0.95, which = "coef")$coef)

# nonnative C4 = corExp(form = ~long + lat, nugget = TRUE)
# (fixed: same positional-T problem as model_list[[2]])
model_list[[4]] <- gls(Aus_nonnative_C4 ~ proportion_cover + hii + th + pcoldq +
                         pwarmq + ts + arid + amt,
                       data = spv,
                       correlation = corExp(form = ~long + lat, nugget = TRUE),
                       na.action = na.omit, method = "ML")
ci_list[[4]] <- data.frame(intervals(model_list[[4]], 0.95, which = "coef")$coef)

names(model_list) <- spp
names(ci_list) <- spp

# wrangle parameter estimates into plot-ready tables --------------------------
# cis: data frame of coefficient intervals; stat: "Native"/"Nonnative";
# location: country code. slice(3:9) keeps the rows of interest —
# presumably the predictor coefficients; verify against intervals() ordering.
model_df <- function(cis, stat, location) {
  df <- cis %>%
    mutate(pv = rownames(cis),
           status = stat) %>%
    slice(3:9) %>%
    mutate(country = location,
           status = as.factor(status),
           estimate = est.) %>%
    dplyr::select(pv, country, status, lower, estimate, upper)
  return(df)
}

# run
natausc3 <- model_df(ci_list[[1]], "Native", "AU")
natausc3
natausc4 <- model_df(ci_list[[2]], "Native", "AU")
natausc4
nonausc3 <- model_df(ci_list[[3]], "Nonnative", "AU")
nonausc3
nonausc4 <- model_df(ci_list[[4]], "Nonnative", "AU")
nonausc4

# save data ------------------------------------------------------------------
write.csv(m_mat, "Results/csv/models/Aus Morans I.csv", row.names = TRUE)
write.csv(gls_mat2, "Results/csv/models/Aus GLS model structures.csv", row.names = TRUE)
write.csv(natausc3, "Results/csv/models/native Aus C3 mean estimates.csv", row.names = FALSE)
write.csv(nonausc3, "Results/csv/models/nonnative Aus C3 mean estimates.csv", row.names = FALSE)
write.csv(natausc4, "Results/csv/models/native Aus C4 mean estimates.csv", row.names = FALSE)
write.csv(nonausc4, "Results/csv/models/nonnative Aus C4 mean estimates.csv", row.names = FALSE)

save.image("Data files/rdata/Aus models.RData")

# ----------------------------------------------------------------------------
/Rscripts/3. modelling species richness/step 5 - Aus models.R
no_license
khemming/NZ
R
false
false
7,501
r
# Model species richness across Australia: test for spatial autocorrelation
# (Moran's I), select a spatial correlation structure by AICc, fit the best
# GLS model per species group, and export coefficient tables.
#
# BUG FIXES relative to the original:
#  * corExp(form = ~long + lat, T) passed T positionally to 'value' (the
#    range parameter) and silently left nugget = FALSE; the comments and the
#    model-selection step both intended nugget = TRUE. Fixed in models 2 & 4.
#  * library(raster) was never loaded although stack()/getValues() are used.
#  * fields:::rdist used ':::' on an exported function.

# library ------------------------------------------------------------------
library(tidyverse)
library(broom)
library(magrittr)
library(MuMIn)
library(ape)
library(nlme)
library(car)
library(raster) # stack()/getValues() below are raster functions
library(rgdal)
library(lmtest)
library(gvlma)

rm(list = ls())

# data ---------------------------------------------------------------------
# Aus raster names
setwd("C:/Users/s436862/Dropbox/NZ/Results/rasters/log")
files_list <- list.files(pattern = ".grd")
aus_list <- Filter(function(x) grepl("Aus_", x), files_list)
aus_stack <- stack(aus_list)
spp <- gsub(pattern = "\\.grd$", "", aus_list)
setwd("C:/Users/s436862/Dropbox/NZ")

# predictor variables
pv <- read.csv("Results/csv/predictor variables/Australia predictor variables 2538.csv")

# species richness (log of iNEXT)
spp_df <- data.frame(getValues(aus_stack))
names(spp_df) <- spp

# species predictor variable data frame (spv); keep land cells only
spv <- bind_cols(spp_df, pv) %>%
  filter(cell_category == "land")
head(spv)

# outputs -------------------------------------------------------------------
# store all results for supplementary materials
moran_l <- list()                     # Moran's I spatial autocorrelation tests
gls_l <- list()                       # GLS model-selection tables
cor_str <- matrix(nrow = length(spp)) # best correlation structure per species
model_list <- list()                  # stores best models
ci_list <- list()                     # stores gls confidence intervals

# identify spatial auto-correlation ------------------------------------------
# Moran's I test for one species column; returns the ape::Moran.I list
# (observed, expected, sd, p.value)
moran_fun <- function(spp_col, col_no) {
  xy <- spv %>%
    filter(!is.na(spp_col)) %>%
    dplyr::select(all_of(col_no), long, lat)
  coords <- cbind(xy$long, xy$lat)
  w <- fields::rdist(coords) # rdist is exported; ':::' was unnecessary
  m_i <- Moran.I(x = xy[, 1], w = w, na.rm = TRUE)
  return(m_i)
}

# run
for (i in seq_along(spp)) {
  moran_l[[i]] <- moran_fun(spv[, i], i)
}
names(moran_l) <- spp
moran_l

# Moran's I data frame for saving
m_mat <- round(matrix(unlist(moran_l), byrow = TRUE, nrow = length(spp)), 2)
row.names(m_mat) <- spp
colnames(m_mat) <- c("observed", "expected", "sd", "p.value")
m_mat # all spp have signif. autocorrelation

# model selection ------------------------------------------------------------
# fit GLS models with four spatial correlation structures plus an ordinary
# lm, then rank them by AICc
model_sel_fun <- function(spp_col) {
  model_e <- gls(spp_col ~ proportion_cover + hii + th + pcoldq + pwarmq +
                   ts + arid + amt,
                 data = spv,
                 correlation = corExp(form = ~long + lat, nugget = TRUE),
                 na.action = na.omit, method = "ML")
  model_g <- gls(spp_col ~ proportion_cover + hii + th + pcoldq + pwarmq +
                   ts + arid + amt,
                 data = spv,
                 correlation = corGaus(form = ~long + lat, nugget = TRUE),
                 na.action = na.omit, method = "ML")
  model_s <- gls(spp_col ~ proportion_cover + hii + th + pcoldq + pwarmq +
                   ts + arid + amt,
                 data = spv,
                 correlation = corSpher(form = ~long + lat, nugget = TRUE),
                 na.action = na.omit, method = "ML")
  model_r <- gls(spp_col ~ proportion_cover + hii + th + pcoldq + pwarmq +
                   ts + arid + amt,
                 data = spv,
                 correlation = corRatio(form = ~long + lat, nugget = TRUE),
                 na.action = na.omit, method = "ML")
  model_lm <- lm(spp_col ~ proportion_cover + hii + th + pcoldq + pwarmq +
                   ts + arid + amt,
                 data = spv, na.action = na.omit)

  # compare models using AICc
  model_sel <- model.sel(model_e, model_g, model_s, model_r, model_lm)
  return(model_sel)
}

# run and choose best model by AICc ------------------------------------------
for (i in seq_along(spp)) {
  spp_col <- spv[, i]
  gls_l[[i]] <- model_sel_fun(spp_col)
  cor_str[i] <- gls_l[[i]]$correlation[1] # best correlation structure
}
cor_str

# save all gls models
gls_mat <- matrix(nrow = 5, ncol = 72) # ncol = 18 * no. of species
gls_mat[, ] <- unlist(gls_l, recursive = TRUE)
gls_mat2 <- rbind(gls_mat[, 1:18],
                  gls_mat[, 19:36],
                  gls_mat[, 37:54],
                  gls_mat[, 55:72])
colnames(gls_mat2) <- colnames(gls_l[[1]])
rownames(gls_mat2) <- rep(spp, each = 5)

# run lowest-AIC models ------------------------------------------------------
# correlation structures identified in cor_str above

# native C3 = corSpher(form = ~long + lat, nugget = TRUE)
model_list[[1]] <- gls(Aus_native_C3 ~ proportion_cover + hii + th + pcoldq +
                         pwarmq + ts + arid + amt,
                       data = spv,
                       correlation = corSpher(form = ~long + lat, nugget = TRUE),
                       na.action = na.omit, method = "ML")
ci_list[[1]] <- data.frame(intervals(model_list[[1]], 0.95, which = "coef")$coef)

# native C4 = corExp(form = ~long + lat, nugget = TRUE)
# (fixed: original passed T positionally to 'value', not to 'nugget')
model_list[[2]] <- gls(Aus_native_C4 ~ proportion_cover + hii + th + pcoldq +
                         pwarmq + ts + arid + amt,
                       data = spv,
                       correlation = corExp(form = ~long + lat, nugget = TRUE),
                       na.action = na.omit, method = "ML")
ci_list[[2]] <- data.frame(intervals(model_list[[2]], 0.95, which = "coef")$coef)

# nonnative C3 = corExp(form = ~long + lat, nugget = TRUE)
model_list[[3]] <- gls(Aus_nonnative_C3 ~ proportion_cover + hii + th + pcoldq +
                         pwarmq + ts + arid + amt,
                       data = spv,
                       correlation = corExp(form = ~long + lat, nugget = TRUE),
                       na.action = na.omit, method = "ML")
ci_list[[3]] <- data.frame(intervals(model_list[[3]], 0.95, which = "coef")$coef)

# nonnative C4 = corExp(form = ~long + lat, nugget = TRUE)
# (fixed: same positional-T problem as model_list[[2]])
model_list[[4]] <- gls(Aus_nonnative_C4 ~ proportion_cover + hii + th + pcoldq +
                         pwarmq + ts + arid + amt,
                       data = spv,
                       correlation = corExp(form = ~long + lat, nugget = TRUE),
                       na.action = na.omit, method = "ML")
ci_list[[4]] <- data.frame(intervals(model_list[[4]], 0.95, which = "coef")$coef)

names(model_list) <- spp
names(ci_list) <- spp

# wrangle parameter estimates into plot-ready tables --------------------------
# cis: data frame of coefficient intervals; stat: "Native"/"Nonnative";
# location: country code. slice(3:9) keeps the rows of interest —
# presumably the predictor coefficients; verify against intervals() ordering.
model_df <- function(cis, stat, location) {
  df <- cis %>%
    mutate(pv = rownames(cis),
           status = stat) %>%
    slice(3:9) %>%
    mutate(country = location,
           status = as.factor(status),
           estimate = est.) %>%
    dplyr::select(pv, country, status, lower, estimate, upper)
  return(df)
}

# run
natausc3 <- model_df(ci_list[[1]], "Native", "AU")
natausc3
natausc4 <- model_df(ci_list[[2]], "Native", "AU")
natausc4
nonausc3 <- model_df(ci_list[[3]], "Nonnative", "AU")
nonausc3
nonausc4 <- model_df(ci_list[[4]], "Nonnative", "AU")
nonausc4

# save data ------------------------------------------------------------------
write.csv(m_mat, "Results/csv/models/Aus Morans I.csv", row.names = TRUE)
write.csv(gls_mat2, "Results/csv/models/Aus GLS model structures.csv", row.names = TRUE)
write.csv(natausc3, "Results/csv/models/native Aus C3 mean estimates.csv", row.names = FALSE)
write.csv(nonausc3, "Results/csv/models/nonnative Aus C3 mean estimates.csv", row.names = FALSE)
write.csv(natausc4, "Results/csv/models/native Aus C4 mean estimates.csv", row.names = FALSE)
write.csv(nonausc4, "Results/csv/models/nonnative Aus C4 mean estimates.csv", row.names = FALSE)

save.image("Data files/rdata/Aus models.RData")

# ----------------------------------------------------------------------------
# Replicate the Kauermann & Carroll simulation: generate data under several
# heteroskedasticity models, apply HC-robust tests (from SSTP.R), and
# summarize rejection rates / coverage.
#
# Fixes relative to the original: T/F literals replaced with TRUE/FALSE
# (T and F are reassignable bindings), and '=' assignment replaced with '<-'.

library(plyr)
library(Pusto)

rm(list = ls())
source("SSTP.R")

#-----------------------------
# Data-generating model
#-----------------------------

# Fit OLS "by hand" and return every quantity the downstream robust-test
# routines need: design matrix, residuals, hat-matrix diagonal, (X'X)^{-1}.
estimate_model <- function(Y, X, B) {
  n <- nrow(X)
  p <- ncol(X)
  M <- solve(t(X) %*% X)    # (X'X)^{-1}
  X_M <- X %*% M
  coefs <- colSums(Y * X_M) # OLS coefficients
  e <- Y - as.vector(X %*% coefs)
  H <- X_M %*% t(X)         # hat matrix
  h <- diag(H)              # leverages
  values <- list(X = X, Y = Y, B = B, X_M = X_M, H = H, h = h, e = e,
                 coefs = coefs, n = n, p = p, M = M)
  return(values)
}

# Generate one simulated data set (mdl 1 = homoskedastic errors, 2-3 =
# heteroskedastic error-variance functions) with covariate distribution
# xDist, then fit it via estimate_model().
gdm <- function(n = 20, mdl = 1, xDist = "norm", B = c(0, 1)) {
  require(Runuran)
  x1 <- switch(xDist,
               unif = runif(n, 0, 1),
               norm = rnorm(n),
               lap = urlaplace(n))
  X <- cbind(x0 = 1, x1)
  e <- switch(mdl,
              "1" = rnorm(n, 0, .2),
              "2" = rnorm(n, 0, .2 + exp(x1 / 2) / 2),
              "3" = rnorm(n, 0, sqrt(.1 + x1^2)))
  Y <- as.vector(X %*% B) + e
  values <- estimate_model(Y, X, B)
  return(values)
}

#-----------------------------------
# simulation driver
#-----------------------------------

# Run 'iterations' replications for one design cell and summarize rejection
# rates at alpha = .01/.05/.10 for each HC estimator x test combination.
# HC and tests arrive as space-separated strings so they fit in one
# expand.grid design row.
runSim <- function(iterations, n, mdl, xDist, HC, tests, seed = NULL) {
  require(plyr)
  require(reshape2)
  HC <- as.character(unlist(strsplit(HC, " ")))
  tests <- as.character(unlist(strsplit(tests, " ")))
  if (!is.null(seed)) set.seed(seed)

  reps <- rdply(iterations, {
    model <- gdm(n = n, mdl = mdl, xDist = xDist)
    ldply(HC, estimate, tests = tests, model = model) # estimate() from SSTP.R
  })

  # performance calculations; the saddlepoint test reports two variants
  if ("saddle" %in% tests) tests <- c(tests[tests != "saddle"],
                                      paste0("saddle_V", 1:2))
  reps <- melt(reps, id.vars = c("HC", "coef", "criterion"),
               measure.vars = tests, variable.name = "test")
  # NA p-values count as non-rejections; percentNA tracks how often they occur
  ddply(reps, .(HC, coef, criterion, test), summarize,
        p01 = mean(ifelse(is.na(value), FALSE, value < .01)),
        p05 = mean(ifelse(is.na(value), FALSE, value < .05)),
        p10 = mean(ifelse(is.na(value), FALSE, value < .10)),
        percentNA = mean(is.na(value)))
}

#-----------------------------
# Run Kauermann & Carroll Simulation
#-----------------------------

set.seed(20150819)
design <- list(n = c(20, 40),
               mdl = 1:3,
               xDist = c("unif", "norm", "lap"),
               HC = "HC2 HC3",
               tests = "naive Satt edgeKC saddle")
params <- expand.grid(design, stringsAsFactors = FALSE)
params$iterations <- 10000
params$seed <- round(runif(nrow(params)) * 2^30)

source_obj <- ls()
cluster <- start_parallel(source_obj) # from Pusto
system.time(results <- mdply(params, .fun = runSim, .parallel = TRUE))
stopCluster(cluster)
save(results, file = "Results/edgeKC simulations 08-19-15.Rdata")

# NOTE(review): the file loaded below differs from the one saved above —
# presumably an earlier run's results; confirm this is intentional.
load("Results/edgeKC simulations.Rdata")

library(ggplot2)
library(dplyr)
library(tidyr)

summary(results$percentNA)
filter(results, percentNA > 0)

filter(results, criterion == "size") %>%
  select(n, mdl, xDist, test, HC, coef, p01:p10) %>%
  gather("alpha", "reject_rate", p01:p10) %>%
  mutate(test_HC = paste(test, HC),
         coverage = 1 - reject_rate,
         mdl = factor(mdl),
         xDist = factor(xDist, levels = c("unif", "norm", "lap"))) -> covProb

# recreate KC Figure 1
filter(covProb, coef == "x1", alpha == "p05",
       test_HC %in% c("naive HC2", "naive HC3", "edgeKC HC2")) %>%
  ggplot(aes(x = mdl, y = coverage, shape = test_HC, color = test_HC)) +
  geom_point() +
  geom_hline(yintercept = .95, linetype = "dashed") +
  facet_wrap(~n * xDist) +
  theme_minimal() +
  scale_shape(solid = FALSE)

# compare all the tests
alphas <- data.frame(alpha = unique(covProb$alpha),
                     nominal = c(.99, .95, .90))
ggplot(covProb, aes(test_HC, coverage, fill = test_HC)) +
  geom_boxplot() +
  geom_hline(data = alphas, aes(yintercept = nominal), linetype = "dashed") +
  facet_grid(alpha ~ n, scales = "free_y") +
  theme_bw() +
  theme(legend.position = "bottom")
/Backup/runKCrep.R
no_license
meghapsimatrix/HetRobust
R
false
false
3,850
r
# Replicate the Kauermann & Carroll simulation: generate data under several
# heteroskedasticity models, apply HC-robust tests (from SSTP.R), and
# summarize rejection rates / coverage.
#
# Fixes relative to the original: T/F literals replaced with TRUE/FALSE
# (T and F are reassignable bindings), and '=' assignment replaced with '<-'.

library(plyr)
library(Pusto)

rm(list = ls())
source("SSTP.R")

#-----------------------------
# Data-generating model
#-----------------------------

# Fit OLS "by hand" and return every quantity the downstream robust-test
# routines need: design matrix, residuals, hat-matrix diagonal, (X'X)^{-1}.
estimate_model <- function(Y, X, B) {
  n <- nrow(X)
  p <- ncol(X)
  M <- solve(t(X) %*% X)    # (X'X)^{-1}
  X_M <- X %*% M
  coefs <- colSums(Y * X_M) # OLS coefficients
  e <- Y - as.vector(X %*% coefs)
  H <- X_M %*% t(X)         # hat matrix
  h <- diag(H)              # leverages
  values <- list(X = X, Y = Y, B = B, X_M = X_M, H = H, h = h, e = e,
                 coefs = coefs, n = n, p = p, M = M)
  return(values)
}

# Generate one simulated data set (mdl 1 = homoskedastic errors, 2-3 =
# heteroskedastic error-variance functions) with covariate distribution
# xDist, then fit it via estimate_model().
gdm <- function(n = 20, mdl = 1, xDist = "norm", B = c(0, 1)) {
  require(Runuran)
  x1 <- switch(xDist,
               unif = runif(n, 0, 1),
               norm = rnorm(n),
               lap = urlaplace(n))
  X <- cbind(x0 = 1, x1)
  e <- switch(mdl,
              "1" = rnorm(n, 0, .2),
              "2" = rnorm(n, 0, .2 + exp(x1 / 2) / 2),
              "3" = rnorm(n, 0, sqrt(.1 + x1^2)))
  Y <- as.vector(X %*% B) + e
  values <- estimate_model(Y, X, B)
  return(values)
}

#-----------------------------------
# simulation driver
#-----------------------------------

# Run 'iterations' replications for one design cell and summarize rejection
# rates at alpha = .01/.05/.10 for each HC estimator x test combination.
# HC and tests arrive as space-separated strings so they fit in one
# expand.grid design row.
runSim <- function(iterations, n, mdl, xDist, HC, tests, seed = NULL) {
  require(plyr)
  require(reshape2)
  HC <- as.character(unlist(strsplit(HC, " ")))
  tests <- as.character(unlist(strsplit(tests, " ")))
  if (!is.null(seed)) set.seed(seed)

  reps <- rdply(iterations, {
    model <- gdm(n = n, mdl = mdl, xDist = xDist)
    ldply(HC, estimate, tests = tests, model = model) # estimate() from SSTP.R
  })

  # performance calculations; the saddlepoint test reports two variants
  if ("saddle" %in% tests) tests <- c(tests[tests != "saddle"],
                                      paste0("saddle_V", 1:2))
  reps <- melt(reps, id.vars = c("HC", "coef", "criterion"),
               measure.vars = tests, variable.name = "test")
  # NA p-values count as non-rejections; percentNA tracks how often they occur
  ddply(reps, .(HC, coef, criterion, test), summarize,
        p01 = mean(ifelse(is.na(value), FALSE, value < .01)),
        p05 = mean(ifelse(is.na(value), FALSE, value < .05)),
        p10 = mean(ifelse(is.na(value), FALSE, value < .10)),
        percentNA = mean(is.na(value)))
}

#-----------------------------
# Run Kauermann & Carroll Simulation
#-----------------------------

set.seed(20150819)
design <- list(n = c(20, 40),
               mdl = 1:3,
               xDist = c("unif", "norm", "lap"),
               HC = "HC2 HC3",
               tests = "naive Satt edgeKC saddle")
params <- expand.grid(design, stringsAsFactors = FALSE)
params$iterations <- 10000
params$seed <- round(runif(nrow(params)) * 2^30)

source_obj <- ls()
cluster <- start_parallel(source_obj) # from Pusto
system.time(results <- mdply(params, .fun = runSim, .parallel = TRUE))
stopCluster(cluster)
save(results, file = "Results/edgeKC simulations 08-19-15.Rdata")

# NOTE(review): the file loaded below differs from the one saved above —
# presumably an earlier run's results; confirm this is intentional.
load("Results/edgeKC simulations.Rdata")

library(ggplot2)
library(dplyr)
library(tidyr)

summary(results$percentNA)
filter(results, percentNA > 0)

filter(results, criterion == "size") %>%
  select(n, mdl, xDist, test, HC, coef, p01:p10) %>%
  gather("alpha", "reject_rate", p01:p10) %>%
  mutate(test_HC = paste(test, HC),
         coverage = 1 - reject_rate,
         mdl = factor(mdl),
         xDist = factor(xDist, levels = c("unif", "norm", "lap"))) -> covProb

# recreate KC Figure 1
filter(covProb, coef == "x1", alpha == "p05",
       test_HC %in% c("naive HC2", "naive HC3", "edgeKC HC2")) %>%
  ggplot(aes(x = mdl, y = coverage, shape = test_HC, color = test_HC)) +
  geom_point() +
  geom_hline(yintercept = .95, linetype = "dashed") +
  facet_wrap(~n * xDist) +
  theme_minimal() +
  scale_shape(solid = FALSE)

# compare all the tests
alphas <- data.frame(alpha = unique(covProb$alpha),
                     nominal = c(.99, .95, .90))
ggplot(covProb, aes(test_HC, coverage, fill = test_HC)) +
  geom_boxplot() +
  geom_hline(data = alphas, aes(yintercept = nominal), linetype = "dashed") +
  facet_grid(alpha ~ n, scales = "free_y") +
  theme_bw() +
  theme(legend.position = "bottom")
## ----setup, include=FALSE-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knitr::opts_chunk$set(echo = TRUE, message=F, warning=F, fig.width=9.475, fig.height=5) ## ----1--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Directorio de trabajo para que siempre sea en la misma carpeta. #setwd(".../Kaggle Titanic") ## ----2--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Importamos los data set que nos hemos descargado de Kaggle. train <- read.csv("../input/train.csv") test <- read.csv("../input/test.csv") ## ----3--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Si necesitamos visualizarlos: #View(train) #View(test) ## ----4--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Conocer la estructura de los datos. str(train) ## ----5--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Pasar la información que está en Factor a Texto Strings. 
#train <- read.csv("train.csv", stringsAsFactors=FALSE) ## ----6--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Resumen basico sobre una columna podemos usar. table(train$Survived) ## ----7--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Obtener la proporción. prop.table(table(train$Survived)) ## ----8--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Para añadir algo podemos usar <-. #En este caso añadiremos que todo el mundo muere. #Usando rep(valor a añadir, numero de veces) test$Survived <- rep(0, 418) ## ----9--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Creamos un fichero para el envio de la información demandada. #Creamos el dataframe. submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived) #Escrivimos el csv. write.csv(submit, file = "theyallperish.csv", row.names = FALSE) ## ----10-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Directorio de trabajo para que siempre sea en la misma carpeta. 
#setwd(".../Kaggle Titanic") ## ----11-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Importamos los data set que nos hemos descargado de Kaggle. train <- read.csv("../input/train.csv") test <- read.csv("../input/test.csv") ## ----12-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Usamos otro Resumen con Summary. summary(train$Sex) ## ----13-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Sacamos el % e incluimos la variable de supervivientes. prop.table(table(train$Sex, train$Survived)) ## ----14-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Si ponemos un 1 al final nos dará el % por filas, 2 por Columnas. prop.table(table(train$Sex, train$Survived),1) ## ----15-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Cargamos esta nueva prediccion indicando que las mujeres sobreviven. test$Survived <- 0 test$Survived[test$Sex == 'female'] <- 1 ## ----16-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Generamos el fichero para subirlo a kaggle. 
submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived) write.csv(submit, file = "theyallperish.csv", row.names = FALSE) ## ----17-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Miramos la variable edad en su resumen. summary(train$Age) #Cuidado con los NA, asumiremos que están dentro de la edad media. ## ----18-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Creamos la variable NIÑOS con edad <18 años. #Creamos la columna. train$Child <- 0 #Añadimos la variable. train$Child[train$Age < 18] <- 1 ## ----19-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Vemos el numero de supervivientes según estas condiciones. #aggragate(Variableobjetivo ~ variable1 + variable2, data=dataframe buscado, FUN=sumar) ) aggregate(Survived ~ Child + Sex, data=train, FUN=sum) ## ----20-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Sacamos el %. #Añadimos a la parte final de la funcion, la suma de los casos entre el total. aggregate(Survived ~ Child + Sex, data=train, FUN=function(x) {sum(x)/length(x)}) ## ----21-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Vamos a revisar la variable Clase. 
#La dividimos en 4 clases <10, 10-20, 20-30, >30. e introducimos una nueva columna. #Creamos la nueva columna y decimos que todos están por encima. train$Fare2 <- '30+' #Ajustamos segun las categorias indicadas. train$Fare2[train$Fare < 30 & train$Fare >= 20] <- '20-30' train$Fare2[train$Fare < 20 & train$Fare >= 10] <- '10-20' train$Fare2[train$Fare < 10] <- '<10' ## ----22-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Vemos el resumen en %, añadiendo esta nueva variable en lugar de NIÑO. aggregate(Survived ~ Fare2 + Pclass + Sex, data=train, FUN=function(x) {sum(x)/length(x)}) ## ----23-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Hacemos una nueva predicción. #Pongo los supervivientes a 0 (nadie sobrevive). test$Survived <- 0 #Indico que las muejeres sobreviven. test$Survived[test$Sex == 'female'] <- 1 #Indico que las mujeres de Clase 3 con gasto <20 NO sobrevien. test$Survived[test$Sex == 'female' & test$Pclass == 3 & test$Fare >= 20] <- 0 ## ----24-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Creo el fichero para subirlo a Kaggle. 
# Write the current prediction out as a Kaggle submission file.
submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived)
write.csv(submit, file = "theyallperish.csv", row.names = FALSE)

## ----25--------------------------------------------------------------------
# Re-import the original files, without any of the earlier modifications.
#setwd(".../Kaggle Titanic")
train <- read.csv("../input/train.csv")
test <- read.csv("../input/test.csv")

## ----26--------------------------------------------------------------------
# rpart is needed to build decision trees.
library(rpart)

## ----27--------------------------------------------------------------------
# Fit a classification tree on the columns that are useful predictors
# (fields such as Name are excluded since they do not help directly).
# The formula lists the target variable followed by the predictors;
# method="class" requests classification rather than regression.
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
             data=train, method="class")

## ----28--------------------------------------------------------------------
# Inspect the fitted tree with the base plotting functions.
plot(fit)
text(fit)

## ----29--------------------------------------------------------------------
# Optional packages that render nicer tree plots.
#install.packages('rattle')
#install.packages('rpart.plot')
#install.packages('RColorBrewer')
#library(rattle)
library(rpart.plot)
library(RColorBrewer)

## ----30--------------------------------------------------------------------
# Draw the tree with the fancier renderer (requires rattle).
#fancyRpartPlot(fit)

## ----31--------------------------------------------------------------------
# Prepare the Kaggle submission for this model:
# predict the class label for every test-set row.
Prediction <- predict(fit, test, type = "class")

## ----32--------------------------------------------------------------------
# Generate the submission files.
#submit <- data.frame(PassengerId = test$PassengerId, Survived = Prediction)
#write.csv(submit, file = "myfirstdtree.csv", row.names = FALSE)

## ----33--------------------------------------------------------------------
train <- read.csv("../input/train.csv")
test <- read.csv("../input/test.csv")

## ----34--------------------------------------------------------------------
# Look for new variables that may help prediction. For example, the name
# contains a title (Mrs vs Miss) that hints at marital status.
train$Name[1]

## ----35--------------------------------------------------------------------
# Before combining train and test with rbind they must have identical
# columns, so add a placeholder Survived column to the test set.
test$Survived <- NA
# Stack the two data frames.
combi <- rbind(train, test)

## ----36--------------------------------------------------------------------
# Work with names as character strings rather than factors.
combi$Name <- as.character(combi$Name)
combi$Name[1]

## ----37--------------------------------------------------------------------
# Extract the relevant part of the name: the title sits between the
# comma and the period.
# Split the string on commas and periods.
strsplit(combi$Name[1], split='[,.]')

## ----38--------------------------------------------------------------------
# Drop the [[1]] list wrapper that appears at the front.
strsplit(combi$Name[1], split='[,.]')[[1]]

## ----39--------------------------------------------------------------------
# Select exactly the second piece, which holds the title of the name.
strsplit(combi$Name[1], split='[,.]')[[1]][2]

## ----40--------------------------------------------------------------------
# Apply the same extraction to every row with sapply and store it as a
# new Title column.
combi$Title <- sapply(combi$Name, FUN=function(x) {strsplit(x, split='[,.]')[[1]][2]})

## ----41--------------------------------------------------------------------
# Strip the leading space left over from the split.
combi$Title <- sub(' ', '', combi$Title)

## ----42--------------------------------------------------------------------
# Summary of the titles present in the data.
table(combi$Title)

## ----43--------------------------------------------------------------------
# Some titles are rare or near-duplicates; merge them to reduce levels.
combi$Title[combi$Title %in% c('Mme', 'Mlle')] <- 'Mlle'

## ----44--------------------------------------------------------------------
# Collapse the remaining redundant titles.
combi$Title[combi$Title %in% c('Capt', 'Don', 'Major', 'Sir')] <- 'Sir'
combi$Title[combi$Title %in% c('Dona', 'Lady', 'the Countess', 'Jonkheer')] <- 'Lady'

## ----45--------------------------------------------------------------------
# Convert the column to a factor so the algorithms recognise it.
combi$Title <- factor(combi$Title)

## ----46--------------------------------------------------------------------
# Derived feature: number of family members travelling together
# (siblings/spouses + parents/children + the passenger themselves).
combi$FamilySize <- combi$SibSp + combi$Parch + 1

## ----47--------------------------------------------------------------------
# Extract surnames to see whether families with the same surname survived.
# Note that English names use a single surname, so solo travellers with a
# common surname can collide.
combi$Surname <- sapply(combi$Name, FUN=function(x) {strsplit(x, split='[,.]')[[1]][1]})

## ----48--------------------------------------------------------------------
# Build a family identifier by concatenating family size and surname.
combi$FamilyID <- paste(as.character(combi$FamilySize), combi$Surname, sep="")

## ----49--------------------------------------------------------------------
# Categorise families of one or two members as 'Small' for now.
combi$FamilyID[combi$FamilySize <= 2] <- 'Small'

## ----50--------------------------------------------------------------------
# Review the new variable.
table(combi$FamilyID)

## ----51--------------------------------------------------------------------
# Inspect the counts to clean them up — many IDs have a single member.
famIDs <- data.frame(table(combi$FamilyID))

## ----52--------------------------------------------------------------------
# Focus on the small families to analyse them better.
# Keep only the IDs whose frequency is at most 2.
famIDs <- famIDs[famIDs$Freq <= 2,]

## ----53--------------------------------------------------------------------
# Relabel those IDs as 'Small' in the combined table, then re-factor.
combi$FamilyID[combi$FamilyID %in% famIDs$Var1] <- 'Small'
combi$FamilyID <- factor(combi$FamilyID)

## ----54--------------------------------------------------------------------
# Split the combined data back into train and test with the original
# row counts (891 training rows, 418 test rows).
train <- combi[1:891,]
test <- combi[892:1309,]

## ----55--------------------------------------------------------------------
# Fit the decision tree again, now including the engineered features.
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize + FamilyID,
             data=train, method="class")

## ----56--------------------------------------------------------------------
# Visualise it.
#fancyRpartPlot(fit)

## ----57--------------------------------------------------------------------
# Generate the CSV for Kaggle.
#Prediction <- predict(fit, test, type = "class")
#submit <- data.frame(PassengerId = test$PassengerId, Survived = Prediction)
#write.csv(submit, file = "myfirstdtree.csv", row.names = FALSE)

## ----58--------------------------------------------------------------------
# Illustrate bootstrap sampling, the idea behind Random Forests.
sample(1:10, replace = TRUE)

## ----59--------------------------------------------------------------------
summary(combi$Age)

## ----60--------------------------------------------------------------------
# Random Forests cannot handle missing values, so impute Age:
# fit a regression tree ("anova") on the rows with a known Age and
# predict the missing ones.
Agefit <- rpart(Age ~ Pclass + Sex + SibSp + Parch + Fare + Embarked + Title + FamilySize,
                data=combi[!is.na(combi$Age),], method="anova")
combi$Age[is.na(combi$Age)] <- predict(Agefit, combi[is.na(combi$Age),])

## ----61--------------------------------------------------------------------
summary(combi)

## ----62--------------------------------------------------------------------
summary(combi$Embarked)

## ----63--------------------------------------------------------------------
# Locate the rows where Embarked is blank.
which(combi$Embarked == '')
## ----64--------------------------------------------------------------------
# Fill the blank Embarked values with "S" (Southampton, the most common
# port). Deriving the rows from the data is more robust than hard-coding
# indices 62 and 830 (which chunk 63 computed), and `<-` is the idiomatic
# assignment operator rather than `=`.
combi$Embarked[combi$Embarked == ''] <- "S"
# Re-factor so the now-unused blank level is dropped.
combi$Embarked <- factor(combi$Embarked)

## ----65--------------------------------------------------------------------
summary(combi$Fare)

## ----66--------------------------------------------------------------------
# Locate the single missing Fare (row 1044).
which(is.na(combi$Fare))

## ----67--------------------------------------------------------------------
# Impute the missing Fare with the median; indexing by is.na() avoids
# hard-coding the row number found in the previous chunk.
combi$Fare[is.na(combi$Fare)] <- median(combi$Fare, na.rm=TRUE)

## ----68--------------------------------------------------------------------
# Widen the 'Small' family bucket to families of up to 3 people, in a
# second FamilyID column (randomForest limits factor cardinality).
combi$FamilyID2 <- combi$FamilyID
combi$FamilyID2 <- as.character(combi$FamilyID2)
combi$FamilyID2[combi$FamilySize <= 3] <- 'Small'
combi$FamilyID2 <- factor(combi$FamilyID2)

## ----69--------------------------------------------------------------------
# Split the combined data back into train/test.
train <- combi[1:891,]
test <- combi[892:1309,]

## ----70--------------------------------------------------------------------
# Install (once) and load the Random Forest package.
#install.packages('randomForest')
library(randomForest)

## ----71--------------------------------------------------------------------
# Fix the RNG seed so the forest is reproducible.
set.seed(415)

## ----72--------------------------------------------------------------------
# Fit a Random Forest; as.factor(Survived) forces classification.
fit <- randomForest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize + FamilyID2,
                    data=train, importance=TRUE, ntree=2000)

## ----73--------------------------------------------------------------------
# Variable-importance plot.
varImpPlot(fit)

## ----74--------------------------------------------------------------------
Prediction <- predict(fit, test)
submit <- data.frame(PassengerId = test$PassengerId, Survived = Prediction)
write.csv(submit, file = "firstforest.csv", row.names = FALSE)

## ----75--------------------------------------------------------------------
# Conditional inference forests handle high-cardinality factors,
# so the original FamilyID can be used here.
#install.packages('party')
library(party)

## ----76--------------------------------------------------------------------
set.seed(415)
fit <- cforest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize + FamilyID,
               data = train, controls=cforest_unbiased(ntree=2000, mtry=3))

## ----77--------------------------------------------------------------------
# cforest predictions must request out-of-bag estimation explicitly.
Prediction <- predict(fit, test, OOB=TRUE, type = "response")
submit <- data.frame(PassengerId = test$PassengerId, Survived = Prediction)
write.csv(submit, file = "firstforest2.csv", row.names = FALSE)
/r/kernels/alejandrorivas-titanic-beginners-principiantes-espa-ol/script/titanic-beginners-principiantes-espa-ol.R
no_license
helenaK/trustworthy-titanic
R
false
false
28,445
r
## ----setup, include=FALSE--------------------------------------------------
# Chunk defaults for the rendered document. TRUE/FALSE spelled out rather
# than the reassignable shorthands T/F.
knitr::opts_chunk$set(echo = TRUE, message=FALSE, warning=FALSE, fig.width=9.475, fig.height=5)

## ----1---------------------------------------------------------------------
# Working directory so everything runs from the same folder.
#setwd(".../Kaggle Titanic")

## ----2---------------------------------------------------------------------
# Import the data sets downloaded from Kaggle.
train <- read.csv("../input/train.csv")
test <- read.csv("../input/test.csv")

## ----3---------------------------------------------------------------------
# If we need to look at them:
#View(train)
#View(test)

## ----4---------------------------------------------------------------------
# Inspect the structure of the data.
str(train)

## ----5---------------------------------------------------------------------
# Read text columns as character strings instead of factors.
#train <- read.csv("train.csv", stringsAsFactors=FALSE)

## ----6---------------------------------------------------------------------
# Basic summary of a single column.
table(train$Survived)

## ----7---------------------------------------------------------------------
# The same as a proportion.
prop.table(table(train$Survived))

## ----8---------------------------------------------------------------------
# Baseline prediction: everybody perishes. Using nrow(test) instead of the
# hard-coded 418 keeps this correct if the test set ever changes size.
test$Survived <- rep(0, nrow(test))

## ----9---------------------------------------------------------------------
# Build the submission data frame and write the CSV.
submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived)
write.csv(submit, file = "theyallperish.csv", row.names = FALSE)

## ----10--------------------------------------------------------------------
# Working directory so everything runs from the same folder.
#setwd(".../Kaggle Titanic")

## ----11--------------------------------------------------------------------
# Re-import the data sets downloaded from Kaggle.
train <- read.csv("../input/train.csv")
test <- read.csv("../input/test.csv")

## ----12--------------------------------------------------------------------
# Another summary, this time with summary().
summary(train$Sex)

## ----13--------------------------------------------------------------------
# Proportions crossed with the survival variable.
prop.table(table(train$Sex, train$Survived))

## ----14--------------------------------------------------------------------
# A trailing 1 gives row-wise percentages; 2 would give column-wise.
prop.table(table(train$Sex, train$Survived),1)

## ----15--------------------------------------------------------------------
# New prediction: all women survive.
test$Survived <- 0
test$Survived[test$Sex == 'female'] <- 1

## ----16--------------------------------------------------------------------
# Generate the file to upload to Kaggle.
submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived)
write.csv(submit, file = "theyallperish.csv", row.names = FALSE)

## ----17--------------------------------------------------------------------
# Look at the Age variable.
summary(train$Age)
# Careful with the NAs; we will assume they fall around the mean age.

## ----18--------------------------------------------------------------------
# Create a CHILD variable for passengers under 18.
train$Child <- 0
train$Child[train$Age < 18] <- 1

## ----19--------------------------------------------------------------------
# Number of survivors under these conditions:
# aggregate(target ~ var1 + var2, data=..., FUN=sum)
aggregate(Survived ~ Child + Sex, data=train, FUN=sum)

## ----20--------------------------------------------------------------------
# Same breakdown as a proportion: survivors divided by group size.
aggregate(Survived ~ Child + Sex, data=train, FUN=function(x) {sum(x)/length(x)})

## ----21--------------------------------------------------------------------
# Bin the Fare variable into 4 classes: <10, 10-20, 20-30, 30+.
# Start with everyone in the top bucket, then reassign downwards.
train$Fare2 <- '30+'
train$Fare2[train$Fare < 30 & train$Fare >= 20] <- '20-30'
train$Fare2[train$Fare < 20 & train$Fare >= 10] <- '10-20'
train$Fare2[train$Fare < 10] <- '<10'

## ----22--------------------------------------------------------------------
# Survival proportions with the fare bucket instead of CHILD.
aggregate(Survived ~ Fare2 + Pclass + Sex, data=train, FUN=function(x) {sum(x)/length(x)})

## ----23--------------------------------------------------------------------
# New prediction: start with nobody surviving.
test$Survived <- 0
# Women survive...
test$Survived[test$Sex == 'female'] <- 1
# ...except women in 3rd class who paid a fare of 20 or more.
# (The original Spanish comment said "fare < 20", which contradicted the
# code; the condition actually applied is Fare >= 20.)
test$Survived[test$Sex == 'female' & test$Pclass == 3 & test$Fare >= 20] <- 0

## ----24--------------------------------------------------------------------
# Create the file to upload to Kaggle.
submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived)
write.csv(submit, file = "theyallperish.csv", row.names = FALSE)

## ----25--------------------------------------------------------------------
# Re-import the original files, without modifications.
#setwd(".../Kaggle Titanic")
train <- read.csv("../input/train.csv")
test <- read.csv("../input/test.csv")

## ----26--------------------------------------------------------------------
# rpart is needed to build decision trees.
library(rpart)

## ----27--------------------------------------------------------------------
# Fit a classification tree on the useful predictor columns
# (fields such as Name are excluded); method="class" requests
# classification rather than regression.
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
             data=train, method="class")

## ----28--------------------------------------------------------------------
# Inspect the tree.
# Inspect the fitted tree with the base plotting functions.
plot(fit)
text(fit)

## ----29--------------------------------------------------------------------
# Optional packages for nicer tree plots.
#install.packages('rattle')
#install.packages('rpart.plot')
#install.packages('RColorBrewer')
#library(rattle)
library(rpart.plot)
library(RColorBrewer)

## ----30--------------------------------------------------------------------
# Draw the tree with the fancier renderer (requires rattle).
#fancyRpartPlot(fit)

## ----31--------------------------------------------------------------------
# Prepare the Kaggle submission for this model:
# predict the class label for every test-set row.
Prediction <- predict(fit, test, type = "class")

## ----32--------------------------------------------------------------------
# Generate the submission files.
#submit <- data.frame(PassengerId = test$PassengerId, Survived = Prediction)
#write.csv(submit, file = "myfirstdtree.csv", row.names = FALSE)

## ----33--------------------------------------------------------------------
train <- read.csv("../input/train.csv")
test <- read.csv("../input/test.csv")

## ----34--------------------------------------------------------------------
# Look for new variables that may help prediction, e.g. the title inside
# the name (Mrs vs Miss hints at marital status).
train$Name[1]

## ----35--------------------------------------------------------------------
# train and test must have identical columns before rbind, so add a
# placeholder Survived column to the test set, then stack them.
test$Survived <- NA
combi <- rbind(train, test)

## ----36--------------------------------------------------------------------
# Work with names as character strings rather than factors.
combi$Name <- as.character(combi$Name)
combi$Name[1]

## ----37--------------------------------------------------------------------
# Extract the title from the name: split on commas and periods.
strsplit(combi$Name[1], split='[,.]')

## ----38--------------------------------------------------------------------
# Drop the [[1]] list wrapper that appears at the front.
strsplit(combi$Name[1], split='[,.]')[[1]]

## ----39--------------------------------------------------------------------
# Select exactly the second piece, which holds the title.
strsplit(combi$Name[1], split='[,.]')[[1]][2]

## ----40--------------------------------------------------------------------
# Apply the extraction to every row with sapply; store as Title.
combi$Title <- sapply(combi$Name, FUN=function(x) {strsplit(x, split='[,.]')[[1]][2]})

## ----41--------------------------------------------------------------------
# Strip the leading space left over from the split.
combi$Title <- sub(' ', '', combi$Title)

## ----42--------------------------------------------------------------------
# Summary of the titles present.
table(combi$Title)

## ----43--------------------------------------------------------------------
# Merge rare or near-duplicate titles to reduce factor levels.
combi$Title[combi$Title %in% c('Mme', 'Mlle')] <- 'Mlle'

## ----44--------------------------------------------------------------------
# Collapse the remaining redundant titles.
combi$Title[combi$Title %in% c('Capt', 'Don', 'Major', 'Sir')] <- 'Sir'
combi$Title[combi$Title %in% c('Dona', 'Lady', 'the Countess', 'Jonkheer')] <- 'Lady'

## ----45--------------------------------------------------------------------
# Convert to a factor so the algorithms recognise it.
combi$Title <- factor(combi$Title)

## ----46--------------------------------------------------------------------
# Derived feature: number of family members travelling together.
combi$FamilySize <- combi$SibSp + combi$Parch + 1

## ----47--------------------------------------------------------------------
# Extract surnames to check whether same-surname families survived.
# English names carry a single surname, so solo travellers can collide.
combi$Surname <- sapply(combi$Name, FUN=function(x) {strsplit(x, split='[,.]')[[1]][1]}) ## ----48-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Para poder trabajar con esta informacion debemos pasarla a strig temporalmente. combi$FamilyID <- paste(as.character(combi$FamilySize), combi$Surname, sep="") ## ----49-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Por el momento categorizamos las familias de un o dos miembros a small. combi$FamilyID[combi$FamilySize <= 2] <- 'Small' ## ----50-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Revisamos esta variable. table(combi$FamilyID) ## ----51-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Vamos a revisar estos datos para limpiarlos un poco. Muchas de una persona. famIDs <- data.frame(table(combi$FamilyID)) ## ----52-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Para analizarlo mejor nos centramos en las familias pequeñas. 
famIDs <- famIDs[famIDs$Freq <= 2,] ## ----53-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Las convertimos en small usando esa tabla con nuestra tabla Combi. combi$FamilyID[combi$FamilyID %in% famIDs$Var1] <- 'Small' #Volvemos a ponerlo como factor. combi$FamilyID <- factor(combi$FamilyID) ## ----54-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Volvemos a separar el train y test de nustro Combi para el algoritmo con el mismo numero de objetos. train <- combi[1:891,] test <- combi[892:1309,] ## ----55-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Con las nuevas variables realizamos el Arbol al igual que hicimos antes. Sin ajustes especiales. fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize + FamilyID, data=train, method="class") ## ----56-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Lo vemos. #fancyRpartPlot(fit) ## ----57-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Generamos el csv para Kaggle. 
#Prediction <- predict(fit, test, type = "class") #submit <- data.frame(PassengerId = test$PassengerId, Survived = Prediction) #write.csv(submit, file = "myfirstdtree.csv", row.names = FALSE) ## ----58-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #R's Random Forest algorithm sample(1:10, replace = TRUE) ## ----59-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- summary(combi$Age) ## ----60-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Agefit <- rpart(Age ~ Pclass + Sex + SibSp + Parch + Fare + Embarked + Title + FamilySize, data=combi[!is.na(combi$Age),], method="anova") combi$Age[is.na(combi$Age)] <- predict(Agefit, combi[is.na(combi$Age),]) ## ----61-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- summary(combi) ## ----62-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- summary(combi$Embarked) ## ----63-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- which(combi$Embarked == '') ## 
----64-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- combi$Embarked[c(62,830)] = "S" #Lo ponemos como factor again. combi$Embarked <- factor(combi$Embarked) ## ----65-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- summary(combi$Fare) ## ----66-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- which(is.na(combi$Fare)) ## ----67-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- combi$Fare[1044] <- median(combi$Fare, na.rm=TRUE) ## ----68-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Incrementamos las small familias a 2 o 3 personas. 
combi$FamilyID2 <- combi$FamilyID combi$FamilyID2 <- as.character(combi$FamilyID2) combi$FamilyID2[combi$FamilySize <= 3] <- 'Small' combi$FamilyID2 <- factor(combi$FamilyID2) ## ----69-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Separamos el contenido train <- combi[1:891,] test <- combi[892:1309,] ## ----70-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #Insamos y activamos el paquete. #install.packages('randomForest') library(randomForest) ## ----71-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- set.seed(415) ## ----72-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- fit <- randomForest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize + FamilyID2, data=train, importance=TRUE, ntree=2000) ## ----73-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- varImpPlot(fit) ## ----74-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Prediction <- predict(fit, test) submit <- data.frame(PassengerId = test$PassengerId, Survived 
= Prediction) write.csv(submit, file = "firstforest.csv", row.names = FALSE) ## ----75-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #install.packages('party') library(party) ## ----76-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- set.seed(415) fit <- cforest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize + FamilyID, data = train, controls=cforest_unbiased(ntree=2000, mtry=3)) ## ----77-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Prediction <- predict(fit, test, OOB=TRUE, type = "response") submit <- data.frame(PassengerId = test$PassengerId, Survived = Prediction) write.csv(submit, file = "firstforest2.csv", row.names = FALSE)
#' Launch the Shiny application for simulating clinical trials with
#' restricted randomization.
#'
#' Locates the app bundled under `inst/shiny-apps/unequal-allocation` in the
#' installed `restricted` package and runs it; errors if the installation is
#' missing the app directory.
#' @export
restricted_app <- function() {
  app_dir <- system.file(
    "shiny-apps", "unequal-allocation",
    package = "restricted"
  )
  # system.file() returns "" when the requested path is not installed.
  if (!nzchar(app_dir)) {
    stop(
      "Could not find example directory. Try re-installing `restricted`.",
      call. = FALSE
    )
  }
  shiny::runApp(app_dir, display.mode = "normal")
}
/R/restricted-app.R
no_license
yevgenryeznik/restricted
R
false
false
379
r
#' R Shiny application to simulate clinical trials with restricted randomization #' @export restricted_app <- function() { appDir <- system.file("shiny-apps", "unequal-allocation", package = "restricted") if (appDir == "") { stop("Could not find example directory. Try re-installing `restricted`.", call. = FALSE) } shiny::runApp(appDir, display.mode = "normal") }
#' @title Get predictions from resample results.
#'
#' @description
#' Very simple getter.
#'
#' @param res [\code{ResampleResult}]\cr
#'   The result of \code{\link{resample}} run with \code{keep.pred = TRUE}.
#' @return [\code{ResamplePrediction}].
#' @export
#' @family resample
getRRPredictions = function(res) {
  # `pred` is NULL exactly when resample() was run with keep.pred = FALSE.
  if (is.null(res$pred))
    stopf("The 'pred' slot is empty because the ResampleResult was generated with keep.pred = FALSE.")
  else
    res$pred
}

#' @title Get task description from resample results.
#'
#' @description
#' Get a summarizing task description.
#'
#' @param res [\code{ResampleResult}]\cr
#'   The result of \code{\link{resample}}.
#' @return [\code{TaskDesc}].
#' @export
#' @family resample
getRRTaskDescription = function(res) {
  res$task.desc
}

#' @title Get list of predictions for train and test set of each single resample iteration.
#'
#' @description
#' This function creates a list with two slots \code{train} and \code{test} where
#' each slot is again a list of \code{\link{Prediction}} objects for each single
#' resample iteration.
#' In case that \code{predict = "train"} was used for the resample description
#' (see \code{\link{makeResampleDesc}}), the slot \code{test} will be \code{NULL}
#' and in case that \code{predict = "test"} was used, the slot \code{train} will be
#' \code{NULL}.
#'
#' @param res [\code{ResampleResult}]\cr
#'   The result of \code{\link{resample}} run with \code{keep.pred = TRUE}.
#' @param ... [any]\cr
#'   Further options passed to \code{\link{makePrediction}}.
#' @return [list].
#' @export
#' @family resample
getRRPredictionList = function(res, ...) {
  assertClass(res, "ResampleResult")
  # We need to force keep.pred = TRUE (will be checked in getRRPredictions)
  pred = getRRPredictions(res)
  predict.type = pred$predict.type
  time = pred$time
  task.desc = getRRTaskDescription(res)

  # split by train and test set; `set` holds the factor levels present
  # (some subset of c("train", "test"), depending on the resample desc)
  set = levels(pred$data$set)

  # get prediction objects for train and test set
  prediction = lapply(set, function(s) {
    # split by resample iterations
    p.split = pred$data[pred$data$set == s,, drop = FALSE]
    p.split = split(p.split, as.factor(p.split$iter))
    # create prediction object for each resample iteration
    p.split = lapply(p.split, function (p) {
      # get predictions based on predict.type
      if (predict.type == "prob") {
        # keep only the per-class probability columns ("prob.<level>")
        y = p[, stri_startswith_fixed(colnames(p), "prob."), drop = FALSE]
        # we need to remove the "prob." part in the colnames, otherwise
        # makePrediction thinks that the factor starts with "prob."
        colnames(y) = stri_replace_first_fixed(colnames(y), "prob.", replacement = "")
      } else {
        y = p$response
      }
      # time is filled with NA here and patched in below, because the per-
      # iteration timings live in pred$time, not in the per-row data
      makePrediction(task.desc, id = p$id, truth = p$truth, y = y, row.names = p$id,
        predict.type = predict.type, time = NA_real_, ...)
    })
    # add time info afterwards
    for(i in seq_along(p.split))
      p.split[[i]]$time = time[i]
    return(p.split)
  })

  # normalize to a list that always has both slots, in a fixed order;
  # the slot that was not predicted stays NULL
  ret = setNames(prediction, set)
  if (is.null(ret$train)) ret = append(ret, list(train = NULL))
  if (is.null(ret$test)) ret = append(ret, list(test = NULL))
  return(ret[c("train", "test")])
}

#' @title Compute new measures for existing ResampleResult
#' @description
#' Adds new measures to an existing \code{ResampleResult}.
#' @param res [\code{ResampleResult}]\cr
#'   The result of \code{\link{resample}} run with \code{keep.pred = TRUE}.
#' @template arg_measures
#' @return [\code{\link{ResampleResult}}].
#' @export
#' @family resample
addRRMeasure = function(res, measures) {
  assertClass(res, "ResampleResult")
  # allow a single Measure to be passed without wrapping it in a list
  if (inherits(measures, "Measure")) measures = list(measures)

  # check if measures are missing in ResampleResult object
  # (vcapply/vlapply/vnapply are mlr's typed sapply variants, defined
  # elsewhere in the package)
  measures.id = vcapply(measures, function(x) x$id)
  missing.measures = setdiff(measures.id, colnames(res$measures.test))

  # if there are missing measures
  if (length(missing.measures) != 0) {
    # get list of prediction objects per iteration from resample result
    pred = getRRPredictionList(res)
    # recompute missing performance for train and/or test set;
    # only sets that were actually predicted (non-NULL) are recomputed
    set = names(pred)[!vlapply(pred, is.null)]
    perf = setNames(lapply(set, function(s) {
      # one row per resample iteration, one column per measure
      as.data.frame(do.call("rbind", lapply(pred[[s]], function(p) {
        ret = performance(p, measures)
        matrix(ret, ncol = length(measures), dimnames = list(NULL, names(ret)))
      })))
    }), set)

    # add missing measures to resample result; sets that were not predicted
    # get NA columns so measures.train/measures.test stay aligned
    if (is.null(perf$train))
      res$measures.train[, missing.measures] = NA
    else
      res$measures.train = cbind(res$measures.train, perf$train[, missing.measures, drop = FALSE])
    if (is.null(perf$test))
      res$measures.test[, missing.measures] = NA
    else
      res$measures.test = cbind(res$measures.test, perf$test[, missing.measures, drop = FALSE])

    # aggregate the freshly computed per-iteration values with each
    # measure's own aggregation function and append to res$aggr
    aggr = vnapply(measures[measures.id %in% missing.measures], function(m) {
      m$aggr$fun(task = NULL,
        perf.test = res$measures.test[, m$id],
        perf.train = res$measures.train[, m$id],
        measure = m, pred = getRRPredictions(res),
        group = res$pred$instance$group)
    })
    names(aggr) = vcapply(measures[measures.id %in% missing.measures], measureAggrName)
    res$aggr = c(res$aggr, aggr)
  }
  return(res)
}
/R/ResampleResult_operators.R
no_license
HeidiSeibold/mlr
R
false
false
5,287
r
#' @title Get predictions from resample results. #' #' @description #' Very simple getter. #' #' @param res [\code{ResampleResult}]\cr #' The result of \code{\link{resample}} run with \code{keep.pred = TRUE}. #' @return [\code{ResamplePrediction}]. #' @export #' @family resample getRRPredictions = function(res) { if (is.null(res$pred)) stopf("The 'pred' slot is empty because the ResampleResult was generated with keep.pred = FALSE.") else res$pred } #' @title Get task description from resample results. #' #' @description #' Get a summarizing task description. #' #' @param res [\code{ResampleResult}]\cr #' The result of \code{\link{resample}}. #' @return [\code{TaskDesc}]. #' @export #' @family resample getRRTaskDescription = function(res) { res$task.desc } #' @title Get list of predictions for train and test set of each single resample iteration. #' #' @description #' This function creates a list with two slots \code{train} and \code{test} where #' each slot is again a list of \code{\link{Prediction}} objects for each single #' resample iteration. #' In case that \code{predict = "train"} was used for the resample description #' (see \code{\link{makeResampleDesc}}), the slot \code{test} will be \code{NULL} #' and in case that \code{predict = "test"} was used, the slot \code{train} will be #' \code{NULL}. #' #' @param res [\code{ResampleResult}]\cr #' The result of \code{\link{resample}} run with \code{keep.pred = TRUE}. #' @param ... [any]\cr #' Further options passed to \code{\link{makePrediction}}. #' @return [list]. #' @export #' @family resample getRRPredictionList = function(res, ...) 
{ assertClass(res, "ResampleResult") # We need to force keep.pred = TRUE (will be checked in getRRPredictions) pred = getRRPredictions(res) predict.type = pred$predict.type time = pred$time task.desc = getRRTaskDescription(res) # split by train and test set set = levels(pred$data$set) # get prediction objects for train and test set prediction = lapply(set, function(s) { # split by resample iterations p.split = pred$data[pred$data$set == s,, drop = FALSE] p.split = split(p.split, as.factor(p.split$iter)) # create prediction object for each resample iteration p.split = lapply(p.split, function (p) { # get predictions based on predict.type if (predict.type == "prob") { y = p[, stri_startswith_fixed(colnames(p), "prob."), drop = FALSE] # we need to remove the "prob." part in the colnames, otherwise # makePrediction thinks that the factor starts with "prob." colnames(y) = stri_replace_first_fixed(colnames(y), "prob.", replacement = "") } else { y = p$response } makePrediction(task.desc, id = p$id, truth = p$truth, y = y, row.names = p$id, predict.type = predict.type, time = NA_real_, ...) }) # add time info afterwards for(i in seq_along(p.split)) p.split[[i]]$time = time[i] return(p.split) }) ret = setNames(prediction, set) if (is.null(ret$train)) ret = append(ret, list(train = NULL)) if (is.null(ret$test)) ret = append(ret, list(test = NULL)) return(ret[c("train", "test")]) } #' @title Compute new measures for existing ResampleResult #' @description #' Adds new measures to an existing \code{ResampleResult}. #' @param res [\code{ResampleResult}]\cr #' The result of \code{\link{resample}} run with \code{keep.pred = TRUE}. #' @template arg_measures #' @return [\code{\link{ResampleResult}}]. 
#' @export #' @family resample addRRMeasure = function(res, measures) { assertClass(res, "ResampleResult") if (inherits(measures, "Measure")) measures = list(measures) # check if measures are missing in ResampleResult object measures.id = vcapply(measures, function(x) x$id) missing.measures = setdiff(measures.id, colnames(res$measures.test)) # if there are missing measures if (length(missing.measures) != 0) { # get list of prediction objects per iteration from resample result pred = getRRPredictionList(res) # recompute missing performance for train and/or test set set = names(pred)[!vlapply(pred, is.null)] perf = setNames(lapply(set, function(s) { as.data.frame(do.call("rbind", lapply(pred[[s]], function(p) { ret = performance(p, measures) matrix(ret, ncol = length(measures), dimnames = list(NULL, names(ret))) }))) }), set) # add missing measures to resample result if (is.null(perf$train)) res$measures.train[, missing.measures] = NA else res$measures.train = cbind(res$measures.train, perf$train[, missing.measures, drop = FALSE]) if (is.null(perf$test)) res$measures.test[, missing.measures] = NA else res$measures.test = cbind(res$measures.test, perf$test[, missing.measures, drop = FALSE]) aggr = vnapply(measures[measures.id %in% missing.measures], function(m) { m$aggr$fun(task = NULL, perf.test = res$measures.test[, m$id], perf.train = res$measures.train[, m$id], measure = m, pred = getRRPredictions(res), group = res$pred$instance$group) }) names(aggr) = vcapply(measures[measures.id %in% missing.measures], measureAggrName) res$aggr = c(res$aggr, aggr) } return(res) }
# UI: range slider for life expectancy, continent picker, a CSV download
# button, a scatter plot, and an interactive data table (DT).
ui <- fluidPage(
  h1("Gapminder"),
  sliderInput(inputId = "life", label = "Life expectancy",
              min = 0, max = 120, value = c(30, 50)),
  selectInput("continent", "Continent",
              choices = c("All", levels(gapminder$continent))),
  downloadButton("download_data"),
  plotOutput("plot"),
  # DT's table output instead of the static tableOutput()
  DT::dataTableOutput("table")
)

server <- function(input, output) {
  # Rows of gapminder matching the selected life-expectancy range and,
  # unless "All" is chosen, the selected continent.
  filtered_data <- reactive({
    lo <- input$life[1]
    hi <- input$life[2]
    d <- subset(gapminder, lifeExp >= lo & lifeExp <= hi)
    if (input$continent != "All") {
      d <- subset(d, continent == input$continent)
    }
    d
  })

  # Interactive table rendered with DT.
  output$table <- DT::renderDataTable({
    filtered_data()
  })

  # GDP per capita (log x scale) vs life expectancy.
  output$plot <- renderPlot({
    ggplot(filtered_data(), aes(gdpPercap, lifeExp)) +
      geom_point() +
      scale_x_log10()
  })

  # CSV download of the currently filtered rows.
  output$download_data <- downloadHandler(
    filename = "gapminder_data.csv",
    content = function(file) {
      write.csv(filtered_data(), file, row.names = FALSE)
    }
  )
}

shinyApp(ui, server)
/shiny/Make the table interactive.R
no_license
jyeazell/DataCamp_practice
R
false
false
1,398
r
ui <- fluidPage( h1("Gapminder"), sliderInput(inputId = "life", label = "Life expectancy", min = 0, max = 120, value = c(30, 50)), selectInput("continent", "Continent", choices = c("All", levels(gapminder$continent))), downloadButton("download_data"), plotOutput("plot"), # Replace the tableOutput() with DT's version DT::dataTableOutput("table") ) server <- function(input, output) { filtered_data <- reactive({ data <- gapminder data <- subset( data, lifeExp >= input$life[1] & lifeExp <= input$life[2] ) if (input$continent != "All") { data <- subset( data, continent == input$continent ) } data }) # Replace the renderTable() with DT's version output$table <- DT::renderDataTable({ data <- filtered_data() data }) output$download_data <- downloadHandler( filename = "gapminder_data.csv", content = function(file) { data <- filtered_data() write.csv(data, file, row.names = FALSE) } ) output$plot <- renderPlot({ data <- filtered_data() ggplot(data, aes(gdpPercap, lifeExp)) + geom_point() + scale_x_log10() }) } shinyApp(ui, server)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_token.R
\name{get_token}
\alias{get_token}
\title{Generate access token}
\usage{
get_token(base_url, client_id, client_secret, redirect_url, code)
}
\arguments{
\item{base_url}{A url. [The base URL of your Zoho Account](https://accounts.zoho.com/oauth/serverinfo) For example, it's accounts.zoho.com if your account belongs to Zoho's US DC.}

\item{client_id}{A string. The client credentials that were generated when you registered the [client application](https://www.zoho.com/creator/help/api/v2/register-client.html)}

\item{client_secret}{A string. The client credentials that were generated when you registered the [client application](https://www.zoho.com/creator/help/api/v2/register-client.html)}

\item{redirect_url}{A url. One of the authorized redirect URIs that you associated while registering the [client application.](https://www.zoho.com/creator/help/api/v2/register-client.html)}

\item{code}{The authorization code that was generated upon making the [authorization request](https://www.zoho.com/creator/help/api/v2/authorization-request.html)}
}
\value{
A list with a token and the refresh token that the requesting user will need to access the resources that correspond to the scopes that were included while making the authorization request.
}
\description{
Once the client application receives an authorization code (see `generate_auth_code()`), it can exchange it for an access token. See [possible errors](https://www.zoho.com/creator/help/api/v2/generate-token.html)
}
\details{
*Note*:

* An access token is valid for only an hour and can be used only to perform the operations defined by the scopes that were included while making the authorization request.

* A refresh token has no expiry. However, it can be revoked. Its purpose is to refresh the access token upon its expiry.

* A maximum of five refresh tokens can be generated per minute.
}
/man/get_token.Rd
no_license
araupontones/zohor
R
false
true
1,951
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_token.R \name{get_token} \alias{get_token} \title{Generate access token} \usage{ get_token(base_url, client_id, client_secret, redirect_url, code) } \arguments{ \item{base_url}{A url. [The base URL of your Zoho Account](https://accounts.zoho.com/oauth/serverinfo) For example, it's accounts.zoho.com if your account belongs to Zoho's US DC.} \item{client_id}{A string. The client credentials that were generated when you registered the [client application](https://www.zoho.com/creator/help/api/v2/register-client.html) @param client_secret A string. The client credentials that were generated when you registered the [client application](https://www.zoho.com/creator/help/api/v2/register-client.html)} \item{redirect_url}{A url. One of the authorized redirect URIs that you associated while registering the [client application.](https://www.zoho.com/creator/help/api/v2/register-client.html)} \item{code}{The authorization code that was generated upon making the [authorization request](https://www.zoho.com/creator/help/api/v2/authorization-request.html)} } \value{ A list with a token and the refresh token that the requesting user will need to access the resources that correspond to the scopes that were included while making the authorization request. } \description{ Once the client application receives an authorization code (see `generate_auth_code()`), it can exchange for an access token. See [possible errors](https://www.zoho.com/creator/help/api/v2/generate-token.html) } \details{ *Note*: * An access token is valid for only an hour and can be used only to perform the operations defined by the scopes that were included while making the authorization request. * A refresh token has no expiry. However, it can be revoked. It's purpose is to refresh the access token upon its expiry. * A maximum of five refresh tokens can be generated per minute. }
# Multiple Linear Regression, unique intercepts and slopes
# * Multiple Linear regression is the backbone of modern statistics
# * incorporate multiple explanatory variables, such that the number of estimated parameters
#   is less than the sample size.
# * For now only one categorical and one numerical explanatory variable, for simplicity.
# * with this understanding, could explore multiple categorical and numerical explanatory
#   vars
# * picture to motivate the idea
# * example in R

library(ggplot2)
library(dplyr)
library(boot)

# Galapagos finches data: beak width vs middle toe length, by island.
df <- read.csv("https://raw.githubusercontent.com/roualdes/data/master/finches.csv")

# One fitted line per island (unique intercepts AND slopes).
ggplot(df, aes(middletoelength, beakwidth, color = island)) +
  geom_point() +
  geom_smooth(method="lm", formula = "y ~ x", se=FALSE)

# island * middletoelength = main effects plus interaction, i.e. a separate
# intercept and slope for each island.
fit <- lm(beakwidth ~ island * middletoelength, data = df)
beta <- coef(fit)

## hat(beakwidth) = beta[1] + beta[2] * sancristobal + beta[3] * santacruz +
##   beta[4] * middletoelength + beta[5] * sancristobal * middletoelength +
##   beta[6] * santacruz * middletoelength

## predicted beak width for a san cristobal finch with middletoelength of 19
sum(beta * c(1, 1, 0, 19, 1 * 19, 0))

# Fitted values computed by hand from the design matrix (row-wise dot
# products with beta), to make the linear-algebra view concrete.
X <- model.matrix(fit)
df$yhat <- apply(X, 1, function(row) sum(beta * row))

ggplot(df, aes(middletoelength, beakwidth, color = island)) +
  geom_point() +
  geom_line(aes(y = yhat))

# Bootstrap statistic: refit the model on a resampled data set and return
# the coefficient vector.
# FIX: use the `data` argument that boot() supplies instead of the global
# `df` — resampling `df[idx, ]` only works here by coincidence (data == df)
# and silently breaks if the statistic is reused with other data.
boot_mlr <- function(data, idx) {
  fit <- lm(beakwidth ~ island * middletoelength, data = data[idx, ])
  coef(fit)
}

b <- boot(df, boot_mlr, 1001)

# index = 3 picks beta[3], the santa cruz intercept offset.
boot.ci(b, type = "perc", index = 3)
# We are 95% confident that when middle toe length is equal to 0, we expect finches from the
# island santa cruz to have a beak width larger than finches from the island floreana by
# between -7.1 and 13.1 cm.

# index = 6 picks beta[6], the santa cruz slope offset.
boot.ci(b, type = "perc", index = 6)
# We are 95% confident that for every 1 cm increase in middle toe length, the increase in
# beak width for finches from the island santa cruz is greater than the increase in beak
# width for finches from the island floreana by between -0.7 and 0.4 cm.
# Percentile CI for beta[5] (sancristobal slope shift).
boot.ci(b, type = "perc", index = 5)

# Bootstrap a predicted mean response: refit the interaction model on each
# resample and return the fitted value x' * beta for a user-supplied design
# row `x`.
#
# Args:
#   data: data frame supplied by boot() (resampled through idx)
#   idx:  row indices of the bootstrap resample
#   x:    numeric vector of the same length as coef(fit), i.e. one row of the
#         model matrix (intercept, island dummies, toe length, interactions)
#
# Returns: the scalar predicted beak width for design row `x`.
boot_mlr_predictions <- function(data, idx, x) {
  # BUG FIX 1: the response was misspelled "breakwidth"; the column in the
  # finches data is "beakwidth".
  # BUG FIX 2: `x` is supplied by boot(..., x = ...) and must therefore be a
  # formal parameter of the statistic; previously it was a free variable and
  # the call errored. Also subset the `data` argument rather than global df.
  fit <- lm(beakwidth ~ island * middletoelength, data = data[idx, ])
  beta <- coef(fit)
  sum(beta * x) # x needs to be a vector of the same length as beta
}

# index is which beta you are using
b <- boot(df, boot_mlr_predictions, 1001, x = c(1, 1, 0, 19, 1 * 19, 0))
boot.ci(b, type = "perc")
# We are 95% confident that when a finch from san cristobal has middle toe
# length of 19cm we expect the beak width to be between 10 and 11.1 cm.
/notes/intro-multiple-linear-regression.R
no_license
Niyy/math314-notes
R
false
false
2,502
r
# Multiple Linear Regression, unique intercepts and slopes # * Multiple Linear regression is the backbone of modern statistics # * incorporate multiple explanatory variables, such that the number of estimated parmaters #is less than the sample size. # * For now only one categorical and one numerical explanatory variable, for simplicity. # * with this understanding, could explore multiple categorical and numerical explanetory #vars # * picture to motivate the idea # * example in R library(ggplot2) library(dplyr) library(boot) df <- read.csv("https://raw.githubusercontent.com/roualdes/data/master/finches.csv") ggplot(df, aes(middletoelength, beakwidth, color = island)) + geom_point() + geom_smooth(method="lm", formula = "y ~ x", se=FALSE) fit <- lm(beakwidth ~ island * middletoelength, data = df) beta <- coef(fit) ## hat(beakwidth) = beta[1] + beta[2] * sancristobal + beta[3] * santacruz ## beta[4] * middletoelength + beta[5] * sancristobal * middletoelength + ## beta[6] * santacruz * middletoelength ## san cristobal with middletoelength of 19 sum(beta * c(1, 1, 0, 19, 1 * 19, 0)) X <- model.matrix(fit) df$yhat <- apply(X, 1, function(row) sum(beta * row)) ggplot(df, aes(middletoelength, beakwidth, color = island)) + geom_point() + geom_line(aes(y = yhat)) boot_mlr <- function(data, idx) { fit <- lm(beakwidth ~ island * middletoelength, data = df[idx, ]) coef(fit) } b <- boot(df, boot_mlr, 1001) boot.ci(b, type = "perc", index = 3) # We are 95% confident that when middle toe length is equal to 0, we expect finches from the # island santa cruz to have a beak width larger than finches from the island floreana by # between -7.1 and 13.1 cm. boot.ci(b, type = "perc", index = 6) # We are 95% confident that for every 1 cm increase in middle toe length, the increase in # beak width for finches from the island santa cruz is greater than the increase in beak # width for finches from the island floreana by between -0.7 and 0.4 cm. 
# Percentile CI for beta[5] (sancristobal slope shift).
boot.ci(b, type = "perc", index = 5)

# Bootstrap a predicted mean response: refit the interaction model on each
# resample and return the fitted value x' * beta for a user-supplied design
# row `x`.
#
# Args:
#   data: data frame supplied by boot() (resampled through idx)
#   idx:  row indices of the bootstrap resample
#   x:    numeric vector of the same length as coef(fit), i.e. one row of the
#         model matrix (intercept, island dummies, toe length, interactions)
#
# Returns: the scalar predicted beak width for design row `x`.
boot_mlr_predictions <- function(data, idx, x) {
  # BUG FIX 1: the response was misspelled "breakwidth"; the column in the
  # finches data is "beakwidth".
  # BUG FIX 2: `x` is supplied by boot(..., x = ...) and must therefore be a
  # formal parameter of the statistic; previously it was a free variable and
  # the call errored. Also subset the `data` argument rather than global df.
  fit <- lm(beakwidth ~ island * middletoelength, data = data[idx, ])
  beta <- coef(fit)
  sum(beta * x) # x needs to be a vector of the same length as beta
}

# index is which beta you are using
b <- boot(df, boot_mlr_predictions, 1001, x = c(1, 1, 0, 19, 1 * 19, 0))
boot.ci(b, type = "perc")
# We are 95% confident that when a finch from san cristobal has middle toe
# length of 19cm we expect the beak width to be between 10 and 11.1 cm.
library(rhoR) ### Name: kappa ### Title: Calculate kappa ### Aliases: kappa ### Keywords: kappa ### ** Examples #Given a code set kappa(data = codeSet) #Given a contingency Table kappa(data = contingencyTable)
/data/genthat_extracted_code/rhoR/examples/kappa.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
219
r
library(rhoR) ### Name: kappa ### Title: Calculate kappa ### Aliases: kappa ### Keywords: kappa ### ** Examples #Given a code set kappa(data = codeSet) #Given a contingency Table kappa(data = contingencyTable)
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/visNDVI.R \name{visNDVI} \alias{visNDVI} \title{Visualize point coordinates with one NDVI map} \usage{ visNDVI(yrs, wks, lonlim, latlim, lns = T) } \arguments{ \item{yrs}{vector c() or sequence seq() of years to be visualized} \item{wks}{vector c() or sequence seq() of weeks to be visualized} \item{lonlim}{longitude range c(min,max) for the plot} \item{latlim}{latitude range c(min,max) for the plot} } \value{ A multi-panel graph of the NDVI rasters is plotted. } \description{ Point coordinates are visualized with a weekly NDVI map of choice. }
/man/visNDVI.Rd
no_license
tavimalara/rndvi
R
false
false
665
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/visNDVI.R \name{visNDVI} \alias{visNDVI} \title{Visualize point coordinates with one NDVI map} \usage{ visNDVI(yrs, wks, lonlim, latlim, lns = T) } \arguments{ \item{yrs}{vector c() or sequence seq() of years to be visualized} \item{wks}{vector c() or sequence seq() of weeks to be visualized} \item{lonlim}{longitude range c(min,max) for the plot} \item{latlim}{latitude range c(min,max) for the plot} } \value{ A multi-panel graph of the NDVI rasters is plotted. } \description{ Point coordinates are visualized with a weekly NDVI map of choice. }
#' Minimal FIFO queue with reference semantics.
#'
#' Implemented as an environment holding `front`/`rear` pointers to linked
#' nodes; each node is itself an environment with `value` and `next_element`.
#'
#' Members:
#'   $enqueue(value)  append `value` at the rear.
#'   $dequeue()       remove and return the value at the front.
#'   $is_empty()      TRUE when the queue holds no elements.
#' @keywords internal
queue <- function() {
  self <- new.env(parent = emptyenv())
  # Sentinel empty environments: is_empty() tests their (NULL) `value` field.
  self$front <- new.env(parent = emptyenv())
  self$rear <- new.env(parent = emptyenv())

  self$enqueue <- function(value) {
    temp_node <- new.env(parent = emptyenv())
    temp_node$value <- value
    temp_node$next_element <- NULL

    if (is.null(self$front$value)) {
      # Queue is empty: the new node becomes both front and rear.
      self$front <- temp_node
      self$rear <- temp_node
    } else {
      self$rear$next_element <- temp_node
      self$rear <- temp_node
    }
  }

  self$dequeue <- function() {
    temp_node <- self$front
    if (identical(self$front, self$rear)) {
      # Removing the last node resets the queue to the empty state.
      self$front <- NULL
      self$rear <- NULL
    } else {
      self$front <- self$front$next_element
    }
    temp_node$value
  }

  self$is_empty <- function() {
    is.null(self$front$value) && is.null(self$rear$value)
  }

  class(self) <- 'queue'
  self
}

#' Simple string-keyed map modelled after java.util.Map, backed by an
#' environment (keys are coerced to character with as.character()).
#'
#' Members: $clear(), $containsKey(key), $containsValue(value), $entrySet(),
#' $get(key), $keySet(), $put(key, value) (returns the previous value or
#' NULL), $putAll(named list), $size(), $values().
#' @keywords internal
option_map <- function() {
  self <- new.env(parent = emptyenv())
  self$envir <- new.env(parent = emptyenv())

  self$clear <- function() {
    # Drop all entries by replacing the backing environment wholesale.
    self$envir <- new.env(parent = emptyenv())
    invisible(self)
  }

  self$containsKey <- function(key) {
    exists(as.character(key), envir = self$envir, inherits = FALSE)
  }

  self$containsValue <- function(value) {
    # Position() returns NA (nomatch default) when no element matches.
    !is.na(Position(function(v) value == v, self$values()))
  }

  self$entrySet <- function() {
    setNames(
      lapply(self$keySet(), function(k) self$get(k)),
      self$keySet()
    )
  }

  self$get <- function(key) {
    get0(as.character(key), envir = self$envir, inherits = FALSE, ifnotfound = NULL)
  }

  self$keySet <- function() {
    ls(self$envir)
  }

  self$put <- function(key, value) {
    key <- as.character(key)
    prev <- self$get(key)
    assign(key, value = value, envir = self$envir, inherits = FALSE)
    prev
  }

  self$putAll <- function(m) {
    switch(
      class(m),
      'list' = for (k in names(m)) self$put(k, m[[k]]),
      stop('Unsupported class `', class(m), '`', call. = FALSE)
    )
    invisible(self)
  }

  self$size <- function() {
    length(self$keySet())
  }

  self$values <- function() {
    # BUG FIX: previously read `private$envir` (an R6-ism); no `private`
    # object exists in this closure, so values() -- and containsValue(),
    # which calls it -- always errored with "object 'private' not found".
    lapply(self$keySet(), function(k) self$envir[[k]])
  }

  class(self) <- 'option_map'
  self
}
/R/map.R
no_license
travisbyrum/clinkr
R
false
false
2,219
r
#' Minimal FIFO queue with reference semantics.
#'
#' Implemented as an environment holding `front`/`rear` pointers to linked
#' nodes; each node is itself an environment with `value` and `next_element`.
#'
#' Members:
#'   $enqueue(value)  append `value` at the rear.
#'   $dequeue()       remove and return the value at the front.
#'   $is_empty()      TRUE when the queue holds no elements.
#' @keywords internal
queue <- function() {
  self <- new.env(parent = emptyenv())
  # Sentinel empty environments: is_empty() tests their (NULL) `value` field.
  self$front <- new.env(parent = emptyenv())
  self$rear <- new.env(parent = emptyenv())

  self$enqueue <- function(value) {
    temp_node <- new.env(parent = emptyenv())
    temp_node$value <- value
    temp_node$next_element <- NULL

    if (is.null(self$front$value)) {
      # Queue is empty: the new node becomes both front and rear.
      self$front <- temp_node
      self$rear <- temp_node
    } else {
      self$rear$next_element <- temp_node
      self$rear <- temp_node
    }
  }

  self$dequeue <- function() {
    temp_node <- self$front
    if (identical(self$front, self$rear)) {
      # Removing the last node resets the queue to the empty state.
      self$front <- NULL
      self$rear <- NULL
    } else {
      self$front <- self$front$next_element
    }
    temp_node$value
  }

  self$is_empty <- function() {
    is.null(self$front$value) && is.null(self$rear$value)
  }

  class(self) <- 'queue'
  self
}

#' Simple string-keyed map modelled after java.util.Map, backed by an
#' environment (keys are coerced to character with as.character()).
#'
#' Members: $clear(), $containsKey(key), $containsValue(value), $entrySet(),
#' $get(key), $keySet(), $put(key, value) (returns the previous value or
#' NULL), $putAll(named list), $size(), $values().
#' @keywords internal
option_map <- function() {
  self <- new.env(parent = emptyenv())
  self$envir <- new.env(parent = emptyenv())

  self$clear <- function() {
    # Drop all entries by replacing the backing environment wholesale.
    self$envir <- new.env(parent = emptyenv())
    invisible(self)
  }

  self$containsKey <- function(key) {
    exists(as.character(key), envir = self$envir, inherits = FALSE)
  }

  self$containsValue <- function(value) {
    # Position() returns NA (nomatch default) when no element matches.
    !is.na(Position(function(v) value == v, self$values()))
  }

  self$entrySet <- function() {
    setNames(
      lapply(self$keySet(), function(k) self$get(k)),
      self$keySet()
    )
  }

  self$get <- function(key) {
    get0(as.character(key), envir = self$envir, inherits = FALSE, ifnotfound = NULL)
  }

  self$keySet <- function() {
    ls(self$envir)
  }

  self$put <- function(key, value) {
    key <- as.character(key)
    prev <- self$get(key)
    assign(key, value = value, envir = self$envir, inherits = FALSE)
    prev
  }

  self$putAll <- function(m) {
    switch(
      class(m),
      'list' = for (k in names(m)) self$put(k, m[[k]]),
      stop('Unsupported class `', class(m), '`', call. = FALSE)
    )
    invisible(self)
  }

  self$size <- function() {
    length(self$keySet())
  }

  self$values <- function() {
    # BUG FIX: previously read `private$envir` (an R6-ism); no `private`
    # object exists in this closure, so values() -- and containsValue(),
    # which calls it -- always errored with "object 'private' not found".
    lapply(self$keySet(), function(k) self$envir[[k]])
  }

  class(self) <- 'option_map'
  self
}
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/30_admm_lasso.R \name{admm_lasso} \alias{admm_lasso} \title{Fitting A Lasso Model Using ADMM Algorithm} \usage{ admm_lasso(x, y, intercept = TRUE, standardize = TRUE, ...) } \arguments{ \item{x}{The data matrix} \item{y}{The response vector} \item{intercept}{Whether to fit an intercept in the model. Default is \code{TRUE}.} \item{standardize}{Whether to standardize the explanatory variables before fitting the model. Default is \code{TRUE}. Fitted coefficients are always returned on the original scale.} } \description{ Lasso is a popular variable selection technique in high dimensional regression analysis, which tries to find the coefficient vector \eqn{\beta} that minimizes \deqn{\frac{1}{2n}\Vert y-X\beta\Vert_2^2+\lambda\Vert\beta\Vert_1}{ 1/(2n) * ||y - X * \beta||_2^2 + \lambda * ||\beta||_1} Here \eqn{n} is the sample size and \eqn{\lambda} is a regularization parameter that controls the sparseness of \eqn{\beta}. This function will not directly conduct the computation, but rather returns an object of class "\code{ADMM_Lasso}" that contains several member functions that actually construct and fit the model. Member functions that are callable from this object are listed below: \tabular{ll}{ \code{$penalty()} \tab Specify the penalty parameter. See section \strong{Setting Penalty Parameter} for details.\cr \code{$parallel()} \tab Specify the number of threads for parallel computing. See section \strong{Parallel Computing} for details.\cr \code{$opts()} \tab Setting additional options. See section \strong{Additional Options} for details.\cr \code{$fit()} \tab Fit the model and do the actual computation. See section \strong{Model Fitting} for details. 
} } \section{Setting Penalty Parameter}{ The penalty parameter \eqn{\lambda} can be set through the member function \code{$penalty()}, with the usage and parameters given below: \preformatted{ model$penalty(lambda = NULL, nlambda = 100, lambda_min_ratio, ...) } \describe{ \item{\code{lambda}}{A user provided sequence of \eqn{\lambda}. If set to \code{NULL}, the program will calculate its own sequence according to \code{nlambda} and \code{lambda_min_ratio}, which starts from \eqn{\lambda_0} (with this \eqn{\lambda} all coefficients will be zero) and ends at \code{lambda0 * lambda_min_ratio}, containing \code{nlambda} values equally spaced in the log scale. It is recommended to set this parameter to be \code{NULL} (the default).} \item{\code{nlambda}}{Number of values in the \eqn{\lambda} sequence. Only used when the program calculates its own \eqn{\lambda} (by setting \code{lambda = NULL}).} \item{\code{lambda_min_ratio}}{Smallest value in the \eqn{\lambda} sequence as a fraction of \eqn{\lambda_0}. See the explanation of the \code{lambda} argument. This parameter is only used when the program calculates its own \eqn{\lambda} (by setting \code{lambda = NULL}). The default value is the same as \pkg{glmnet}: 0.0001 if \code{nrow(x) >= ncol(x)} and 0.01 otherwise.} } This member function will implicitly return the "\code{ADMM_Lasso}" object itself. } \section{Parallel Computing}{ The Lasso model can be fitted with parallel computing by setting the number of threads in the \code{$parallel()} member function. The usage of this method is \preformatted{ model$parallel(nthread = 2, ...) } Here \code{model} is the object returned by \code{admm_lasso()}, and \code{nthread} is the number of threads to be used. \code{nthread} must be less than \code{ncol(x) / 5}. \strong{NOTE:} Even in serial version of \code{admm_lasso()}, most matrix operations are implicitly parallelized when proper compiler options are turned on. 
Hence the parallel version of \code{admm_lasso()} is not necessarily faster than the serial one. This member function will implicitly return the "\code{ADMM_Lasso}" object itself. } \section{Additional Options}{ Additional options related to ADMM algorithm can be set through the \code{$opts()} member function of an "\code{ADMM_Lasso}" object. The usage of this method is \preformatted{ model$opts(maxit = 10000, eps_abs = 1e-5, eps_rel = 1e-5, rho = NULL) } Here \code{model} is the object returned by \code{admm_lasso()}. Explanation of the arguments is given below: \describe{ \item{\code{maxit}}{Maximum number of iterations.} \item{\code{eps_abs}}{Absolute tolerance parameter.} \item{\code{eps_rel}}{Relative tolerance parameter.} \item{\code{rho}}{ADMM step size parameter. If set to \code{NULL}, the program will compute a default one.} } This member function will implicitly return the "\code{ADMM_Lasso}" object itself. } \section{Model Fitting}{ Model will be fit after calling the \code{$fit()} member function. This is no argument that needs to be set. The function will return an object of class "\code{ADMM_Lasso_fit}", which contains the following fields: \describe{ \item{\code{lambda}}{The sequence of \eqn{\lambda} to build the solution path.} \item{\code{beta}}{A sparse matrix containing the estimated coefficient vectors, each column for one \eqn{\lambda}. Intercepts are in the first row.} \item{\code{niter}}{Number of ADMM iterations.} } Class "\code{ADMM_Lasso_fit}" also contains a \code{$plot()} member function, which plots the coefficient paths with the sequence of \eqn{\lambda}. See the examples below. 
} \examples{ set.seed(123) n = 100 p = 20 b = runif(p) x = matrix(rnorm(n * p, mean = 1.2, sd = 2), n, p) y = 5 + c(x \%*\% b) + rnorm(n) ## Directly fit the model admm_lasso(x, y)$fit() ## Or, if you want to have more customization: model = admm_lasso(x, y) print(model) ## Specify the lambda sequence model$penalty(nlambda = 20, lambda_min_ratio = 0.01) ## Lower down precision for faster computation model$opts(maxit = 100, eps_rel = 0.001) ## Use parallel computing (not necessary for this small dataset here) # model$parallel(nthread = 2) ## Inspect the updated model setting print(model) ## Fit the model and do the actual computation res = model$fit() res$beta ## Create a solution path plot res$plot() } \author{ Yixuan Qiu <\url{http://statr.me}> }
/man/admm_lasso.Rd
no_license
joegaotao/ADMM
R
false
false
6,767
rd
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/30_admm_lasso.R \name{admm_lasso} \alias{admm_lasso} \title{Fitting A Lasso Model Using ADMM Algorithm} \usage{ admm_lasso(x, y, intercept = TRUE, standardize = TRUE, ...) } \arguments{ \item{x}{The data matrix} \item{y}{The response vector} \item{intercept}{Whether to fit an intercept in the model. Default is \code{TRUE}.} \item{standardize}{Whether to standardize the explanatory variables before fitting the model. Default is \code{TRUE}. Fitted coefficients are always returned on the original scale.} } \description{ Lasso is a popular variable selection technique in high dimensional regression analysis, which tries to find the coefficient vector \eqn{\beta} that minimizes \deqn{\frac{1}{2n}\Vert y-X\beta\Vert_2^2+\lambda\Vert\beta\Vert_1}{ 1/(2n) * ||y - X * \beta||_2^2 + \lambda * ||\beta||_1} Here \eqn{n} is the sample size and \eqn{\lambda} is a regularization parameter that controls the sparseness of \eqn{\beta}. This function will not directly conduct the computation, but rather returns an object of class "\code{ADMM_Lasso}" that contains several member functions that actually construct and fit the model. Member functions that are callable from this object are listed below: \tabular{ll}{ \code{$penalty()} \tab Specify the penalty parameter. See section \strong{Setting Penalty Parameter} for details.\cr \code{$parallel()} \tab Specify the number of threads for parallel computing. See section \strong{Parallel Computing} for details.\cr \code{$opts()} \tab Setting additional options. See section \strong{Additional Options} for details.\cr \code{$fit()} \tab Fit the model and do the actual computation. See section \strong{Model Fitting} for details. 
} } \section{Setting Penalty Parameter}{ The penalty parameter \eqn{\lambda} can be set through the member function \code{$penalty()}, with the usage and parameters given below: \preformatted{ model$penalty(lambda = NULL, nlambda = 100, lambda_min_ratio, ...) } \describe{ \item{\code{lambda}}{A user provided sequence of \eqn{\lambda}. If set to \code{NULL}, the program will calculate its own sequence according to \code{nlambda} and \code{lambda_min_ratio}, which starts from \eqn{\lambda_0} (with this \eqn{\lambda} all coefficients will be zero) and ends at \code{lambda0 * lambda_min_ratio}, containing \code{nlambda} values equally spaced in the log scale. It is recommended to set this parameter to be \code{NULL} (the default).} \item{\code{nlambda}}{Number of values in the \eqn{\lambda} sequence. Only used when the program calculates its own \eqn{\lambda} (by setting \code{lambda = NULL}).} \item{\code{lambda_min_ratio}}{Smallest value in the \eqn{\lambda} sequence as a fraction of \eqn{\lambda_0}. See the explanation of the \code{lambda} argument. This parameter is only used when the program calculates its own \eqn{\lambda} (by setting \code{lambda = NULL}). The default value is the same as \pkg{glmnet}: 0.0001 if \code{nrow(x) >= ncol(x)} and 0.01 otherwise.} } This member function will implicitly return the "\code{ADMM_Lasso}" object itself. } \section{Parallel Computing}{ The Lasso model can be fitted with parallel computing by setting the number of threads in the \code{$parallel()} member function. The usage of this method is \preformatted{ model$parallel(nthread = 2, ...) } Here \code{model} is the object returned by \code{admm_lasso()}, and \code{nthread} is the number of threads to be used. \code{nthread} must be less than \code{ncol(x) / 5}. \strong{NOTE:} Even in serial version of \code{admm_lasso()}, most matrix operations are implicitly parallelized when proper compiler options are turned on. 
Hence the parallel version of \code{admm_lasso()} is not necessarily faster than the serial one. This member function will implicitly return the "\code{ADMM_Lasso}" object itself. } \section{Additional Options}{ Additional options related to ADMM algorithm can be set through the \code{$opts()} member function of an "\code{ADMM_Lasso}" object. The usage of this method is \preformatted{ model$opts(maxit = 10000, eps_abs = 1e-5, eps_rel = 1e-5, rho = NULL) } Here \code{model} is the object returned by \code{admm_lasso()}. Explanation of the arguments is given below: \describe{ \item{\code{maxit}}{Maximum number of iterations.} \item{\code{eps_abs}}{Absolute tolerance parameter.} \item{\code{eps_rel}}{Relative tolerance parameter.} \item{\code{rho}}{ADMM step size parameter. If set to \code{NULL}, the program will compute a default one.} } This member function will implicitly return the "\code{ADMM_Lasso}" object itself. } \section{Model Fitting}{ Model will be fit after calling the \code{$fit()} member function. This is no argument that needs to be set. The function will return an object of class "\code{ADMM_Lasso_fit}", which contains the following fields: \describe{ \item{\code{lambda}}{The sequence of \eqn{\lambda} to build the solution path.} \item{\code{beta}}{A sparse matrix containing the estimated coefficient vectors, each column for one \eqn{\lambda}. Intercepts are in the first row.} \item{\code{niter}}{Number of ADMM iterations.} } Class "\code{ADMM_Lasso_fit}" also contains a \code{$plot()} member function, which plots the coefficient paths with the sequence of \eqn{\lambda}. See the examples below. 
} \examples{ set.seed(123) n = 100 p = 20 b = runif(p) x = matrix(rnorm(n * p, mean = 1.2, sd = 2), n, p) y = 5 + c(x \%*\% b) + rnorm(n) ## Directly fit the model admm_lasso(x, y)$fit() ## Or, if you want to have more customization: model = admm_lasso(x, y) print(model) ## Specify the lambda sequence model$penalty(nlambda = 20, lambda_min_ratio = 0.01) ## Lower down precision for faster computation model$opts(maxit = 100, eps_rel = 0.001) ## Use parallel computing (not necessary for this small dataset here) # model$parallel(nthread = 2) ## Inspect the updated model setting print(model) ## Fit the model and do the actual computation res = model$fit() res$beta ## Create a solution path plot res$plot() } \author{ Yixuan Qiu <\url{http://statr.me}> }
# S3 plot method for "ellipsefit" objects (fitted hysteresis ellipses).
#
# Arguments:
#   a         ellipsefit object holding raw data (x, y), stored predictions
#             (pred.x, pred.y) and a named vector of fitted quantities ($values)
#   putNumber label each observed point with its index
#   values    which group of fitted quantities to print in the top margin:
#             "inherent", "hysteresis", "hysteresis.all", "derived",
#             "ellipse" or "ellipse.all"; NULL prints none
#   xlim,ylim axis limits; derived from data plus fitted curve when NULL
#   main      plot title
#   newPred   TRUE: recompute the fitted curve on a fine angular grid from the
#             fitted parameters; FALSE: reuse stored predictions
#   show      character vector of geometric features to overlay
#             ("semi.major", "semi.minor", "b.x"/"b.y", "focus.x"/"focus.y",
#             "rote.deg", "retention", "coercion")
#   ...       further arguments passed to plot()
plot.ellipsefit <- function(a,putNumber=FALSE,values=NULL,xlim=NULL,ylim=NULL,main=NULL,newPred=TRUE,show=NULL,...)
{
  if (newPred==TRUE) {
    # Parametric ellipse evaluated on a 101-point grid of angles.
    ti <- (1:101)*pi/50
    newX <- a$values["b.x"]*cos(ti)+a$values["cx"]
    newY <- a$values["b.y"]*cos(ti)+a$values["retention"]*sin(ti)+a$values["cy"]
  }
  else {
    # Reuse stored predictions; repeat the first point to close the loop.
    newY <- a$pred.y
    newY[length(newY)+1] <- newY[1]
    newX <- a$pred.x
    newX[length(newX)+1] <- newX[1]
  }
  # Default limits span both the observed data and the fitted curve.
  if (is.null(xlim)) xlim <- c(min(c(a$x,newX)),max(c(a$x,newX)))
  if (is.null(ylim)) ylim <- c(min(c(a$y,newY)),max(c(a$y,newY)))
  if (is.null(values)) plot(newY~newX,type="l",ylim=ylim,xlim=xlim,main=main,...)
  else {
    # Each option draws the same curve, then writes a different subset of the
    # fitted quantities into the top margin with mtext().
    if (values=="inherent") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste( "b.x=",format(a$values["b.x"],digits=3)," b.y=",format(a$values["b.y"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("cx=",format(a$values["cx"],digits=3)," cy=",format(a$values["cy"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("Retention=",format(a$values["retention"],digits=3)),side=3,line=0.0,cex=0.75)
    }
    if (values=="hysteresis") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste( "b.x=",format(a$values["b.x"],digits=3)," b.y=",format(a$values["b.y"],digits=3)," cx=",format(a$values["cx"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("cy=",format(a$values["cy"],digits=3)," Area=",format(a$values["area"],digits=3)," Lag=",format(a$values["lag"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("Retention=",format(a$values["retention"],digits=3)," Coercion=",format(a$values["coercion"],digits=3)),side=3,line=0.0,cex=0.75)
    }
    if (values=="hysteresis.all") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste( "b.x=",format(a$values["b.x"],digits=3)," b.y=",format(a$values["b.y"],digits=3)," cx=",format(a$values["cx"],digits=3)," cy=",format(a$values["cy"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("Area=",format(a$values["area"],digits=3)," Lag=",format(a$values["lag"],digits=3)," Retention=",format(a$values["retention"],digits=3)," Split Angle=",format(a$values["split.angle"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("Coercion=",format(a$values["coercion"],digits=3)," Hysteresis x=",format(a$values["hysteresis.x"],digits=3)," Hysteresis y=",format(a$values["hysteresis.y"],digits=3)),side=3,line=0.0,cex=0.75)
    }
    if (values=="derived") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste( "Coercion=",format(a$values["coercion"],digits=3)," Area=",format(a$values["area"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("Lag=",format(a$values["lag"],digits=3)," Split Angle=",format(a$values["split.angle"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("Hysteresis x=",format(a$values["hysteresis.x"],digits=3)," Hysteresis y=",format(a$values["hysteresis.y"],digits=3)),side=3,line=0.0,cex=0.75)
    }
    if (values=="ellipse") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste( "Ampx=",format(a$values["ampx"],digits=3)," Ampy=",format(a$values["ampy"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("rote.deg=",format(a$values["rote.deg"],digits=3)," Eccentricity=",format(a$values["eccentricity"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("S-major Axis=",format(a$values["semi.major"],digits=3)," S-minor Axis=",format(a$values["semi.minor"],digits=3)),side=3,line=0.0,cex=0.75)
    }
    if (values=="ellipse.all") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste("Cx=",format(a$values["cx"],digits=3)," Cy=",format(a$values["cy"],digits=3), " Ampx=",format(a$values["ampx"],digits=3)," Ampy=",format(a$values["ampy"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("rote.deg=",format(a$values["rote.deg"],digits=3)," focus.x=",format(a$values["focus.x"],digits=3)," focus.y=",format(a$values["focus.y"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("S-major Axis=",format(a$values["semi.major"],digits=3)," S-minor Axis=",format(a$values["semi.minor"],digits=3)," Eccentricity=",format(a$values["eccentricity"],digits=3)),side=3,line=0.0,cex=0.75)
    }
  }
  # Overlay the observed points on the fitted curve.
  points(a$y~a$x,pch=1,cex=0.85)
  # Optional geometric overlays, all anchored at the ellipse centre (cx, cy).
  if (any(show=="semi.major"))
    segments(a$values["cx"],a$values["cy"],a$values["cx"]+a$values["semi.major"]*cos(a$values["rote.deg"]/180*pi),a$values["cy"]+a$values["semi.major"]*sin(a$values["rote.deg"]/180*pi),col="red")
  if (any(show=="semi.minor"))
    segments(a$values["cx"],a$values["cy"],a$values["cx"]+a$values["semi.minor"]*cos(a$values["rote.deg"]/180*pi+pi/2),a$values["cy"]+a$values["semi.minor"]*sin(a$values["rote.deg"]/180*pi+pi/2),col="red")
  if (any(show %in% c("b.x","b.y")))
    segments(a$values["cx"],a$values["cy"],a$values["cx"]+a$values["b.x"],a$values["cy"]+a$values["b.y"],col="blue")
  if (any(show %in% c("focus.x","focus.y")))
    points(c(a$values["cx"]+a$values["focus.x"],a$values["cx"]-a$values["focus.x"]),c(a$values["cy"]+a$values["focus.y"],a$values["cy"]-a$values["focus.y"]),col="gold",cex=2,pch=19)
  if (any(show=="rote.deg")) {
    # Dashed arrow from the coercion point toward the focus, plus a dashed
    # baseline along the x direction.
    arrows(a$values["cx"]+a$values["coercion"],a$values["cy"],a$values["cx"]+a$values["focus.x"],a$values["cy"]+a$values["focus.y"],lty=2)
    segments(a$values["cx"],a$values["cy"],a$values["cx"]+a$values["coercion"],a$values["cy"],lty=2)
  }
  if (any(show=="retention"))
    segments(a$values["cx"],a$values["cy"],a$values["cx"],a$values["cy"]+a$values["retention"],col="purple")
  if (any(show=="coercion"))
    segments(a$values["cx"],a$values["cy"],a$values["cx"]+a$values["coercion"],a$values["cy"],col="green")
  # Optionally label each observed point with its index.
  if(putNumber==TRUE) text(a$x,a$y,as.character(format(1:length(a$y),digits=4)))
}
/R/plot.ellipsefit.R
no_license
spencerm89/hysteresis3
R
false
false
5,989
r
# S3 plot method for "ellipsefit" objects (fitted hysteresis ellipses).
#
# Arguments:
#   a         ellipsefit object holding raw data (x, y), stored predictions
#             (pred.x, pred.y) and a named vector of fitted quantities ($values)
#   putNumber label each observed point with its index
#   values    which group of fitted quantities to print in the top margin:
#             "inherent", "hysteresis", "hysteresis.all", "derived",
#             "ellipse" or "ellipse.all"; NULL prints none
#   xlim,ylim axis limits; derived from data plus fitted curve when NULL
#   main      plot title
#   newPred   TRUE: recompute the fitted curve on a fine angular grid from the
#             fitted parameters; FALSE: reuse stored predictions
#   show      character vector of geometric features to overlay
#             ("semi.major", "semi.minor", "b.x"/"b.y", "focus.x"/"focus.y",
#             "rote.deg", "retention", "coercion")
#   ...       further arguments passed to plot()
plot.ellipsefit <- function(a,putNumber=FALSE,values=NULL,xlim=NULL,ylim=NULL,main=NULL,newPred=TRUE,show=NULL,...)
{
  if (newPred==TRUE) {
    # Parametric ellipse evaluated on a 101-point grid of angles.
    ti <- (1:101)*pi/50
    newX <- a$values["b.x"]*cos(ti)+a$values["cx"]
    newY <- a$values["b.y"]*cos(ti)+a$values["retention"]*sin(ti)+a$values["cy"]
  }
  else {
    # Reuse stored predictions; repeat the first point to close the loop.
    newY <- a$pred.y
    newY[length(newY)+1] <- newY[1]
    newX <- a$pred.x
    newX[length(newX)+1] <- newX[1]
  }
  # Default limits span both the observed data and the fitted curve.
  if (is.null(xlim)) xlim <- c(min(c(a$x,newX)),max(c(a$x,newX)))
  if (is.null(ylim)) ylim <- c(min(c(a$y,newY)),max(c(a$y,newY)))
  if (is.null(values)) plot(newY~newX,type="l",ylim=ylim,xlim=xlim,main=main,...)
  else {
    # Each option draws the same curve, then writes a different subset of the
    # fitted quantities into the top margin with mtext().
    if (values=="inherent") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste( "b.x=",format(a$values["b.x"],digits=3)," b.y=",format(a$values["b.y"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("cx=",format(a$values["cx"],digits=3)," cy=",format(a$values["cy"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("Retention=",format(a$values["retention"],digits=3)),side=3,line=0.0,cex=0.75)
    }
    if (values=="hysteresis") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste( "b.x=",format(a$values["b.x"],digits=3)," b.y=",format(a$values["b.y"],digits=3)," cx=",format(a$values["cx"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("cy=",format(a$values["cy"],digits=3)," Area=",format(a$values["area"],digits=3)," Lag=",format(a$values["lag"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("Retention=",format(a$values["retention"],digits=3)," Coercion=",format(a$values["coercion"],digits=3)),side=3,line=0.0,cex=0.75)
    }
    if (values=="hysteresis.all") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste( "b.x=",format(a$values["b.x"],digits=3)," b.y=",format(a$values["b.y"],digits=3)," cx=",format(a$values["cx"],digits=3)," cy=",format(a$values["cy"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("Area=",format(a$values["area"],digits=3)," Lag=",format(a$values["lag"],digits=3)," Retention=",format(a$values["retention"],digits=3)," Split Angle=",format(a$values["split.angle"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("Coercion=",format(a$values["coercion"],digits=3)," Hysteresis x=",format(a$values["hysteresis.x"],digits=3)," Hysteresis y=",format(a$values["hysteresis.y"],digits=3)),side=3,line=0.0,cex=0.75)
    }
    if (values=="derived") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste( "Coercion=",format(a$values["coercion"],digits=3)," Area=",format(a$values["area"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("Lag=",format(a$values["lag"],digits=3)," Split Angle=",format(a$values["split.angle"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("Hysteresis x=",format(a$values["hysteresis.x"],digits=3)," Hysteresis y=",format(a$values["hysteresis.y"],digits=3)),side=3,line=0.0,cex=0.75)
    }
    if (values=="ellipse") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste( "Ampx=",format(a$values["ampx"],digits=3)," Ampy=",format(a$values["ampy"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("rote.deg=",format(a$values["rote.deg"],digits=3)," Eccentricity=",format(a$values["eccentricity"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("S-major Axis=",format(a$values["semi.major"],digits=3)," S-minor Axis=",format(a$values["semi.minor"],digits=3)),side=3,line=0.0,cex=0.75)
    }
    if (values=="ellipse.all") {
      plot(newY~newX,type="l",ylim=ylim,xlim=xlim,...)
      title(line=3, paste(main),cex=1.2)
      mtext(paste("Cx=",format(a$values["cx"],digits=3)," Cy=",format(a$values["cy"],digits=3), " Ampx=",format(a$values["ampx"],digits=3)," Ampy=",format(a$values["ampy"],digits=3)),side=3,line=1.85,cex=0.75)
      mtext(paste("rote.deg=",format(a$values["rote.deg"],digits=3)," focus.x=",format(a$values["focus.x"],digits=3)," focus.y=",format(a$values["focus.y"],digits=3)),side=3,line=0.95,cex=0.75)
      mtext(paste("S-major Axis=",format(a$values["semi.major"],digits=3)," S-minor Axis=",format(a$values["semi.minor"],digits=3)," Eccentricity=",format(a$values["eccentricity"],digits=3)),side=3,line=0.0,cex=0.75)
    }
  }
  # Overlay the observed points on the fitted curve.
  points(a$y~a$x,pch=1,cex=0.85)
  # Optional geometric overlays, all anchored at the ellipse centre (cx, cy).
  if (any(show=="semi.major"))
    segments(a$values["cx"],a$values["cy"],a$values["cx"]+a$values["semi.major"]*cos(a$values["rote.deg"]/180*pi),a$values["cy"]+a$values["semi.major"]*sin(a$values["rote.deg"]/180*pi),col="red")
  if (any(show=="semi.minor"))
    segments(a$values["cx"],a$values["cy"],a$values["cx"]+a$values["semi.minor"]*cos(a$values["rote.deg"]/180*pi+pi/2),a$values["cy"]+a$values["semi.minor"]*sin(a$values["rote.deg"]/180*pi+pi/2),col="red")
  if (any(show %in% c("b.x","b.y")))
    segments(a$values["cx"],a$values["cy"],a$values["cx"]+a$values["b.x"],a$values["cy"]+a$values["b.y"],col="blue")
  if (any(show %in% c("focus.x","focus.y")))
    points(c(a$values["cx"]+a$values["focus.x"],a$values["cx"]-a$values["focus.x"]),c(a$values["cy"]+a$values["focus.y"],a$values["cy"]-a$values["focus.y"]),col="gold",cex=2,pch=19)
  if (any(show=="rote.deg")) {
    # Dashed arrow from the coercion point toward the focus, plus a dashed
    # baseline along the x direction.
    arrows(a$values["cx"]+a$values["coercion"],a$values["cy"],a$values["cx"]+a$values["focus.x"],a$values["cy"]+a$values["focus.y"],lty=2)
    segments(a$values["cx"],a$values["cy"],a$values["cx"]+a$values["coercion"],a$values["cy"],lty=2)
  }
  if (any(show=="retention"))
    segments(a$values["cx"],a$values["cy"],a$values["cx"],a$values["cy"]+a$values["retention"],col="purple")
  if (any(show=="coercion"))
    segments(a$values["cx"],a$values["cy"],a$values["cx"]+a$values["coercion"],a$values["cy"],col="green")
  # Optionally label each observed point with its index.
  if(putNumber==TRUE) text(a$x,a$y,as.character(format(1:length(a$y),digits=4)))
}
# Auto-generated libFuzzer regression case for esreg::G1_fun.
# The inputs are fuzzer-chosen: type = 0L and an extreme denormal-range
# double for z -- presumably probing numeric edge cases in G1_fun
# (confirm against the esreg fuzzing harness).
testlist <- list(type = 0L, z = 2.99939362779157e-241)
result <- do.call(esreg::G1_fun, testlist)
# Print the structure of the result so crashes/odd outputs are visible.
str(result)
/esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609894421-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
108
r
# Auto-generated libFuzzer regression case for esreg::G1_fun.
# The inputs are fuzzer-chosen: type = 0L and an extreme denormal-range
# double for z -- presumably probing numeric edge cases in G1_fun
# (confirm against the esreg fuzzing harness).
testlist <- list(type = 0L, z = 2.99939362779157e-241)
result <- do.call(esreg::G1_fun, testlist)
# Print the structure of the result so crashes/odd outputs are visible.
str(result)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/circuits.R \name{FindAllCircuits} \alias{FindAllCircuits} \title{Find all circuits in a multi-timepoint longitudinal dataset} \usage{ FindAllCircuits( seu, nnr, ranked_genes = NULL, all.tps = NULL, subsample = F, subsample.n = 1e+05 ) } \arguments{ \item{seu}{A binned Seurat object with binning results stored in the "bins" column of the meta.data slot} \item{nnr}{Ligand and target ranking results from NicheNet. Currently only the output of \code{PrioritizeLigands} is supported.} \item{ranked_genes}{Single-cell gene signatures, output from \code{crGeneSig}} \item{all.tps}{Character vector of all timepoints between which to find circuits. Time point identities must be in the correct sequential order.} \item{subsample}{logical. Subsample circuits for each pair of timepoints to the threshold set in subsample.n?} \item{subsample.n}{numeric. If subsample=T, subsample circuits from each timepoint pair to this level} } \value{ } \description{ Find all circuits in a multi-timepoint longitudinal dataset }
/man/FindAllCircuits.Rd
permissive
BlishLab/scriabin
R
false
true
1,107
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/circuits.R \name{FindAllCircuits} \alias{FindAllCircuits} \title{Find all circuits in a multi-timepoint longitudinal dataset} \usage{ FindAllCircuits( seu, nnr, ranked_genes = NULL, all.tps = NULL, subsample = F, subsample.n = 1e+05 ) } \arguments{ \item{seu}{A binned Seurat object with binning results stored in the "bins" column of the meta.data slot} \item{nnr}{Ligand and target ranking results from NicheNet. Currently only the output of \code{PrioritizeLigands} is supported.} \item{ranked_genes}{Single-cell gene signatures, output from \code{crGeneSig}} \item{all.tps}{Character vector of all timepoints between which to find circuits. Time point identities must be in the correct sequential order.} \item{subsample}{logical. Subsample circuits for each pair of timepoints to the threshold set in subsample.n?} \item{subsample.n}{numeric. If subsample=T, subsample circuits from each timepoint pair to this level} } \value{ } \description{ Find all circuits in a multi-timepoint longitudinal dataset }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mica.taxonomies.R \name{mica.vocabularies} \alias{mica.vocabularies} \title{Get the vocabularies} \usage{ mica.vocabularies( mica, query = NULL, locale = "en", target = "variable", taxonomies = NULL, df = TRUE ) } \arguments{ \item{mica}{A Mica object} \item{query}{The search query} \item{locale}{The language for labels (when NULL labels are not included in the result)} \item{target}{What the taxonomy is about: variable (default), dataset, study, network} \item{taxonomies}{Taxonomy names to subset. If NULL or empty all taxonomies are returned} \item{df}{Return a data.frame (default is TRUE)} } \description{ Get the taxonomy vocabularies, optionally filtered by taxonomy name and by term matching. } \examples{ \dontrun{ m <- mica.login("https://mica-demo.obiba.org") mica.vocabularies(m,target="variable", query="cancer", locale = "en") mica.logout(m) } } \seealso{ Other taxonomies functions: \code{\link{mica.taxonomies}()} } \concept{taxonomies functions}
/man/mica.vocabularies.Rd
no_license
obiba/micar
R
false
true
1,063
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mica.taxonomies.R \name{mica.vocabularies} \alias{mica.vocabularies} \title{Get the vocabularies} \usage{ mica.vocabularies( mica, query = NULL, locale = "en", target = "variable", taxonomies = NULL, df = TRUE ) } \arguments{ \item{mica}{A Mica object} \item{query}{The search query} \item{locale}{The language for labels (when NULL labels are not included in the result)} \item{target}{What the taxonomy is about: variable (default), dataset, study, network} \item{taxonomies}{Taxonomy names to subset. If NULL or empty all taxonomies are returned} \item{df}{Return a data.frame (default is TRUE)} } \description{ Get the taxonomy vocabularies, optionally filtered by taxonomy name and by term matching. } \examples{ \dontrun{ m <- mica.login("https://mica-demo.obiba.org") mica.vocabularies(m,target="variable", query="cancer", locale = "en") mica.logout(m) } } \seealso{ Other taxonomies functions: \code{\link{mica.taxonomies}()} } \concept{taxonomies functions}
# Plot total PM2.5 emissions in the United States by year and save the
# figure to Plot1.png.
NEI <- readRDS("summarySCC_PM25.rds")

# Total emissions per year (tapply sums Emissions within each year level),
# rescaled to millions of tons.
EmissionMean <- with(NEI, tapply(Emissions, year, sum, na.rm = TRUE))
EmissionMean <- EmissionMean / 1000000

year <- c(1999, 2002, 2005, 2008)

png("Plot1.png")
plot(year, EmissionMean, type = 'l',
     ylab = "Total Emission (Millions of Tones)")
title(main = "Total emission in United States from 1999 to 2008")
# Close the device so the PNG file is flushed to disk; the original script
# omitted this, leaving Plot1.png incomplete/empty.
dev.off()
/Exploratory Data Analysis/week4/Plot1.R
no_license
Nazlibk/coursera
R
false
false
329
r
# Plot total PM2.5 emissions in the United States by year and save the
# figure to Plot1.png.
NEI <- readRDS("summarySCC_PM25.rds")

# Total emissions per year (tapply sums Emissions within each year level),
# rescaled to millions of tons.
EmissionMean <- with(NEI, tapply(Emissions, year, sum, na.rm = TRUE))
EmissionMean <- EmissionMean / 1000000

year <- c(1999, 2002, 2005, 2008)

png("Plot1.png")
plot(year, EmissionMean, type = 'l',
     ylab = "Total Emission (Millions of Tones)")
title(main = "Total emission in United States from 1999 to 2008")
# Close the device so the PNG file is flushed to disk; the original script
# omitted this, leaving Plot1.png incomplete/empty.
dev.off()
# Extracted example script for the futile.matrix package; the "###" lines
# below preserve the Rd metadata the example was pulled from.
library(futile.matrix)

### Name: futile.matrix-package
### Title: A collection of matrix manipulation functions
### Aliases: futile.matrix futile.matrix-package
### Keywords: attribute logic package

### ** Examples

# Generate a random ensemble
# (a random matrix drawn from a 100 x 400 Wishart model)
m <- rmatrix(WishartModel(100,400))

# Select sub-matrices
library(datasets)
# Presumably selects the sub-matrix of `swiss` whose row names match "Rive"
# and/or whose column names match the regex '^E' -- confirm with
# ?futile.matrix::select.
select(swiss, "Rive")
select(swiss, col.pat='^E')
# Replacement form: overwrite the matching sub-matrix with -1.
select(swiss, "Rive", '^E') <- -1

# Expand the data set with the additional row and column names given below.
dimnames <- list(
  c(rownames(swiss), 'Zermat', 'Zurich', 'Geneva'),
  c(colnames(swiss), 'Age','Hair.Color')
)
my.swiss <- expand(swiss, dimnames)
/data/genthat_extracted_code/futile.matrix/examples/futile.matrix-package.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
558
r
# Extracted example script for the futile.matrix package; the "###" lines
# below preserve the Rd metadata the example was pulled from.
library(futile.matrix)

### Name: futile.matrix-package
### Title: A collection of matrix manipulation functions
### Aliases: futile.matrix futile.matrix-package
### Keywords: attribute logic package

### ** Examples

# Generate a random ensemble
# (a random matrix drawn from a 100 x 400 Wishart model)
m <- rmatrix(WishartModel(100,400))

# Select sub-matrices
library(datasets)
# Presumably selects the sub-matrix of `swiss` whose row names match "Rive"
# and/or whose column names match the regex '^E' -- confirm with
# ?futile.matrix::select.
select(swiss, "Rive")
select(swiss, col.pat='^E')
# Replacement form: overwrite the matching sub-matrix with -1.
select(swiss, "Rive", '^E') <- -1

# Expand the data set with the additional row and column names given below.
dimnames <- list(
  c(rownames(swiss), 'Zermat', 'Zurich', 'Geneva'),
  c(colnames(swiss), 'Age','Hair.Color')
)
my.swiss <- expand(swiss, dimnames)
# For every pair of phenotypes listed in "all_bffiles", compute the rank
# (Spearman-type) correlation of effect sizes from the two overlap files
# (one per ordering of the pair), Fisher z-transform the estimates, and
# write the combined table to "all_rho".
d <- read.table("all_bffiles", as.is = TRUE)
npheno <- nrow(d)
npair <- npheno * (npheno - 1) / 2
# Preallocate one row per phenotype pair, 10 statistics per row.
toreturn <- data.frame(matrix(nrow = npair, ncol = 10))

# Read one overlap file and return the sample size, rank correlation,
# Fisher z and its asymptotic SE. Variants in the extended MHC region
# (chr6:26-34Mb) are removed first (long-range LD region).
process_overlap <- function(path) {
  tmp <- read.table(path, as.is = TRUE, header = TRUE)
  mhc <- which(tmp$chr == "chr6" & tmp$pos >= 26000000 & tmp$pos <= 34000000)
  if (length(mhc) > 0) {
    tmp <- tmp[-mhc, ]
  }
  # Back out betas from Z-scores and variances, then rank them.
  tmp$B1 <- tmp$Z_1 * sqrt(tmp$V_1)
  tmp$B2 <- tmp$Z * sqrt(tmp$V)
  r1 <- rank(tmp$B1)
  r2 <- rank(tmp$B2)
  est <- cor.test(r1, r2)$estimate
  list(
    n = nrow(tmp),
    est = est,
    z = 0.5 * log((1 + est) / (1 - est)),  # Fisher z-transform
    se = 1 / sqrt(nrow(tmp) - 3)           # asymptotic SE of Fisher z
  )
}

index <- 1
for (i in 1:(npheno - 1)) {
  p1 <- d[i, 1]
  for (j in (i + 1):npheno) {
    print(paste(i, j))
    p2 <- d[j, 1]
    print(paste(p1, p2))
    toreturn[index, 1] <- p1
    toreturn[index, 2] <- p2
    # Both orderings of the pair are processed identically.
    s1 <- process_overlap(paste0("../../overlaps/data/", p1, "_", p2, ".overlap_wbetas"))
    s2 <- process_overlap(paste0("../../overlaps/data/", p2, "_", p1, ".overlap_wbetas"))
    toreturn[index, 3] <- s1$n
    toreturn[index, 4] <- s2$n
    toreturn[index, 5] <- s1$est
    toreturn[index, 6] <- s2$est
    toreturn[index, 7] <- s1$z
    toreturn[index, 8] <- s2$z
    toreturn[index, 9] <- s1$se
    toreturn[index, 10] <- s2$se
    index <- index + 1
  }
}

names(toreturn) <- c("P1", "P2", "N1", "N2", "RHO1", "RHO2", "FZ1", "FZ2", "SE1", "SE2")

# Drop pairs with missing or non-finite statistics before writing.
toreturn <- toreturn[!is.na(toreturn[, 5]), ]
toreturn <- toreturn[!is.na(toreturn[, 6]), ]
toreturn <- toreturn[!is.na(toreturn[, 7]), ]
toreturn <- toreturn[!is.na(toreturn[, 8]), ]
toreturn <- toreturn[!is.na(toreturn[, 9]), ]
toreturn <- toreturn[is.finite(toreturn$FZ2) & is.finite(toreturn$FZ1), ]

write.table(toreturn, file = "all_rho", quote = FALSE, row.names = FALSE)
/MRMR/scripts/1_get_all_rho.R
no_license
RezaJF/gwas-pw-paper
R
false
false
2,182
r
# For every pair of phenotypes listed in "all_bffiles", compute the rank
# (Spearman-type) correlation of effect sizes from the two overlap files
# (one per ordering of the pair), Fisher z-transform the estimates, and
# write the combined table to "all_rho".
d <- read.table("all_bffiles", as.is = TRUE)
npheno <- nrow(d)
npair <- npheno * (npheno - 1) / 2
# Preallocate one row per phenotype pair, 10 statistics per row.
toreturn <- data.frame(matrix(nrow = npair, ncol = 10))

# Read one overlap file and return the sample size, rank correlation,
# Fisher z and its asymptotic SE. Variants in the extended MHC region
# (chr6:26-34Mb) are removed first (long-range LD region).
process_overlap <- function(path) {
  tmp <- read.table(path, as.is = TRUE, header = TRUE)
  mhc <- which(tmp$chr == "chr6" & tmp$pos >= 26000000 & tmp$pos <= 34000000)
  if (length(mhc) > 0) {
    tmp <- tmp[-mhc, ]
  }
  # Back out betas from Z-scores and variances, then rank them.
  tmp$B1 <- tmp$Z_1 * sqrt(tmp$V_1)
  tmp$B2 <- tmp$Z * sqrt(tmp$V)
  r1 <- rank(tmp$B1)
  r2 <- rank(tmp$B2)
  est <- cor.test(r1, r2)$estimate
  list(
    n = nrow(tmp),
    est = est,
    z = 0.5 * log((1 + est) / (1 - est)),  # Fisher z-transform
    se = 1 / sqrt(nrow(tmp) - 3)           # asymptotic SE of Fisher z
  )
}

index <- 1
for (i in 1:(npheno - 1)) {
  p1 <- d[i, 1]
  for (j in (i + 1):npheno) {
    print(paste(i, j))
    p2 <- d[j, 1]
    print(paste(p1, p2))
    toreturn[index, 1] <- p1
    toreturn[index, 2] <- p2
    # Both orderings of the pair are processed identically.
    s1 <- process_overlap(paste0("../../overlaps/data/", p1, "_", p2, ".overlap_wbetas"))
    s2 <- process_overlap(paste0("../../overlaps/data/", p2, "_", p1, ".overlap_wbetas"))
    toreturn[index, 3] <- s1$n
    toreturn[index, 4] <- s2$n
    toreturn[index, 5] <- s1$est
    toreturn[index, 6] <- s2$est
    toreturn[index, 7] <- s1$z
    toreturn[index, 8] <- s2$z
    toreturn[index, 9] <- s1$se
    toreturn[index, 10] <- s2$se
    index <- index + 1
  }
}

names(toreturn) <- c("P1", "P2", "N1", "N2", "RHO1", "RHO2", "FZ1", "FZ2", "SE1", "SE2")

# Drop pairs with missing or non-finite statistics before writing.
toreturn <- toreturn[!is.na(toreturn[, 5]), ]
toreturn <- toreturn[!is.na(toreturn[, 6]), ]
toreturn <- toreturn[!is.na(toreturn[, 7]), ]
toreturn <- toreturn[!is.na(toreturn[, 8]), ]
toreturn <- toreturn[!is.na(toreturn[, 9]), ]
toreturn <- toreturn[is.finite(toreturn$FZ2) & is.finite(toreturn$FZ1), ]

write.table(toreturn, file = "all_rho", quote = FALSE, row.names = FALSE)
#' Basic correlation plot function for normalized or unnormalized counts.
#'
#' This function plots a heatmap of the "n" features with greatest variance
#' across rows.
#'
#'
#' @param obj A MRexperiment object with count data.
#' @param n The number of features to plot. This chooses the "n" features with greatest
#'   variance; values larger than the number of features with positive counts are
#'   truncated to that number.
#' @param norm Whether or not to normalize the counts - if MRexperiment object.
#' @param log Whether or not to log2 transform the counts - if MRexperiment object.
#' @param fun Function to calculate pair-wise relationships. Default is pearson
#' correlation
#' @param ... Additional plot arguments.
#' @return plotted correlation matrix
#' @seealso \code{\link{cumNormMat}}
#' @examples
#'
#' data(mouseData)
#' plotCorr(obj=mouseData,n=200,cexRow = 0.4,cexCol = 0.4,trace="none",dendrogram="none",
#' col = colorRampPalette(brewer.pal(9, "RdBu"))(50))
#'
plotCorr <- function(obj, n, norm = TRUE, log = TRUE, fun = cor, ...) {
  mat <- returnAppropriateObj(obj, norm, log)
  # Keep only features observed at least once.
  otusToKeep <- which(rowSums(mat) > 0)
  otuVars <- rowSds(mat[otusToKeep, ])
  # Clamp n so that requesting more features than are available no longer
  # produces NA indices (which previously caused a subscript error).
  n <- min(n, length(otusToKeep))
  # Top-n features by row-wise variability.
  otuIndices <- otusToKeep[order(otuVars, decreasing = TRUE)[seq_len(n)]]
  mat2 <- mat[otuIndices, ]
  # Pair-wise relationship matrix (features x features).
  cc <- as.matrix(fun(t(mat2)))
  # Order features by hierarchical clustering of their profiles so related
  # features are adjacent in the heatmap.
  hc <- hclust(dist(mat2))
  otuOrder <- hc$order
  cc <- cc[otuOrder, otuOrder]
  heatmap.2(t(cc), ...)
  # Return the plotted matrix invisibly so the function pipes cleanly.
  invisible(t(cc))
}
/R/plotCorr.R
no_license
HCBravoLab/metagenomeSeq
R
false
false
1,348
r
#' Basic correlation plot function for normalized or unnormalized counts.
#'
#' This function plots a heatmap of the "n" features with greatest variance
#' across rows.
#'
#'
#' @param obj A MRexperiment object with count data.
#' @param n The number of features to plot. This chooses the "n" features with greatest
#'   variance; values larger than the number of features with positive counts are
#'   truncated to that number.
#' @param norm Whether or not to normalize the counts - if MRexperiment object.
#' @param log Whether or not to log2 transform the counts - if MRexperiment object.
#' @param fun Function to calculate pair-wise relationships. Default is pearson
#' correlation
#' @param ... Additional plot arguments.
#' @return plotted correlation matrix
#' @seealso \code{\link{cumNormMat}}
#' @examples
#'
#' data(mouseData)
#' plotCorr(obj=mouseData,n=200,cexRow = 0.4,cexCol = 0.4,trace="none",dendrogram="none",
#' col = colorRampPalette(brewer.pal(9, "RdBu"))(50))
#'
plotCorr <- function(obj, n, norm = TRUE, log = TRUE, fun = cor, ...) {
  mat <- returnAppropriateObj(obj, norm, log)
  # Keep only features observed at least once.
  otusToKeep <- which(rowSums(mat) > 0)
  otuVars <- rowSds(mat[otusToKeep, ])
  # Clamp n so that requesting more features than are available no longer
  # produces NA indices (which previously caused a subscript error).
  n <- min(n, length(otusToKeep))
  # Top-n features by row-wise variability.
  otuIndices <- otusToKeep[order(otuVars, decreasing = TRUE)[seq_len(n)]]
  mat2 <- mat[otuIndices, ]
  # Pair-wise relationship matrix (features x features).
  cc <- as.matrix(fun(t(mat2)))
  # Order features by hierarchical clustering of their profiles so related
  # features are adjacent in the heatmap.
  hc <- hclust(dist(mat2))
  otuOrder <- hc$order
  cc <- cc[otuOrder, otuOrder]
  heatmap.2(t(cc), ...)
  # Return the plotted matrix invisibly so the function pipes cleanly.
  invisible(t(cc))
}
# MCMC sampler for the mixed-frequency BVAR with a Minnesota prior and a
# common stochastic volatility factor ("minn_csv"). `x` is the list of
# model settings (data Y, frequencies, lag length, prior hyperparameters,
# number of draws); `...` may carry `init`, a list of starting values that
# allows a chain to be continued from the final draw of a previous run.
mcmc_sampler.mfbvar_minn_csv <- function(x, ...){
  n_vars <- ncol(x$Y)
  # Fail early with an informative message if required settings are missing.
  if (!(!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) {
    test_all <- sapply(x, is.null)
    test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps")]
    stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " "))
  }
  prior_nu <- n_vars + 2
  # Minnesota-style prior moments for the VAR coefficients and error covariance.
  priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1,
                           Y = x$Y, n_lags = x$n_lags, prior_nu = prior_nu)
  prior_Pi_mean <- priors$prior_Pi_mean
  prior_Pi_Omega <- priors$prior_Pi_Omega
  prior_S <- priors$prior_S
  Y <- x$Y
  freq <- x$freq
  n_fcst <- x$n_fcst
  verbose <- x$verbose
  n_lags <- x$n_lags
  lambda4 <- x$lambda4
  # Add terms for constant
  prior_Pi_Omega <- diag(c(x$lambda1^2*lambda4^2, diag(prior_Pi_Omega)))
  prior_Pi_mean <- rbind(0, prior_Pi_mean)
  # Volatility prior: AR(1) coefficient phi stored as (mean*precision,
  # precision); innovation variance stored as (scale, df).
  phi_invvar <- 1/x$prior_phi[2]
  phi_meaninvvar <- x$prior_phi[1] * phi_invvar
  prior_sigma2 <- x$prior_sigma2[1]
  prior_df <- x$prior_sigma2[2]
  add_args <- list(...)
  n_reps <- x$n_reps
  n_burnin <- x$n_burnin
  n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin)
  # Optional user-supplied starting values (mirrors `init` in the return value).
  init <- add_args$init
  init_Pi <- init$init_Pi
  init_Sigma <- init$init_Sigma
  init_Z <- init$init_Z
  init_phi <- init$init_phi
  init_sigma <- init$init_sigma
  init_f <- init$init_f
  # n_vars: number of variables
  # n_lags: number of lags
  # n_determ: number of deterministic variables
  # n_T: sample size (full sample)
  # n_T_: sample size (reduced sample)
  n_q <- sum(freq == "q")
  # T_b: last time point at which all monthly series are observed.
  if (n_q < n_vars) {
    T_b <- max(which(!apply(apply(Y[, freq == "m", drop = FALSE], 2, is.na), 1, any)))
  } else {
    T_b <- nrow(Y)
  }
  # Single-frequency case: drop rows with any missing observation.
  if (n_q == 0 || n_q == n_vars) {
    complete_quarters <- apply(Y, 1, function(x) !any(is.na(x)))
    Y <- Y[complete_quarters, ]
  }
  # Lambda_ maps latent monthly values to observed quarterly values
  # (intra-quarter average or triangular aggregation).
  if (n_q > 0) {
    if (x$aggregation == "average") {
      Lambda_ <- build_Lambda(rep("average", n_q), 3)
    } else {
      Lambda_ <- build_Lambda(rep("triangular", n_q), 5)}
  } else {
    Lambda_ <- matrix(0, 1, 3)
  }
  n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_)))
  n_T <- dim(Y)[1]# - n_lags
  n_T_ <- n_T - n_pseudolags
  d <- matrix(1, nrow = nrow(Y), ncol = 1)
  post_nu <- n_T_ + prior_nu
  ################################################################
  ### Preallocation
  # Pi and Sigma store their i-th draws in the third dimension, psi
  # is vectorized so it has its i-th draw stored in the i-th row
  # Pi: p * pk * n_reps, each [,,i] stores Pi'
  # Sigma: p * p * n_reps
  # psi: n_reps * p
  # Z: T * p * n_reps
  ### If forecasting (h is horizon):
  # Z_fcst: hk * p * n_reps
  # d_fcst_lags: hk * m
  ### If root checking:
  # roots: n_reps vector
  # num_tries: n_reps vector
  ### If smoothing of the state vector:
  # smoothed_Z: T * p * n_reps
  Pi <- array(NA, dim = c(n_vars, n_vars * n_lags + 1, n_reps/n_thin))
  Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin))
  Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin))
  phi <- rep(NA, n_reps/n_thin)
  sigma <- rep(NA, n_reps/n_thin)
  # NOTE(review): f is allocated with n_reps rows while all other containers
  # use n_reps/n_thin (and the ss_csv sampler below uses n_reps/n_thin here
  # too) -- confirm whether this is intentional when thinning is active.
  f <- matrix(NA, n_reps, n_T_)
  Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin))
  if (n_fcst > 0) {
    rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst))
    Z_fcst[,,1] <- 0
  } else {
    rownames(Z_fcst) <- (n_T-n_lags+1):n_T
  }
  ################################################################
  ### MCMC sampling initialization
  # If the initial values are not provided, the missing values in
  # Z are filled with the next observed value and Pi, Sigma and
  # psi are then computed using maximum likelihood
  # This allows the user to run the MCMC sampler for a burn-in
  # period, then use the final draw of that as initialization
  # for multiple chains
  if (is.null(init_Z)) {
    Z[,, 1] <- fill_na(Y)
  } else {
    if (all(dim(Z[,, 1]) == dim(init_Z))) {
      Z[,, 1] <- init_Z
    } else {
      stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "), ", but should be ", paste(dim(Z[,, 1]), collapse = " x ")))
    }
  }
  ols_results <- ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = 1)
  if (is.null(init_Pi)) {
    Pi[,, 1] <- cbind(ols_results$const, ols_results$Pi)
  } else {
    if (all(dim(Pi[,, 1]) == dim(init_Pi))) {
      Pi[,, 1] <- init_Pi
    } else {
      stop(paste0("The dimension of init_Pi is ", paste(dim(init_Pi), collapse = " x "), ", but should be ", paste(dim(Pi[,, 1]), collapse = " x ")))
    }
  }
  # Compute the maximum eigenvalue of the initial Pi
  if (is.null(init_Sigma)) {
    Sigma[,, 1] <- ols_results$S
  } else {
    if (all(dim(Sigma[,,1]) == dim(init_Sigma))) {
      Sigma[,, 1] <- init_Sigma
    } else {
      stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "), ", but should be ", paste(dim(Sigma[,,1]), collapse = " x ")))
    }
  }
  if (is.null(init_phi)) {
    phi[1] <- x$prior_phi[1]
  } else {
    phi[1] <- init_phi
  }
  if (is.null(init_sigma)) {
    sigma[1] <- sqrt(x$prior_sigma2[1])
  } else {
    sigma[1] <- init_sigma
  }
  if (is.null(init_f)) {
    f[1,] <- 0.0
  } else {
    f[1,] <- init_f
  }
  ################################################################
  ### Compute terms which do not vary in the sampler
  Z_1 <- Z[1:n_pseudolags,, 1]
  # For the posterior of Pi
  inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega))
  Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean
  # Compiled sampler: called for its side effects on the preallocated
  # containers (Pi, Sigma, Z, Z_fcst, phi, sigma, f), which are read below.
  mcmc_minn_csv(Y[-(1:n_lags),],Pi,Sigma,Z,Z_fcst,phi,sigma,f,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega,
                Omega_Pi,prior_Pi_mean,prior_S,Z_1,10,phi_invvar,phi_meaninvvar,prior_sigma2,prior_df,
                n_reps,n_burnin,n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst,n_thin,verbose)
  # Bundle draws, priors, data, and the final draw (as `init`, for chain
  # continuation) into the return object.
  return_obj <- list(Pi = Pi, Sigma = Sigma, Z = Z, phi = phi, sigma = sigma, f = f, Z_fcst = NULL,
                     aggregation = x$aggregation, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst,
                     prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, prior_S = prior_S,
                     prior_nu = n_vars+2, post_nu = n_T + n_vars+2, d = d, Y = Y, n_T = n_T, n_T_ = n_T_,
                     n_reps = n_reps, n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_,
                     init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin],
                                 init_Z = Z[,, n_reps/n_thin], init_phi = phi[n_reps/n_thin],
                                 init_sigma = sigma[n_reps/n_thin], init_f = f[n_reps/n_thin,]))
  if (n_fcst > 0) {
    return_obj$Z_fcst <- Z_fcst
  }
  return(return_obj)
}
mcmc_sampler.mfbvar_ss_csv <- function(x, ...)
{
  # MCMC sampler for the steady-state mixed-frequency BVAR with common
  # stochastic volatility ("ss_csv"). Mirrors mcmc_sampler.mfbvar_minn_csv,
  # adding a prior on the steady state psi and deterministic terms d.
  n_vars <- ncol(x$Y)
  # Fail early with an informative message if required settings are missing.
  if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$prior_psi_Omega) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) {
    test_all <- sapply(x, is.null)
    test_sub <- test_all[c("Y", "d", "prior_psi_mean", "prior_psi_Omega", "n_lags", "n_burnin", "n_reps")]
    stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " "))
  }
  # Deterministic terms must be supplied for every forecast period.
  if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) {
    stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".")
  }
  # Minnesota-style prior moments for the VAR coefficients and error covariance.
  priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1,
                           Y = x$Y, n_lags = x$n_lags, prior_nu = n_vars + 2)
  prior_Pi_mean <- priors$prior_Pi_mean
  prior_Pi_Omega <- priors$prior_Pi_Omega
  prior_S <- priors$prior_S
  Y <- x$Y
  d <- x$d
  d_fcst <- x$d_fcst
  freq <- x$freq
  prior_psi_mean <- x$prior_psi_mean
  prior_psi_Omega <- x$prior_psi_Omega
  n_fcst <- x$n_fcst
  check_roots <- x$check_roots
  verbose <- x$verbose
  # Volatility prior: AR(1) coefficient phi stored as (mean*precision,
  # precision); innovation variance stored as (scale, df).
  phi_invvar <- 1/x$prior_phi[2]
  phi_meaninvvar <- x$prior_phi[1] * phi_invvar
  prior_sigma2 <- x$prior_sigma2[1]
  prior_df <- x$prior_sigma2[2]
  add_args <- list(...)
  n_reps <- x$n_reps
  n_burnin <- x$n_burnin
  n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin)
  # Optional user-supplied starting values (mirrors `init` in the return value).
  init <- add_args$init
  init_Pi <- init$init_Pi
  init_Sigma <- init$init_Sigma
  init_psi <- init$init_psi
  init_Z <- init$init_Z
  init_phi <- init$init_phi
  init_sigma <- init$init_sigma
  init_f <- init$init_f
  # n_vars: number of variables
  # n_lags: number of lags
  # n_determ: number of deterministic variables
  # n_T: sample size (full sample)
  # n_T_: sample size (reduced sample)
  n_vars <- dim(Y)[2]
  n_lags <- prod(dim(as.matrix(prior_Pi_mean)))/n_vars^2
  n_q <- sum(freq == "q")
  n_m <- sum(freq == "m")
  # Single-frequency case: drop incomplete rows and shift their
  # deterministic terms into the forecast block.
  if (n_q == 0 || n_q == n_vars) {
    complete_quarters <- apply(Y, 1, function(x) !any(is.na(x)))
    Y <- Y[complete_quarters, ]
    d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst)
    d <- d[complete_quarters, , drop = FALSE]
  }
  y_in_p <- Y[-(1:n_lags), ]
  # T_b: last period with complete monthly data within the estimation sample.
  if (n_q < n_vars) {
    T_b <- min(apply(y_in_p[,1:n_m,drop=FALSE], 2, function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf))-1, nrow(y_in_p))
  } else {
    T_b <- nrow(y_in_p)
  }
  # Lambda_ maps latent monthly values to observed quarterly values
  # (intra-quarter average or triangular aggregation).
  if (n_q > 0) {
    if (x$aggregation == "average") {
      Lambda_ <- build_Lambda(rep("average", n_q), 3)
    } else {
      Lambda_ <- build_Lambda(rep("triangular", n_q), 5)}
  } else {
    Lambda_ <- matrix(0, 1, 3)
  }
  n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_)))
  n_determ <- dim(d)[2]
  n_T <- dim(Y)[1]# - n_lags
  n_T_ <- n_T - n_pseudolags
  ################################################################
  ### Preallocation
  # Pi and Sigma store their i-th draws in the third dimension, psi
  # is vectorized so it has its i-th draw stored in the i-th row
  # Pi: p * pk * n_reps, each [,,i] stores Pi'
  # Sigma: p * p * n_reps
  # psi: n_reps * p
  # Z: T * p * n_reps
  ### If forecasting (h is horizon):
  # Z_fcst: hk * p * n_reps
  # d_fcst_lags: hk * m
  ### If root checking:
  # roots: n_reps vector
  # num_tries: n_reps vector
  ### If smoothing of the state vector:
  # smoothed_Z: T * p * n_reps
  Pi <- array(NA, dim = c(n_vars, n_vars * n_lags, n_reps/n_thin))
  Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin))
  psi <- array(NA, dim = c(n_reps/n_thin, n_vars * n_determ))
  Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin))
  phi <- rep(NA, n_reps/n_thin)
  sigma <- rep(NA, n_reps/n_thin)
  f <- matrix(NA, n_reps/n_thin, n_T_)
  Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin))
  if (n_fcst > 0) {
    rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst))
    Z_fcst[,,1] <- 0
  } else {
    rownames(Z_fcst) <- (n_T-n_lags+1):n_T
  }
  # Deterministic terms covering the lag window plus the forecast horizon.
  d_fcst_lags <- as.matrix(rbind(d[(n_T-n_lags+1):n_T, , drop = FALSE], d_fcst))
  d_fcst_lags <- d_fcst_lags[1:(n_lags+n_fcst), , drop = FALSE]
  roots <- vector("numeric", n_reps/n_thin)
  num_tries <- roots
  ################################################################
  ### MCMC sampling initialization
  # If the initial values are not provided, the missing values in
  # Z are filled with the next observed value and Pi, Sigma and
  # psi are then computed using maximum likelihood
  # This allows the user to run the MCMC sampler for a burn-in
  # period, then use the final draw of that as initialization
  # for multiple chains
  if (is.null(init_Z)) {
    Z[,, 1] <- fill_na(Y)
  } else {
    if (all(dim(Z[,, 1]) == dim(init_Z))) {
      Z[,, 1] <- init_Z
    } else {
      stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "), ", but should be ", paste(dim(Z[,, 1]), collapse = " x ")))
    }
  }
  ols_results <- tryCatch(ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ),
                          error = function(cond) NULL)
  # Fall back to the prior moments if OLS initialization fails.
  if (is.null(ols_results)) {
    ols_results <- list()
    ols_results$Pi <- prior_Pi_mean
    ols_results$S <- prior_S
    ols_results$psi <- prior_psi_mean
  }
  if (is.null(init_Pi)) {
    Pi[,, 1] <- ols_results$Pi
  } else {
    if (all(dim(Pi[,, 1]) == dim(init_Pi))) {
      Pi[,, 1] <- init_Pi
    } else {
      stop(paste0("The dimension of init_Pi is ", paste(dim(init_Pi), collapse = " x "), ", but should be ", paste(dim(Pi[,, 1]), collapse = " x ")))
    }
  }
  # Compute the maximum eigenvalue of the initial Pi
  if (check_roots == TRUE) {
    Pi_comp <- build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags)
    roots[1] <- max_eig_cpp(Pi_comp)
  }
  if (is.null(init_Sigma)) {
    Sigma[,, 1] <- ols_results$S
  } else {
    if (all(dim(Sigma[,,1]) == dim(init_Sigma))) {
      Sigma[,, 1] <- init_Sigma
    } else {
      stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "), ", but should be ", paste(dim(Sigma[,,1]), collapse = " x ")))
    }
  }
  # Use the OLS steady state only if the initial VAR is stationary
  # (largest companion root < 1); otherwise start from the prior mean.
  if (is.null(init_psi)) {
    if (roots[1] < 1) {
      psi[1, ] <- ols_results$psi
    } else {
      psi[1, ] <- prior_psi_mean
    }
  } else {
    if (length(psi[1, ]) == length(init_psi)) {
      psi[1,] <- init_psi
    } else {
      stop(paste0("The length of init_psi is ", paste(length(init_psi), collapse = " x "), ", but should be ", paste(length(psi[1,]), collapse = " x ")))
    }
  }
  if (is.null(init_phi)) {
    phi[1] <- x$prior_phi[1]
  } else {
    phi[1] <- init_phi
  }
  if (is.null(init_sigma)) {
    sigma[1] <- sqrt(x$prior_sigma2[1])
  } else {
    sigma[1] <- init_sigma
  }
  if (is.null(init_f)) {
    f[1,] <- 0.0
  } else {
    f[1,] <- init_f
  }
  ################################################################
  ### Compute terms which do not vary in the sampler
  # Create D (does not vary in the sampler), and find roots of Pi
  # if requested
  D_mat <- build_DD(d = d, n_lags = n_lags)
  dt <- d[-(1:n_lags), , drop = FALSE]
  d1 <- d[1:n_lags, , drop = FALSE]
  # For the posterior of Pi
  inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega))
  Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean
  # For the posterior of psi
  Z_1 <- Z[1:n_pseudolags,, 1]
  # The normal-gamma hyperparameters are fixed at zero here and the final
  # FALSE flag is passed to the compiled sampler -- presumably this reuses
  # the ssng routine with the hierarchical prior disabled (confirm against
  # mcmc_ssng_csv's signature).
  phi_mu <- matrix(0, 1, 1)
  lambda_mu <- matrix(0, 1, 1)
  omega <- matrix(diag(prior_psi_Omega), nrow = 1)
  c0 <- 0
  c1 <- 0
  s <- 0
  # Compiled sampler: called for its side effects on the preallocated
  # containers, which are read below.
  mcmc_ssng_csv(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu,lambda_mu,omega,Z,Z_fcst,phi,sigma,f,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean,
                prior_S,D_mat,dt,d1,d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1,
                10,phi_invvar,phi_meaninvvar,prior_sigma2,prior_df,n_reps,n_burnin,n_q,T_b,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,
                verbose,FALSE)
  # Bundle draws, priors, data, and the final draw (as `init`, for chain
  # continuation) into the return object.
  return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, phi = phi, sigma = sigma, f = f,
                     roots = NULL, num_tries = NULL, Z_fcst = NULL, aggregation = x$aggregation,
                     n_determ = n_determ, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst,
                     prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, prior_S = prior_S,
                     prior_nu = n_vars+2, post_nu = n_T + n_vars+2, d = d, Y = Y, n_T = n_T, n_T_ = n_T_,
                     prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean,
                     n_reps = n_reps, n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_,
                     init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin],
                                 init_psi = psi[n_reps/n_thin, ], init_Z = Z[,, n_reps/n_thin],
                                 init_phi = phi[n_reps/n_thin], init_sigma = sigma[n_reps/n_thin],
                                 init_f = f[n_reps/n_thin,]))
  if (check_roots == TRUE) {
    return_obj$roots <- roots
    return_obj$num_tries <- num_tries
  }
  if (n_fcst > 0) {
    return_obj$Z_fcst <- Z_fcst
  }
  return(return_obj)
}
# Steady-state mixed-frequency BVAR with a hierarchical normal-gamma prior
# on the steady state and common stochastic volatility ("ssng_csv").
mcmc_sampler.mfbvar_ssng_csv <- function(x, ...) {
  n_vars <- ncol(x$Y)
  # Fail early with an informative message if required settings are missing.
  if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) {
    test_all <- sapply(x, is.null)
    test_sub <- test_all[c("Y", "d", "prior_psi_mean", "n_lags", "n_burnin", "n_reps")]
    stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " "))
  }
  # Deterministic terms must be supplied for every forecast period.
  if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) {
    stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".")
  }
  # Minnesota-style prior moments for the VAR coefficients and error covariance.
  priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1,
                           Y = x$Y, n_lags = x$n_lags, prior_nu = n_vars + 2)
  prior_Pi_mean <- priors$prior_Pi_mean
  prior_Pi_Omega <- priors$prior_Pi_Omega
  prior_S <- priors$prior_S
  Y <- x$Y
  d <- x$d
  d_fcst <- x$d_fcst
  freq <- x$freq
  prior_psi_mean <- x$prior_psi_mean
  prior_psi_Omega <- x$prior_psi_Omega
  n_fcst <- x$n_fcst
  check_roots <- x$check_roots
  verbose <- x$verbose
  # Volatility prior: AR(1) coefficient phi stored as (mean*precision, precision).
  phi_invvar <- 1/x$prior_phi[2]
  phi_meaninvvar <- x$prior_phi[1] * phi_invvar
prior_sigma2 <- x$prior_sigma2[1] prior_df <- x$prior_sigma2[2] add_args <- list(...) n_reps <- x$n_reps n_burnin <- x$n_burnin n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) init <- add_args$init init_Pi <- init$init_Pi init_Sigma <- init$init_Sigma init_psi <- init$init_psi init_Z <- init$init_Z init_phi <- init$init_phi init_sigma <- init$init_sigma init_f <- init$init_f init_omega <- init$init_omega init_phi_mu <- init$init_phi_mu init_lambda_mu <- init$init_lambda_mu # n_vars: number of variables # n_lags: number of lags # n_determ: number of deterministic variables # n_T: sample size (full sample) # n_T_: sample size (reduced sample) n_vars <- dim(Y)[2] n_lags <- prod(dim(as.matrix(prior_Pi_mean)))/n_vars^2 n_q <- sum(freq == "q") n_m <- sum(freq == "m") if (n_q == 0 || n_q == n_vars) { complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) Y <- Y[complete_quarters, ] d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) d <- d[complete_quarters, , drop = FALSE] } y_in_p <- Y[-(1:n_lags), ] if (n_q < n_vars) { T_b <- min(apply(y_in_p[,1:n_m,drop=FALSE], 2, function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf))-1, nrow(y_in_p)) } else { T_b <- nrow(y_in_p) } if (n_q > 0) { if (x$aggregation == "average") { Lambda_ <- build_Lambda(rep("average", n_q), 3) } else { Lambda_ <- build_Lambda(rep("triangular", n_q), 5)} } else { Lambda_ <- matrix(0, 1, 3) } n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) n_determ <- dim(d)[2] n_T <- dim(Y)[1]# - n_lags n_T_ <- n_T - n_pseudolags c0 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[1]) c1 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[2]) s <- ifelse(is.null(x[["s"]]), -10, x$s) ################################################################ ### Preallocation # Pi and Sigma store their i-th draws in the third dimension, psi # is vectorized so it has its i-th draw stored in the i-th row # Pi: p * pk * n_reps, each [,,i] stores Pi' # Sigma: p * p * n_reps # psi: n_reps * p # Z: T * p 
* n_reps ### If forecasting (h is horizon): # Z_fcst: hk * p * n_reps # d_fcst_lags: hk * m ### If root checking: # roots: n_reps vector # num_tries: n_reps vector ### If smoothing of the state vector: # smoothed_Z: T * p * n_reps Pi <- array(NA, dim = c(n_vars, n_vars * n_lags, n_reps/n_thin)) Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin)) psi <- array(NA, dim = c(n_reps/n_thin, n_vars * n_determ)) Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin)) phi <- rep(NA, n_reps/n_thin) sigma <- rep(NA, n_reps/n_thin) f <- matrix(NA, n_reps/n_thin, n_T_) omega <- matrix(NA, nrow = n_reps/n_thin, ncol = n_vars * n_determ) phi_mu <- rep(NA, n_reps/n_thin) lambda_mu <- rep(NA, n_reps/n_thin) Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) if (n_fcst > 0) { rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) Z_fcst[,,1] <- 0 } else { rownames(Z_fcst) <- (n_T-n_lags+1):n_T } d_fcst_lags <- as.matrix(rbind(d[(n_T-n_lags+1):n_T, , drop = FALSE], d_fcst)) d_fcst_lags <- d_fcst_lags[1:(n_lags+n_fcst), , drop = FALSE] roots <- vector("numeric", n_reps/n_thin) num_tries <- roots ################################################################ ### MCMC sampling initialization # If the initial values are not provided, the missing values in # Z are filled with the next observed value and Pi, Sigma and # psi are then computed using maximum likelihood # This allows the user to run the MCMC sampler for a burn-in # period, then use the final draw of that as initialization # for multiple chains if (is.null(init_Z)) { Z[,, 1] <- fill_na(Y) } else { if (all(dim(Z[,, 1]) == dim(init_Z))) { Z[,, 1] <- init_Z } else { stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "), ", but should be ", paste(dim(Z[,, 1]), collapse = " x "))) } } ols_results <- tryCatch(ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), error = function(cond) NULL) if (is.null(ols_results)) { 
ols_results <- list() ols_results$Pi <- prior_Pi_mean ols_results$S <- prior_S ols_results$psi <- prior_psi_mean } if (is.null(init_Pi)) { Pi[,, 1] <- ols_results$Pi } else { if (all(dim(Pi[,, 1]) == dim(init_Pi))) { Pi[,, 1] <- init_Pi } else { stop(paste0("The dimension of init_Pi is ", paste(dim(init_Pi), collapse = " x "), ", but should be ", paste(dim(Pi[,, 1]), collapse = " x "))) } } # Compute the maximum eigenvalue of the initial Pi if (check_roots == TRUE) { Pi_comp <- build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) roots[1] <- max_eig_cpp(Pi_comp) } if (is.null(init_Sigma)) { Sigma[,, 1] <- ols_results$S } else { if (all(dim(Sigma[,,1]) == dim(init_Sigma))) { Sigma[,, 1] <- init_Sigma } else { stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "), ", but should be ", paste(dim(Sigma[,,1]), collapse = " x "))) } } if (is.null(init_psi)) { if (roots[1] < 1) { psi[1, ] <- ols_results$psi } else { psi[1, ] <- prior_psi_mean } } else { if (length(psi[1, ]) == length(init_psi)) { psi[1,] <- init_psi } else { stop(paste0("The length of init_psi is ", paste(length(init_psi), collapse = " x "), ", but should be ", paste(length(psi[1,]), collapse = " x "))) } } if (is.null(init_phi)) { phi[1] <- x$prior_phi[1] } else { phi[1] <- init_phi } if (is.null(init_sigma)) { sigma[1] <- sqrt(x$prior_sigma2[1]) } else { sigma[1] <- init_sigma } if (is.null(init_f)) { f[1,] <- 0.0 } else { f[1,] <- init_f } if (is.null(init_omega)) { if (is.null(prior_psi_Omega)) { omega[1, ] <- diag(prior_psi_Omega) } else { omega[1, ] <- rep(0.1, n_determ*n_vars) } } else { omega[1, ] <- init_omega } if (is.null(init_phi_mu)) { phi_mu[1] <- 1 } else { phi_mu[1] <- init_phi_mu } if (is.null(init_lambda_mu)) { lambda_mu[1] <- 1 } else { lambda_mu[1] <- init_lambda_mu } ################################################################ ### Compute terms which do not vary in the sampler # Create D (does not vary in the sampler), and find roots of 
Pi # if requested D_mat <- build_DD(d = d, n_lags = n_lags) dt <- d[-(1:n_lags), , drop = FALSE] d1 <- d[1:n_lags, , drop = FALSE] # For the posterior of Pi inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega)) Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean Z_1 <- Z[1:n_pseudolags,, 1] mcmc_ssng_csv(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu,lambda_mu,omega,Z,Z_fcst,phi,sigma,f,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean, prior_S,D_mat,dt,d1,d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1, 10,phi_invvar,phi_meaninvvar,prior_sigma2,prior_df,n_reps,n_burnin,n_q,T_b,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose,TRUE) return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, phi_mu = phi_mu, lambda_mu = lambda_mu, omega = omega, phi = phi, sigma = sigma, f = f, Z_fcst = NULL, aggregation = x$aggregation, n_determ = n_determ, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, prior_S = prior_S, prior_nu = n_vars+2, post_nu = n_T + n_vars+2, d = d, Y = Y, n_T = n_T, n_T_ = n_T_, prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, Lambda_ = Lambda_, init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_psi = psi[n_reps/n_thin, ], init_Z = Z[,, n_reps/n_thin], init_phi = phi[n_reps/n_thin], init_sigma = sigma[n_reps/n_thin], init_f = f[n_reps/n_thin,], init_omega = omega[n_reps/n_thin, ], init_lambda_mu = lambda_mu[n_reps/n_thin], init_phi_mu = phi_mu[n_reps/n_thin])) if (check_roots == TRUE) { return_obj$roots <- roots return_obj$num_tries <- num_tries } if (n_fcst > 0) { return_obj$Z_fcst <- Z_fcst } return(return_obj) }
/R/mcmc_sampler_csv.R
no_license
mmkuang/mfbvar
R
false
false
25,536
r
# MCMC samplers for mixed-frequency BVARs with common stochastic volatility
# (csv). Three prior variants share most of their setup:
#   - mfbvar_minn_csv: Minnesota prior (intercept folded into Pi)
#   - mfbvar_ss_csv:   steady-state prior with a fixed psi prior covariance
#   - mfbvar_ssng_csv: steady-state prior with normal-gamma shrinkage on psi
# Each preallocates the draw containers, warm-starts from `init` (if given in
# ...), hands everything to a C++ kernel that fills the containers in place,
# and returns the draws plus an `init` element holding the final draw so that
# chains can be restarted/continued.

#' Minnesota-prior sampler (common stochastic volatility).
#'
#' @param x Prior object with at least Y, freq, aggregation, n_lags, n_burnin,
#'   n_reps, lambda1/lambda3/lambda4, prior_Pi_AR1, prior_phi, prior_sigma2,
#'   n_fcst, verbose; optionally n_thin.
#' @param ... Extra arguments; `init` (list of initial values) warm-starts the
#'   chain.
#' @return List of posterior draws (Pi, Sigma, Z, phi, sigma, f), the priors
#'   used, bookkeeping sizes and `init` with the last retained draw.
mcmc_sampler.mfbvar_minn_csv <- function(x, ...) {
  n_vars <- ncol(x$Y)

  # Fail early with the names of any missing required elements
  if (!(!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) {
    test_all <- sapply(x, is.null)
    test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps")]
    stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " "))
  }

  prior_nu <- n_vars + 2
  priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3,
                           prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y,
                           n_lags = x$n_lags, prior_nu = prior_nu)
  prior_Pi_mean  <- priors$prior_Pi_mean
  prior_Pi_Omega <- priors$prior_Pi_Omega
  prior_S        <- priors$prior_S

  Y       <- x$Y
  freq    <- x$freq
  n_fcst  <- x$n_fcst
  verbose <- x$verbose
  n_lags  <- x$n_lags
  lambda4 <- x$lambda4

  # Add terms for the constant (intercept column prepended to Pi)
  prior_Pi_Omega <- diag(c(x$lambda1^2 * lambda4^2, diag(prior_Pi_Omega)))
  prior_Pi_mean  <- rbind(0, prior_Pi_mean)

  phi_invvar     <- 1 / x$prior_phi[2]
  phi_meaninvvar <- x$prior_phi[1] * phi_invvar
  prior_sigma2   <- x$prior_sigma2[1]
  prior_df       <- x$prior_sigma2[2]

  add_args <- list(...)
  n_reps   <- x$n_reps
  n_burnin <- x$n_burnin
  n_thin   <- ifelse(is.null(x$n_thin), 1, x$n_thin)

  init       <- add_args$init
  init_Pi    <- init$init_Pi
  init_Sigma <- init$init_Sigma
  init_Z     <- init$init_Z
  init_phi   <- init$init_phi
  init_sigma <- init$init_sigma
  init_f     <- init$init_f

  # n_vars: number of variables; n_lags: number of lags
  # n_T: sample size (full); n_T_: sample size (reduced)
  n_q <- sum(freq == "q")
  if (n_q < n_vars) {
    # Last row in which every monthly variable is observed
    T_b <- max(which(!apply(apply(Y[, freq == "m", drop = FALSE], 2, is.na), 1, any)))
  } else {
    T_b <- nrow(Y)
  }
  if (n_q == 0 || n_q == n_vars) {
    # Single-frequency case: drop rows with any missing observation
    complete_quarters <- apply(Y, 1, function(x) !any(is.na(x)))
    Y <- Y[complete_quarters, ]
  }

  # Aggregation matrix linking latent monthly states to quarterly observations
  if (n_q > 0) {
    if (x$aggregation == "average") {
      Lambda_ <- build_Lambda(rep("average", n_q), 3)
    } else {
      Lambda_ <- build_Lambda(rep("triangular", n_q), 5)
    }
  } else {
    Lambda_ <- matrix(0, 1, 3)
  }
  n_pseudolags <- max(c(n_lags, ncol(Lambda_) / nrow(Lambda_)))
  n_T  <- dim(Y)[1]
  n_T_ <- n_T - n_pseudolags
  d <- matrix(1, nrow = nrow(Y), ncol = 1)
  post_nu <- n_T_ + prior_nu

  ### Preallocation: third dim / rows index the retained (thinned) draws
  Pi    <- array(NA, dim = c(n_vars, n_vars * n_lags + 1, n_reps / n_thin))
  Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps / n_thin))
  Z     <- array(NA, dim = c(n_T, n_vars, n_reps / n_thin))
  phi   <- rep(NA, n_reps / n_thin)
  sigma <- rep(NA, n_reps / n_thin)
  # BUG FIX: was matrix(NA, n_reps, n_T_). The ss/ssng samplers allocate
  # n_reps/n_thin rows and the init extraction below takes f[n_reps/n_thin, ]
  # as the *final* draw, which is only correct with the thinned row count.
  # (NOTE(review): confirm mcmc_minn_csv indexes f by thinned draw.)
  f <- matrix(NA, n_reps / n_thin, n_T_)
  Z_fcst <- array(NA, dim = c(n_fcst + n_lags, n_vars, n_reps / n_thin))
  if (n_fcst > 0) {
    rownames(Z_fcst) <- c((n_T - n_lags + 1):n_T, paste0("fcst_", 1:n_fcst))
    Z_fcst[, , 1] <- 0
  } else {
    rownames(Z_fcst) <- (n_T - n_lags + 1):n_T
  }

  ### MCMC initialization: missing values in Z are filled with the next
  ### observed value unless init_Z is supplied; this lets a burn-in run seed
  ### multiple chains through the returned $init element.
  if (is.null(init_Z)) {
    Z[, , 1] <- fill_na(Y)
  } else {
    if (all(dim(Z[, , 1]) == dim(init_Z))) {
      Z[, , 1] <- init_Z
    } else {
      stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "),
                  ", but should be ", paste(dim(Z[, , 1]), collapse = " x ")))
    }
  }

  ols_results <- ols_initialization(z = Z[, , 1], d = d, n_lags = n_lags,
                                    n_T = n_T, n_vars = n_vars, n_determ = 1)
  if (is.null(init_Pi)) {
    # Intercept estimate goes in the first column, consistent with the prior
    Pi[, , 1] <- cbind(ols_results$const, ols_results$Pi)
  } else {
    if (all(dim(Pi[, , 1]) == dim(init_Pi))) {
      Pi[, , 1] <- init_Pi
    } else {
      stop(paste0("The dimension of init_Pi is ", paste(dim(init_Pi), collapse = " x "),
                  ", but should be ", paste(dim(Pi[, , 1]), collapse = " x ")))
    }
  }
  if (is.null(init_Sigma)) {
    Sigma[, , 1] <- ols_results$S
  } else {
    if (all(dim(Sigma[, , 1]) == dim(init_Sigma))) {
      Sigma[, , 1] <- init_Sigma
    } else {
      stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "),
                  ", but should be ", paste(dim(Sigma[, , 1]), collapse = " x ")))
    }
  }
  phi[1]   <- if (is.null(init_phi)) x$prior_phi[1] else init_phi
  sigma[1] <- if (is.null(init_sigma)) sqrt(x$prior_sigma2[1]) else init_sigma
  f[1, ]   <- if (is.null(init_f)) 0.0 else init_f

  ### Terms that do not vary over the sampler
  Z_1 <- Z[1:n_pseudolags, , 1]
  inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega))
  Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean

  # C++ kernel fills Pi/Sigma/Z/Z_fcst/phi/sigma/f in place
  mcmc_minn_csv(Y[-(1:n_lags), ], Pi, Sigma, Z, Z_fcst, phi, sigma, f, Lambda_,
                prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean,
                prior_S, Z_1, 10, phi_invvar, phi_meaninvvar, prior_sigma2,
                prior_df, n_reps, n_burnin, n_q, T_b - n_lags, n_lags, n_vars,
                n_T_, n_fcst, n_thin, verbose)

  return_obj <- list(Pi = Pi, Sigma = Sigma, Z = Z, phi = phi, sigma = sigma,
                     f = f, Z_fcst = NULL, aggregation = x$aggregation,
                     n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst,
                     prior_Pi_Omega = prior_Pi_Omega,
                     prior_Pi_mean = prior_Pi_mean, prior_S = prior_S,
                     prior_nu = n_vars + 2, post_nu = n_T + n_vars + 2,
                     d = d, Y = Y, n_T = n_T, n_T_ = n_T_, n_reps = n_reps,
                     n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_,
                     init = list(init_Pi = Pi[, , n_reps / n_thin],
                                 init_Sigma = Sigma[, , n_reps / n_thin],
                                 init_Z = Z[, , n_reps / n_thin],
                                 init_phi = phi[n_reps / n_thin],
                                 init_sigma = sigma[n_reps / n_thin],
                                 init_f = f[n_reps / n_thin, ]))
  if (n_fcst > 0) {
    return_obj$Z_fcst <- Z_fcst
  }
  return(return_obj)
}

#' Steady-state-prior sampler (fixed psi prior covariance, common stochastic
#' volatility). Reuses the normal-gamma C++ kernel with the NG step disabled
#' (final argument FALSE) and dummy NG inputs.
#'
#' @inheritParams mcmc_sampler.mfbvar_minn_csv Additionally requires d,
#'   d_fcst, prior_psi_mean, prior_psi_Omega and check_roots in `x`.
mcmc_sampler.mfbvar_ss_csv <- function(x, ...) {
  n_vars <- ncol(x$Y)
  if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) &&
        !is.null(x$prior_psi_Omega) && !is.null(x$n_lags) &&
        !is.null(x$n_burnin) && !is.null(x$n_reps))) {
    test_all <- sapply(x, is.null)
    test_sub <- test_all[c("Y", "d", "prior_psi_mean", "prior_psi_Omega",
                           "n_lags", "n_burnin", "n_reps")]
    stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " "))
  }
  if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) {
    stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".")
  }

  priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3,
                           prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y,
                           n_lags = x$n_lags, prior_nu = n_vars + 2)
  prior_Pi_mean  <- priors$prior_Pi_mean
  prior_Pi_Omega <- priors$prior_Pi_Omega
  prior_S        <- priors$prior_S

  Y               <- x$Y
  d               <- x$d
  d_fcst          <- x$d_fcst
  freq            <- x$freq
  prior_psi_mean  <- x$prior_psi_mean
  prior_psi_Omega <- x$prior_psi_Omega
  n_fcst          <- x$n_fcst
  check_roots     <- x$check_roots
  verbose         <- x$verbose
  phi_invvar      <- 1 / x$prior_phi[2]
  phi_meaninvvar  <- x$prior_phi[1] * phi_invvar
  prior_sigma2    <- x$prior_sigma2[1]
  prior_df        <- x$prior_sigma2[2]

  add_args <- list(...)
  n_reps   <- x$n_reps
  n_burnin <- x$n_burnin
  n_thin   <- ifelse(is.null(x$n_thin), 1, x$n_thin)

  init       <- add_args$init
  init_Pi    <- init$init_Pi
  init_Sigma <- init$init_Sigma
  init_psi   <- init$init_psi
  init_Z     <- init$init_Z
  init_phi   <- init$init_phi
  init_sigma <- init$init_sigma
  init_f     <- init$init_f

  # n_vars/n_lags recovered from the prior mean so they match the prior
  n_vars <- dim(Y)[2]
  n_lags <- prod(dim(as.matrix(prior_Pi_mean))) / n_vars^2
  n_q <- sum(freq == "q")
  n_m <- sum(freq == "m")
  if (n_q == 0 || n_q == n_vars) {
    # Single-frequency case: drop incomplete rows; shifted deterministic rows
    # are moved into the forecast-period deterministic matrix
    complete_quarters <- apply(Y, 1, function(x) !any(is.na(x)))
    Y <- Y[complete_quarters, ]
    d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst)
    d <- d[complete_quarters, , drop = FALSE]
  }
  y_in_p <- Y[-(1:n_lags), ]
  if (n_q < n_vars) {
    # Last in-sample row before any monthly series goes missing
    T_b <- min(apply(y_in_p[, 1:n_m, drop = FALSE], 2,
                     function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf)) - 1,
               nrow(y_in_p))
  } else {
    T_b <- nrow(y_in_p)
  }
  if (n_q > 0) {
    if (x$aggregation == "average") {
      Lambda_ <- build_Lambda(rep("average", n_q), 3)
    } else {
      Lambda_ <- build_Lambda(rep("triangular", n_q), 5)
    }
  } else {
    Lambda_ <- matrix(0, 1, 3)
  }
  n_pseudolags <- max(c(n_lags, ncol(Lambda_) / nrow(Lambda_)))
  n_determ <- dim(d)[2]
  n_T  <- dim(Y)[1]
  n_T_ <- n_T - n_pseudolags

  ### Preallocation (retained draws along the last dim / rows)
  Pi    <- array(NA, dim = c(n_vars, n_vars * n_lags, n_reps / n_thin))
  Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps / n_thin))
  psi   <- array(NA, dim = c(n_reps / n_thin, n_vars * n_determ))
  Z     <- array(NA, dim = c(n_T, n_vars, n_reps / n_thin))
  phi   <- rep(NA, n_reps / n_thin)
  sigma <- rep(NA, n_reps / n_thin)
  f     <- matrix(NA, n_reps / n_thin, n_T_)
  Z_fcst <- array(NA, dim = c(n_fcst + n_lags, n_vars, n_reps / n_thin))
  if (n_fcst > 0) {
    rownames(Z_fcst) <- c((n_T - n_lags + 1):n_T, paste0("fcst_", 1:n_fcst))
    Z_fcst[, , 1] <- 0
  } else {
    rownames(Z_fcst) <- (n_T - n_lags + 1):n_T
  }
  d_fcst_lags <- as.matrix(rbind(d[(n_T - n_lags + 1):n_T, , drop = FALSE], d_fcst))
  d_fcst_lags <- d_fcst_lags[1:(n_lags + n_fcst), , drop = FALSE]
  roots <- vector("numeric", n_reps / n_thin)
  num_tries <- roots

  ### MCMC initialization (see the Minnesota variant for the rationale)
  if (is.null(init_Z)) {
    Z[, , 1] <- fill_na(Y)
  } else {
    if (all(dim(Z[, , 1]) == dim(init_Z))) {
      Z[, , 1] <- init_Z
    } else {
      stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "),
                  ", but should be ", paste(dim(Z[, , 1]), collapse = " x ")))
    }
  }
  ols_results <- tryCatch(ols_initialization(z = Z[, , 1], d = d, n_lags = n_lags,
                                             n_T = n_T, n_vars = n_vars,
                                             n_determ = n_determ),
                          error = function(cond) NULL)
  if (is.null(ols_results)) {
    # OLS can fail (e.g. short samples); fall back to the prior values
    ols_results <- list(Pi = prior_Pi_mean, S = prior_S, psi = prior_psi_mean)
  }
  if (is.null(init_Pi)) {
    Pi[, , 1] <- ols_results$Pi
  } else {
    if (all(dim(Pi[, , 1]) == dim(init_Pi))) {
      Pi[, , 1] <- init_Pi
    } else {
      stop(paste0("The dimension of init_Pi is ", paste(dim(init_Pi), collapse = " x "),
                  ", but should be ", paste(dim(Pi[, , 1]), collapse = " x ")))
    }
  }
  # Maximum eigenvalue of the companion form of the initial Pi
  if (check_roots == TRUE) {
    Pi_comp <- build_companion(Pi = Pi[, , 1], n_vars = n_vars, n_lags = n_lags)
    roots[1] <- max_eig_cpp(Pi_comp)
  }
  if (is.null(init_Sigma)) {
    Sigma[, , 1] <- ols_results$S
  } else {
    if (all(dim(Sigma[, , 1]) == dim(init_Sigma))) {
      Sigma[, , 1] <- init_Sigma
    } else {
      stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "),
                  ", but should be ", paste(dim(Sigma[, , 1]), collapse = " x ")))
    }
  }
  if (is.null(init_psi)) {
    # Only trust the OLS steady state when the initial VAR is stable
    if (roots[1] < 1) {
      psi[1, ] <- ols_results$psi
    } else {
      psi[1, ] <- prior_psi_mean
    }
  } else {
    if (length(psi[1, ]) == length(init_psi)) {
      psi[1, ] <- init_psi
    } else {
      stop(paste0("The length of init_psi is ", paste(length(init_psi), collapse = " x "),
                  ", but should be ", paste(length(psi[1, ]), collapse = " x ")))
    }
  }
  phi[1]   <- if (is.null(init_phi)) x$prior_phi[1] else init_phi
  sigma[1] <- if (is.null(init_sigma)) sqrt(x$prior_sigma2[1]) else init_sigma
  f[1, ]   <- if (is.null(init_f)) 0.0 else init_f

  ### Terms that do not vary over the sampler
  D_mat <- build_DD(d = d, n_lags = n_lags)
  dt <- d[-(1:n_lags), , drop = FALSE]
  d1 <- d[1:n_lags, , drop = FALSE]
  inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega))
  Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean
  Z_1 <- Z[1:n_pseudolags, , 1]

  # Dummy normal-gamma inputs: the shared kernel is called with the NG step
  # switched off (final argument FALSE), so these are never updated
  phi_mu    <- matrix(0, 1, 1)
  lambda_mu <- matrix(0, 1, 1)
  omega     <- matrix(diag(prior_psi_Omega), nrow = 1)
  c0 <- 0
  c1 <- 0
  s  <- 0

  mcmc_ssng_csv(Y[-(1:n_lags), ], Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z,
                Z_fcst, phi, sigma, f, Lambda_, prior_Pi_Omega,
                inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat,
                dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots,
                Z_1, 10, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df,
                n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T_, n_fcst,
                n_determ, n_thin, verbose, FALSE)

  return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, phi = phi,
                     sigma = sigma, f = f, roots = NULL, num_tries = NULL,
                     Z_fcst = NULL, aggregation = x$aggregation,
                     n_determ = n_determ, n_lags = n_lags, n_vars = n_vars,
                     n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega,
                     prior_Pi_mean = prior_Pi_mean, prior_S = prior_S,
                     prior_nu = n_vars + 2, post_nu = n_T + n_vars + 2,
                     d = d, Y = Y, n_T = n_T, n_T_ = n_T_,
                     prior_psi_Omega = prior_psi_Omega,
                     prior_psi_mean = prior_psi_mean, n_reps = n_reps,
                     n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_,
                     init = list(init_Pi = Pi[, , n_reps / n_thin],
                                 init_Sigma = Sigma[, , n_reps / n_thin],
                                 init_psi = psi[n_reps / n_thin, ],
                                 init_Z = Z[, , n_reps / n_thin],
                                 init_phi = phi[n_reps / n_thin],
                                 init_sigma = sigma[n_reps / n_thin],
                                 init_f = f[n_reps / n_thin, ]))
  if (check_roots == TRUE) {
    return_obj$roots <- roots
    return_obj$num_tries <- num_tries
  }
  if (n_fcst > 0) {
    return_obj$Z_fcst <- Z_fcst
  }
  return(return_obj)
}

#' Steady-state-prior sampler with normal-gamma shrinkage on psi (common
#' stochastic volatility). Same structure as the ss variant, but omega, phi_mu
#' and lambda_mu are sampled (kernel called with final argument TRUE).
mcmc_sampler.mfbvar_ssng_csv <- function(x, ...) {
  n_vars <- ncol(x$Y)
  if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) &&
        !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) {
    test_all <- sapply(x, is.null)
    test_sub <- test_all[c("Y", "d", "prior_psi_mean", "n_lags", "n_burnin", "n_reps")]
    stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " "))
  }
  if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) {
    stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".")
  }

  priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3,
                           prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y,
                           n_lags = x$n_lags, prior_nu = n_vars + 2)
  prior_Pi_mean  <- priors$prior_Pi_mean
  prior_Pi_Omega <- priors$prior_Pi_Omega
  prior_S        <- priors$prior_S

  Y               <- x$Y
  d               <- x$d
  d_fcst          <- x$d_fcst
  freq            <- x$freq
  prior_psi_mean  <- x$prior_psi_mean
  prior_psi_Omega <- x$prior_psi_Omega  # may be NULL in the NG variant
  n_fcst          <- x$n_fcst
  check_roots     <- x$check_roots
  verbose         <- x$verbose
  phi_invvar      <- 1 / x$prior_phi[2]
  phi_meaninvvar  <- x$prior_phi[1] * phi_invvar
  prior_sigma2    <- x$prior_sigma2[1]
  prior_df        <- x$prior_sigma2[2]

  add_args <- list(...)
  n_reps   <- x$n_reps
  n_burnin <- x$n_burnin
  n_thin   <- ifelse(is.null(x$n_thin), 1, x$n_thin)

  init           <- add_args$init
  init_Pi        <- init$init_Pi
  init_Sigma     <- init$init_Sigma
  init_psi       <- init$init_psi
  init_Z         <- init$init_Z
  init_phi       <- init$init_phi
  init_sigma     <- init$init_sigma
  init_f         <- init$init_f
  init_omega     <- init$init_omega
  init_phi_mu    <- init$init_phi_mu
  init_lambda_mu <- init$init_lambda_mu

  n_vars <- dim(Y)[2]
  n_lags <- prod(dim(as.matrix(prior_Pi_mean))) / n_vars^2
  n_q <- sum(freq == "q")
  n_m <- sum(freq == "m")
  if (n_q == 0 || n_q == n_vars) {
    complete_quarters <- apply(Y, 1, function(x) !any(is.na(x)))
    Y <- Y[complete_quarters, ]
    d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst)
    d <- d[complete_quarters, , drop = FALSE]
  }
  y_in_p <- Y[-(1:n_lags), ]
  if (n_q < n_vars) {
    T_b <- min(apply(y_in_p[, 1:n_m, drop = FALSE], 2,
                     function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf)) - 1,
               nrow(y_in_p))
  } else {
    T_b <- nrow(y_in_p)
  }
  if (n_q > 0) {
    if (x$aggregation == "average") {
      Lambda_ <- build_Lambda(rep("average", n_q), 3)
    } else {
      Lambda_ <- build_Lambda(rep("triangular", n_q), 5)
    }
  } else {
    Lambda_ <- matrix(0, 1, 3)
  }
  n_pseudolags <- max(c(n_lags, ncol(Lambda_) / nrow(Lambda_)))
  n_determ <- dim(d)[2]
  n_T  <- dim(Y)[1]
  n_T_ <- n_T - n_pseudolags

  # Normal-gamma hyperparameters, with package defaults when unset
  c0 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[1])
  c1 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[2])
  s  <- ifelse(is.null(x[["s"]]), -10, x$s)

  ### Preallocation (retained draws along the last dim / rows)
  Pi        <- array(NA, dim = c(n_vars, n_vars * n_lags, n_reps / n_thin))
  Sigma     <- array(NA, dim = c(n_vars, n_vars, n_reps / n_thin))
  psi       <- array(NA, dim = c(n_reps / n_thin, n_vars * n_determ))
  Z         <- array(NA, dim = c(n_T, n_vars, n_reps / n_thin))
  phi       <- rep(NA, n_reps / n_thin)
  sigma     <- rep(NA, n_reps / n_thin)
  f         <- matrix(NA, n_reps / n_thin, n_T_)
  omega     <- matrix(NA, nrow = n_reps / n_thin, ncol = n_vars * n_determ)
  phi_mu    <- rep(NA, n_reps / n_thin)
  lambda_mu <- rep(NA, n_reps / n_thin)
  Z_fcst <- array(NA, dim = c(n_fcst + n_lags, n_vars, n_reps / n_thin))
  if (n_fcst > 0) {
    rownames(Z_fcst) <- c((n_T - n_lags + 1):n_T, paste0("fcst_", 1:n_fcst))
    Z_fcst[, , 1] <- 0
  } else {
    rownames(Z_fcst) <- (n_T - n_lags + 1):n_T
  }
  d_fcst_lags <- as.matrix(rbind(d[(n_T - n_lags + 1):n_T, , drop = FALSE], d_fcst))
  d_fcst_lags <- d_fcst_lags[1:(n_lags + n_fcst), , drop = FALSE]
  roots <- vector("numeric", n_reps / n_thin)
  num_tries <- roots

  ### MCMC initialization (see the Minnesota variant for the rationale)
  if (is.null(init_Z)) {
    Z[, , 1] <- fill_na(Y)
  } else {
    if (all(dim(Z[, , 1]) == dim(init_Z))) {
      Z[, , 1] <- init_Z
    } else {
      stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "),
                  ", but should be ", paste(dim(Z[, , 1]), collapse = " x ")))
    }
  }
  ols_results <- tryCatch(ols_initialization(z = Z[, , 1], d = d, n_lags = n_lags,
                                             n_T = n_T, n_vars = n_vars,
                                             n_determ = n_determ),
                          error = function(cond) NULL)
  if (is.null(ols_results)) {
    # OLS can fail (e.g. short samples); fall back to the prior values
    ols_results <- list(Pi = prior_Pi_mean, S = prior_S, psi = prior_psi_mean)
  }
  if (is.null(init_Pi)) {
    Pi[, , 1] <- ols_results$Pi
  } else {
    if (all(dim(Pi[, , 1]) == dim(init_Pi))) {
      Pi[, , 1] <- init_Pi
    } else {
      stop(paste0("The dimension of init_Pi is ", paste(dim(init_Pi), collapse = " x "),
                  ", but should be ", paste(dim(Pi[, , 1]), collapse = " x ")))
    }
  }
  # Maximum eigenvalue of the companion form of the initial Pi
  if (check_roots == TRUE) {
    Pi_comp <- build_companion(Pi = Pi[, , 1], n_vars = n_vars, n_lags = n_lags)
    roots[1] <- max_eig_cpp(Pi_comp)
  }
  if (is.null(init_Sigma)) {
    Sigma[, , 1] <- ols_results$S
  } else {
    if (all(dim(Sigma[, , 1]) == dim(init_Sigma))) {
      Sigma[, , 1] <- init_Sigma
    } else {
      stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "),
                  ", but should be ", paste(dim(Sigma[, , 1]), collapse = " x ")))
    }
  }
  if (is.null(init_psi)) {
    if (roots[1] < 1) {
      psi[1, ] <- ols_results$psi
    } else {
      psi[1, ] <- prior_psi_mean
    }
  } else {
    if (length(psi[1, ]) == length(init_psi)) {
      psi[1, ] <- init_psi
    } else {
      stop(paste0("The length of init_psi is ", paste(length(init_psi), collapse = " x "),
                  ", but should be ", paste(length(psi[1, ]), collapse = " x ")))
    }
  }
  phi[1]   <- if (is.null(init_phi)) x$prior_phi[1] else init_phi
  sigma[1] <- if (is.null(init_sigma)) sqrt(x$prior_sigma2[1]) else init_sigma
  f[1, ]   <- if (is.null(init_f)) 0.0 else init_f
  if (is.null(init_omega)) {
    # BUG FIX: the original branches were inverted -- it called
    # diag(prior_psi_Omega) precisely when prior_psi_Omega was NULL (an
    # error) and used the 0.1 fallback when a prior covariance WAS supplied.
    if (is.null(prior_psi_Omega)) {
      omega[1, ] <- rep(0.1, n_determ * n_vars)
    } else {
      omega[1, ] <- diag(prior_psi_Omega)
    }
  } else {
    omega[1, ] <- init_omega
  }
  phi_mu[1]    <- if (is.null(init_phi_mu)) 1 else init_phi_mu
  lambda_mu[1] <- if (is.null(init_lambda_mu)) 1 else init_lambda_mu

  ### Terms that do not vary over the sampler
  D_mat <- build_DD(d = d, n_lags = n_lags)
  dt <- d[-(1:n_lags), , drop = FALSE]
  d1 <- d[1:n_lags, , drop = FALSE]
  inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega))
  Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean
  Z_1 <- Z[1:n_pseudolags, , 1]

  # NG step enabled (final argument TRUE): omega/phi_mu/lambda_mu are sampled
  mcmc_ssng_csv(Y[-(1:n_lags), ], Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z,
                Z_fcst, phi, sigma, f, Lambda_, prior_Pi_Omega,
                inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat,
                dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots,
                Z_1, 10, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df,
                n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T_, n_fcst,
                n_determ, n_thin, verbose, TRUE)

  return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z,
                     phi_mu = phi_mu, lambda_mu = lambda_mu, omega = omega,
                     phi = phi, sigma = sigma, f = f, Z_fcst = NULL,
                     aggregation = x$aggregation, n_determ = n_determ,
                     n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst,
                     prior_Pi_Omega = prior_Pi_Omega,
                     prior_Pi_mean = prior_Pi_mean, prior_S = prior_S,
                     prior_nu = n_vars + 2, post_nu = n_T + n_vars + 2,
                     d = d, Y = Y, n_T = n_T, n_T_ = n_T_,
                     prior_psi_Omega = prior_psi_Omega,
                     prior_psi_mean = prior_psi_mean, n_reps = n_reps,
                     n_burnin = n_burnin, n_thin = n_thin,  # added for consistency with siblings
                     Lambda_ = Lambda_,
                     init = list(init_Pi = Pi[, , n_reps / n_thin],
                                 init_Sigma = Sigma[, , n_reps / n_thin],
                                 init_psi = psi[n_reps / n_thin, ],
                                 init_Z = Z[, , n_reps / n_thin],
                                 init_phi = phi[n_reps / n_thin],
                                 init_sigma = sigma[n_reps / n_thin],
                                 init_f = f[n_reps / n_thin, ],
                                 init_omega = omega[n_reps / n_thin, ],
                                 init_lambda_mu = lambda_mu[n_reps / n_thin],
                                 init_phi_mu = phi_mu[n_reps / n_thin]))
  if (check_roots == TRUE) {
    return_obj$roots <- roots
    return_obj$num_tries <- num_tries
  }
  if (n_fcst > 0) {
    return_obj$Z_fcst <- Z_fcst
  }
  return(return_obj)
}
# Fig 7: daily contamination risk inside fishing grounds, by species, with
# fishery open/close season markers.

# Clear workspace (script-level convention retained; discouraged in packages)
rm(list = ls())

# Setup
################################################################################

# Packages
library(sf)
library(raster)
library(tidyverse)
library(lubridate)
library(grid)
library(gridExtra)

# Directories
preddir <- "output/model_preds"
plotdir <- "figures"

# Study species (also fixes panel order in the figure)
study_species <- c("Dungeness crab", "Rock crab", "Spiny lobster", "Razor clam")

# Build data
################################################################################

# Mean of each raster layer (one layer per day), NAs removed
calc_avg_by_layer <- function(rbrick){
  vals <- sapply(1:nlayers(rbrick), function(x) mean(getValues(rbrick[[x]]), na.rm = TRUE))
  return(vals)
}

# SD of each raster layer, NAs removed
calc_sd_by_layer <- function(rbrick){
  vals <- sapply(1:nlayers(rbrick), function(x) sd(getValues(rbrick[[x]]), na.rm = TRUE))
  return(vals)
}

# If TRUE, rebuild the summary data from the prediction bricks; otherwise read cache
build <- TRUE
if(build){

  # Models
  models <- c("dungeness_crab_model_rf_cda.Rds", "rock_crab_model_brt_cda.Rds",
              "spiny_lobster_model_rf_cda.Rds", "razor_clam_model_rf_cda.Rds")

  # Loop through models
  # x <- models[2]  # (debugging)
  data_orig <- purrr::map_df(models, function(x) {

    # Model
    model_do <- gsub(".Rds", "", x)
    infile <- paste0(model_do, "_predictions_range_mask.grd")

    # Read model predictions
    sbrick <- brick(file.path(preddir, infile))

    # Calculate daily averages/SDs across the species range
    pcontam_avgs <- calc_avg_by_layer(sbrick)
    pcontam_sds <- calc_sd_by_layer(sbrick)

    # Build data frame (species name recovered from the model file name)
    spp_do <- gsub("_model_rf_cda|_model_brt_cda", "", model_do) %>%
      gsub("_", " ", .) %>% stringr::str_to_sentence()
    dates <- names(sbrick) %>% gsub("X", "", .) %>% ymd()
    df <- tibble(species = spp_do,
                 date = dates,
                 pcontam_avg = pcontam_avgs,
                 pcontam_sd = pcontam_sds)

  })

  # Format data
  data <- data_orig %>%
    mutate(species = factor(species, study_species))

  # Export
  saveRDS(data, file = file.path(preddir, "species_range_pcontam_averages.Rds"))

}else{

  # Read built data
  data <- readRDS(file.path(preddir, "species_range_pcontam_averages.Rds"))

}

# Build season lines
################################################################################

# Build one open/close season segment per year for a fishery.
# species <- "Dungeness crab"; fishery_type <- "Commercial"; region <- "Northern"; open_date <- "12-01"; close_date <- "07-15"
build_season_key <- function(species, fishery_type, region, open_date, close_date){
  dates_open <- paste(2013:2019, open_date, sep = "-") %>% ymd()
  # BUG FIX: was paste0(2014:2020, close_date, sep="-") -- paste0() has no
  # `sep` argument, so "-" was pasted on as an extra component
  dates_close <- paste(2014:2020, close_date, sep = "-") %>% ymd()
  season_key <- tibble(species = species,
                       fishery_type = fishery_type,
                       region = region,
                       open_date = dates_open,
                       close_date = dates_close) %>%
    mutate(line_group = 1:n()) %>%
    select(species:region, line_group, everything()) %>%
    # pivot_longer() replaces superseded gather(); same endpoint/date layout
    pivot_longer(cols = c(open_date, close_date),
                 names_to = "endpoint", values_to = "date") %>%
    arrange(species, fishery_type, region, line_group)
  return(season_key)
}

# Dungeness crab season keys
dcrab_comm_n_key <- build_season_key(species="Dungeness crab", fishery_type="Commercial", region="Northern", open_date="12-01", close_date="07-15")
dcrab_comm_c_key <- build_season_key(species="Dungeness crab", fishery_type="Commercial", region="Central", open_date="11-15", close_date="06-30")
dcrab_rec_n_key <- build_season_key(species="Dungeness crab", fishery_type="Recreational", region="Northern", open_date="11-01", close_date="07-30")
dcrab_rec_c_key <- build_season_key(species="Dungeness crab", fishery_type="Recreational", region="Central", open_date="11-01", close_date="06-30")

# Lobster season keys
lobster_comm_key <- build_season_key(species="Spiny lobster", fishery_type="Commercial", region="All", open_date="10-01", close_date="03-15")
lobster_rec_key <- build_season_key(species="Spiny lobster", fishery_type="Recreational", region="All", open_date="10-01", close_date="03-15")

# Season key
season_key <- bind_rows(dcrab_comm_n_key, dcrab_comm_c_key,
                        dcrab_rec_n_key, dcrab_rec_c_key,
                        lobster_comm_key, lobster_rec_key) %>%
  # Add latitudes to plot at
  mutate(lat_plot = 31.5,
         lat_plot = ifelse(fishery_type == "Commercial", lat_plot + 0.3, lat_plot),
         lat_plot = ifelse(region == "Central", lat_plot - 0.15, lat_plot)) %>%
  # Make new line group id (unique)
  # BUG FIX: sep="-" was outside paste(), which silently added a `sep` column
  # and joined the id pieces with spaces instead of dashes
  mutate(line_group = paste(species, fishery_type, region, line_group, sep = "-")) %>%
  mutate(species = factor(species, levels = study_species))

# Plot data
################################################################################

# Format data for plotting
data <- data %>%
  mutate(species = factor(species, levels = study_species))

# Setup theme
my_theme <- theme(axis.text = element_text(size = 6),
                  axis.title = element_text(size = 8),
                  axis.title.x = element_blank(),
                  plot.title = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black"))

# Plot contamination time series
g <- ggplot(data, aes(x = date, y = pcontam_avg)) +
  facet_wrap(~species, ncol = 1) +
  # Plot errors (+/- 1 SD band)
  geom_ribbon(mapping = aes(x = date, ymin = pcontam_avg - pcontam_sd,
                            ymax = pcontam_avg + pcontam_sd), fill = "grey80") +
  # Plot lines
  geom_line(lwd = 0.2) +
  # Plot seasons (horizontal segments at y = 0)
  geom_line(data = season_key, mapping = aes(x = date, y = 0, group = line_group)) +
  # Axis
  scale_y_continuous(lim = c(0, 1)) +
  scale_x_date(breaks = seq(ymd("2014-01-01"), ymd("2020-01-02"), by = "1 year"),
               labels = 2014:2020) +
  # Labels
  labs(x = "", y = "Mean daily contamination risk\ninside fishing grounds") +
  # Theme
  theme_bw() + my_theme
print(g)

# Export
ggsave(g, filename = file.path(plotdir, "Fig7_contamination_time_series.png"),
       width = 4.5, height = 4.5, units = "in", dpi = 600)
/code/figures/Fig7_contamination_time_series.R
no_license
cfree14/domoic_acid
R
false
false
6,206
r
# Fig 7: mean daily contamination risk inside fishing grounds over time,
# by species, with fishery season open/close periods overlaid at y = 0.

# Clear workspace (kept from the original script; avoid in reusable code)
rm(list = ls())

# Setup ################################################################################

# Packages
library(sf)
library(raster)
library(tidyverse)
library(lubridate)
library(grid)
library(gridExtra)

# Directories
preddir <- "output/model_preds"
plotdir <- "figures"

# Study species (also the facet order)
study_species <- c("Dungeness crab", "Rock crab", "Spiny lobster", "Razor clam")

# Build data ################################################################################

# Mean of each layer in a raster brick (one value per date layer)
calc_avg_by_layer <- function(rbrick){
  sapply(seq_len(nlayers(rbrick)), function(x) mean(getValues(rbrick[[x]]), na.rm = TRUE))
}

# SD of each layer in a raster brick
calc_sd_by_layer <- function(rbrick){
  sapply(seq_len(nlayers(rbrick)), function(x) sd(getValues(rbrick[[x]]), na.rm = TRUE))
}

# If TRUE, rebuild the summary data from the prediction rasters; else read cache
build <- TRUE
if(build){

  # Model files
  models <- c("dungeness_crab_model_rf_cda.Rds",
              "rock_crab_model_brt_cda.Rds",
              "spiny_lobster_model_rf_cda.Rds",
              "razor_clam_model_rf_cda.Rds")

  # Loop through models, stacking one tibble of daily stats per species
  data_orig <- purrr::map_df(models, function(x) {

    # Prediction raster for this model
    model_do <- gsub(".Rds", "", x)
    infile <- paste0(model_do, "_predictions_range_mask.grd")
    sbrick <- brick(file.path(preddir, infile))

    # Daily mean/SD of contamination probability across the masked range
    pcontam_avgs <- calc_avg_by_layer(sbrick)
    pcontam_sds <- calc_sd_by_layer(sbrick)

    # Species label from the file name; layer names are dates prefixed with "X"
    spp_do <- gsub("_model_rf_cda|_model_brt_cda", "", model_do) %>%
      gsub("_", " ", .) %>%
      stringr::str_to_sentence()
    dates <- names(sbrick) %>% gsub("X", "", .) %>% ymd()
    tibble(species = spp_do,
           date = dates,
           pcontam_avg = pcontam_avgs,
           pcontam_sd = pcontam_sds)

  })

  # Format data
  data <- data_orig %>%
    mutate(species = factor(species, study_species))

  # Export
  saveRDS(data, file = file.path(preddir, "species_range_pcontam_averages.Rds"))

}else{

  # Read cached data
  data <- readRDS(file.path(preddir, "species_range_pcontam_averages.Rds"))

}

# Build season lines ################################################################################

# Build a long-format key of season open/close dates for one fishery.
# open_date/close_date are "MM-DD" strings; seasons open in 2013-2019 and
# close in the following calendar year.
build_season_key <- function(species, fishery_type, region, open_date, close_date){
  dates_open <- paste(2013:2019, open_date, sep = "-") %>% ymd()
  # BUG FIX: original used paste0(..., sep="-"); paste0() has no sep argument,
  # so "-" was pasted as an extra element, yielding malformed date strings
  dates_close <- paste(2014:2020, close_date, sep = "-") %>% ymd()
  season_key <- tibble(species = species,
                       fishery_type = fishery_type,
                       region = region,
                       open_date = dates_open,
                       close_date = dates_close) %>%
    mutate(line_group = 1:n()) %>%
    select(species:region, line_group, everything()) %>%
    # Gather open/close into one date column so each season is a 2-point line
    gather(key = "endpoint", value = "date", 5:ncol(.)) %>%
    arrange(species, fishery_type, region, line_group)
  return(season_key)
}

# Dungeness crab season keys
dcrab_comm_n_key <- build_season_key(species="Dungeness crab", fishery_type="Commercial", region="Northern", open_date="12-01", close_date="07-15")
dcrab_comm_c_key <- build_season_key(species="Dungeness crab", fishery_type="Commercial", region="Central", open_date="11-15", close_date="06-30")
dcrab_rec_n_key <- build_season_key(species="Dungeness crab", fishery_type="Recreational", region="Northern", open_date="11-01", close_date="07-30")
dcrab_rec_c_key <- build_season_key(species="Dungeness crab", fishery_type="Recreational", region="Central", open_date="11-01", close_date="06-30")

# Lobster season keys
lobster_comm_key <- build_season_key(species="Spiny lobster", fishery_type="Commercial", region="All", open_date="10-01", close_date="03-15")
lobster_rec_key <- build_season_key(species="Spiny lobster", fishery_type="Recreational", region="All", open_date="10-01", close_date="03-15")

# Season key
season_key <- bind_rows(dcrab_comm_n_key, dcrab_comm_c_key,
                        dcrab_rec_n_key, dcrab_rec_c_key,
                        lobster_comm_key, lobster_rec_key) %>%
  # Add latitudes to plot at (offsets separate fishery types / regions)
  mutate(lat_plot = 31.5,
         lat_plot = ifelse(fishery_type == "Commercial", lat_plot + 0.3, lat_plot),
         lat_plot = ifelse(region == "Central", lat_plot - 0.15, lat_plot)) %>%
  # Make a new, globally unique line group id
  # BUG FIX: sep="-" was outside paste(), so mutate() created a spurious
  # literal column named "sep" and the id parts were space-separated
  mutate(line_group = paste(species, fishery_type, region, line_group, sep = "-")) %>%
  mutate(species = factor(species, levels = study_species))

# Plot data ################################################################################

# Format data for plotting
data <- data %>%
  mutate(species = factor(species, levels = study_species))

# Setup theme
my_theme <- theme(axis.text = element_text(size = 6),
                  axis.title = element_text(size = 8),
                  axis.title.x = element_blank(),
                  plot.title = element_blank(),
                  panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank(),
                  panel.background = element_blank(),
                  axis.line = element_line(colour = "black"))

# Plot contamination time series
g <- ggplot(data, aes(x = date, y = pcontam_avg)) +
  facet_wrap(~species, ncol = 1) +
  # +/- 1 SD ribbon
  geom_ribbon(mapping = aes(x = date, ymin = pcontam_avg - pcontam_sd, ymax = pcontam_avg + pcontam_sd), fill = "grey80") +
  # Mean daily risk
  geom_line(lwd = 0.2) +
  # Season open/close segments drawn along y = 0
  geom_line(data = season_key, mapping = aes(x = date, y = 0, group = line_group)) +
  # Axes ("limits" spelled out; "lim" relied on partial argument matching)
  scale_y_continuous(limits = c(0, 1)) +
  scale_x_date(breaks = seq(ymd("2014-01-01"), ymd("2020-01-02"), by = "1 year"), labels = 2014:2020) +
  # Labels
  labs(x = "", y = "Mean daily contamination risk\ninside fishing grounds") +
  # Theme
  theme_bw() + my_theme
g

# Export
ggsave(g, filename = file.path(plotdir, "Fig7_contamination_time_series.png"), width = 4.5, height = 4.5, units = "in", dpi = 600)
# Exploratory multivariate analysis of data.csv (39 cols = 13 variables x 3
# circumstances): outlier removal, PCA, clustering, similarity trees, SVM,
# variance partitioning, mixed models, and a phenotype-correlation heatmap.

library(data.table)
X <- as.matrix(fread("data.csv"))
ns <- nrow(X)

### Outlier removal: iterate until rm.outlier stops shrinking the matrix
library(outliers)
Y <- rm.outlier(X)
while (!is.null(nrow(Y)) && nrow(Y) < ns) {
  X <- Y
  ns <- nrow(X)
  Y <- rm.outlier(X, fill = TRUE)
}

# Stack the three 13-column circumstance blocks into one long matrix
X <- rbind(X[, 1:13], X[, 14:26], X[, 27:39])

### PCA biplots (Bayesian PCA handles missing values)
library(pcaMethods)
res <- pca(X, method = "bpca", nPcs = 6)
# One plotting symbol per circumstance block
labs <- c(rep("·", nrow(X)/3), rep("+", nrow(X)/3), rep("x", nrow(X)/3))
plotPcs(res, sl = labs, cex = 0.5)

### HCA heatmap annotated by circumstance
library(swamp)
Y <- t(X)
colnames(Y) <- seq_len(ncol(Y))
o <- data.frame(Factor = factor(labs), row.names = colnames(Y))
hca.plot(Y, o)

### Similarity trees in the 3 circumstances
library(ape)
X1 <- X[1:(nrow(X)/3), ]
X2 <- X[(nrow(X)/3 + 1):(nrow(X)/3*2), ]
X3 <- X[(nrow(X)/3*2 + 1):nrow(X), ]
D1 <- dist(X1)
D2 <- dist(X2)
D3 <- dist(X3)
plot(as.phylo(hclust(D1)), type = "radial", cex = 0.2)
plot(as.phylo(hclust(D2)), type = "radial", cex = 0.2)
plot(as.phylo(hclust(D3)), type = "radial", cex = 0.2)

# Mantel tests: correlation of distance structure across circumstances
library(vegan)
print(mantel(D1, D2))
print(mantel(D1, D3))

### SVM: classify circumstance from the measurements (50/50 split)
library(e1071)
train <- sample(1:nrow(X), nrow(X)/2)
m <- svm(X[train, ], (factor(labs))[train], kernel = "linear")
print(m)
pred <- predict(m, X[-train, ])
table(pred, (factor(labs))[-train])

### ANOVA-style variance partitioning
# Xg: each row replaced by the mean of the same subject over 3 circumstances
Xg <- X
for (i in 1:(nrow(X)/3)) {
  # BUG FIX: the third term was X[i+nrow(X)/3*2] (missing row comma), which
  # selected a single element column-major instead of the whole row
  Xg[i, ] <- (X[i, ] + X[i + nrow(X)/3, ] + X[i + nrow(X)/3*2, ])/3
  Xg[i + nrow(X)/3, ] <- Xg[i, ]
  Xg[i + nrow(X)/3*2, ] <- Xg[i, ]
}
# Xe: each row replaced by its circumstance mean
Xe <- X
for (i in 1:(nrow(X)/3)) Xe[i, ] <- colMeans(X1)
for (i in (nrow(X)/3 + 1):(nrow(X)/3*2)) Xe[i, ] <- colMeans(X2)
for (i in (nrow(X)/3*2 + 1):nrow(X)) Xe[i, ] <- colMeans(X3)
# BUG FIX: X - colMeans(X) recycled the 13 column means down the rows
# (column-major), which does not center per column; sweep() does
colvars <- colSums(sweep(X, 2, colMeans(X))^2)/(nrow(X) - 1)
# NOTE(review): the "g"/"e" suffixes look swapped relative to Xg/Xe
# (colvarsg uses Xe, colvarse uses Xg) -- kept as written; confirm intent
colvarsg <- colSums((X - Xe)^2)/(dim(X)[1] - 1)
colvarse <- colSums((X - Xg)^2)/(dim(X)[1] - 1)
print(colvarsg/colvars)
print(colvarse/colvars)

### Mixed model: random intercepts for circumstance (fe) and subject (fg)
library(lme4)
fe <- factor(labs)
fg <- factor(c(1:(nrow(X)/3), 1:(nrow(X)/3), 1:(nrow(X)/3)))
for (i in 1:13) {
  res <- glmer(X[, i] ~ 1 + (1|fe) + (1|fg))
  print(res)
}

### Phenotype correlation heatmap of p-values
library(psych)
# NOTE(review): "nsmall" is not a documented option name; options() stores it
# but nothing reads it -- kept from the original
options(scipen = 999, nsmall = 6)
heatmap(corr.test(X)$p)
/stat.R
permissive
wuchenxi/stat
R
false
false
1,885
r
# Exploratory multivariate analysis of data.csv (39 cols = 13 variables x 3
# circumstances): outlier removal, PCA, clustering, similarity trees, SVM,
# variance partitioning, mixed models, and a phenotype-correlation heatmap.

library(data.table)
X <- as.matrix(fread("data.csv"))
ns <- nrow(X)

### Outlier removal: iterate until rm.outlier stops shrinking the matrix
library(outliers)
Y <- rm.outlier(X)
while (!is.null(nrow(Y)) && nrow(Y) < ns) {
  X <- Y
  ns <- nrow(X)
  Y <- rm.outlier(X, fill = TRUE)
}

# Stack the three 13-column circumstance blocks into one long matrix
X <- rbind(X[, 1:13], X[, 14:26], X[, 27:39])

### PCA biplots (Bayesian PCA handles missing values)
library(pcaMethods)
res <- pca(X, method = "bpca", nPcs = 6)
# One plotting symbol per circumstance block
labs <- c(rep("·", nrow(X)/3), rep("+", nrow(X)/3), rep("x", nrow(X)/3))
plotPcs(res, sl = labs, cex = 0.5)

### HCA heatmap annotated by circumstance
library(swamp)
Y <- t(X)
colnames(Y) <- seq_len(ncol(Y))
o <- data.frame(Factor = factor(labs), row.names = colnames(Y))
hca.plot(Y, o)

### Similarity trees in the 3 circumstances
library(ape)
X1 <- X[1:(nrow(X)/3), ]
X2 <- X[(nrow(X)/3 + 1):(nrow(X)/3*2), ]
X3 <- X[(nrow(X)/3*2 + 1):nrow(X), ]
D1 <- dist(X1)
D2 <- dist(X2)
D3 <- dist(X3)
plot(as.phylo(hclust(D1)), type = "radial", cex = 0.2)
plot(as.phylo(hclust(D2)), type = "radial", cex = 0.2)
plot(as.phylo(hclust(D3)), type = "radial", cex = 0.2)

# Mantel tests: correlation of distance structure across circumstances
library(vegan)
print(mantel(D1, D2))
print(mantel(D1, D3))

### SVM: classify circumstance from the measurements (50/50 split)
library(e1071)
train <- sample(1:nrow(X), nrow(X)/2)
m <- svm(X[train, ], (factor(labs))[train], kernel = "linear")
print(m)
pred <- predict(m, X[-train, ])
table(pred, (factor(labs))[-train])

### ANOVA-style variance partitioning
# Xg: each row replaced by the mean of the same subject over 3 circumstances
Xg <- X
for (i in 1:(nrow(X)/3)) {
  # BUG FIX: the third term was X[i+nrow(X)/3*2] (missing row comma), which
  # selected a single element column-major instead of the whole row
  Xg[i, ] <- (X[i, ] + X[i + nrow(X)/3, ] + X[i + nrow(X)/3*2, ])/3
  Xg[i + nrow(X)/3, ] <- Xg[i, ]
  Xg[i + nrow(X)/3*2, ] <- Xg[i, ]
}
# Xe: each row replaced by its circumstance mean
Xe <- X
for (i in 1:(nrow(X)/3)) Xe[i, ] <- colMeans(X1)
for (i in (nrow(X)/3 + 1):(nrow(X)/3*2)) Xe[i, ] <- colMeans(X2)
for (i in (nrow(X)/3*2 + 1):nrow(X)) Xe[i, ] <- colMeans(X3)
# BUG FIX: X - colMeans(X) recycled the 13 column means down the rows
# (column-major), which does not center per column; sweep() does
colvars <- colSums(sweep(X, 2, colMeans(X))^2)/(nrow(X) - 1)
# NOTE(review): the "g"/"e" suffixes look swapped relative to Xg/Xe
# (colvarsg uses Xe, colvarse uses Xg) -- kept as written; confirm intent
colvarsg <- colSums((X - Xe)^2)/(dim(X)[1] - 1)
colvarse <- colSums((X - Xg)^2)/(dim(X)[1] - 1)
print(colvarsg/colvars)
print(colvarse/colvars)

### Mixed model: random intercepts for circumstance (fe) and subject (fg)
library(lme4)
fe <- factor(labs)
fg <- factor(c(1:(nrow(X)/3), 1:(nrow(X)/3), 1:(nrow(X)/3)))
for (i in 1:13) {
  res <- glmer(X[, i] ~ 1 + (1|fe) + (1|fg))
  print(res)
}

### Phenotype correlation heatmap of p-values
library(psych)
# NOTE(review): "nsmall" is not a documented option name; options() stores it
# but nothing reads it -- kept from the original
options(scipen = 999, nsmall = 6)
heatmap(corr.test(X)$p)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/adjusted_roc_curve.R \name{adjusted_roc_curve} \alias{adjusted_roc_curve} \title{Obtain an adjusted ROC curve from rocdata} \usage{ adjusted_roc_curve( rocdata = NULL, coef = NULL, test_grouped_dat = NULL, Xprotein_test = NULL, order = 1, refstate = NULL, verbose = T, py1, plot = TRUE ) } \arguments{ \item{rocdata}{NULL or an output from the rocdata function} \item{coef}{a named vector for the fitted coefficient vector} \item{test_grouped_dat}{a data table (sequence, labeled, unlabeled, seqId) containing validation examples} \item{order}{a considered order of effects} \item{refstate}{a character which will be used for the common reference state; the default is to use the most frequent amino acid as the reference state for each of the position.} \item{verbose}{a logical value} \item{py1}{the prevalence of positives in the unlabeled set (which will be used for a correction)} \item{plot}{a logical value. If TRUE, a plot will be returned} } \value{ a list containing a data frame for generating a roc plot and the plot } \description{ Obtain an adjusted ROC curve from rocdata }
/man/adjusted_roc_curve.Rd
no_license
mhu48/pudms
R
false
true
1,192
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/adjusted_roc_curve.R \name{adjusted_roc_curve} \alias{adjusted_roc_curve} \title{Obtain an adjusted ROC curve from rocdata} \usage{ adjusted_roc_curve( rocdata = NULL, coef = NULL, test_grouped_dat = NULL, Xprotein_test = NULL, order = 1, refstate = NULL, verbose = T, py1, plot = TRUE ) } \arguments{ \item{rocdata}{NULL or an output from the rocdata function} \item{coef}{a named vector for the fitted coefficient vector} \item{test_grouped_dat}{a data table (sequence, labeled, unlabeled, seqId) containing validation examples} \item{order}{a considered order of effects} \item{refstate}{a character which will be used for the common reference state; the default is to use the most frequent amino acid as the reference state for each of the position.} \item{verbose}{a logical value} \item{py1}{the prevalence of positives in the unlabeled set (which will be used for a correction)} \item{plot}{a logical value. If TRUE, a plot will be returned} } \value{ a list containing a data frame for generating a roc plot and the plot } \description{ Obtain an adjusted ROC curve from rocdata }
# LDA topic modelling of Bugzilla bug-report descriptions, followed by a
# cross-tabulation of severity reassignments against the discovered topics.

library(lda)
library(ggplot2)
library(reshape2)
library(tm)
library(SnowballC)
library(RMySQL)
library(stringr)

# Pull each severity change joined with the matching description (may be NA)
drv <- dbDriver("MySQL")
con <- dbConnect(drv, dbname = 'bug_report', user = 'root')
stat <- "select Bugzilla_severity.report_id,Bugzilla_severity.timestamp,Bugzilla_severity.what as 'severity',Bugzilla_desc.what as 'desc' from Bugzilla_severity left join Bugzilla_desc on (Bugzilla_desc.report_id=Bugzilla_severity.report_id and Bugzilla_desc.timestamp=Bugzilla_severity.timestamp);"
table1 <- dbGetQuery(con, stat)

# Stopwords: English plus corpus-specific noise tokens
sw <- c(stopwords("english"), "na", "NA", "content", "will")

# Clean every description: lowercase, strip punctuation/numbers/stopwords, stem
desc <- table1["desc"]
# Generalized from the hard-coded row count (5616) so the script survives a
# change in the underlying database
n_desc <- nrow(desc)
# Preallocated; the original grew a vector one element per iteration
docu <- character(n_desc)
for (i in seq_len(n_desc)) {
  temp <- desc[i, 1]
  temp <- lapply(temp, tolower)
  temp <- lapply(temp, removePunctuation)
  temp <- lapply(temp, removeNumbers)
  temp <- unlist(temp)
  temp <- removeWords(temp, sw)
  temp <- strsplit(temp, " ")
  temp <- lapply(temp, stemDocument)
  # Re-join the stemmed tokens into one space-separated string
  # (replaces the original O(n^2) pairwise paste loop; also yields "" instead
  # of NA for a zero-token document)
  temp[[1]][1] <- paste(temp[[1]], collapse = " ")
  temp <- temp[[1]][1]
  desc[i, 1] <- relist(temp, desc[i, 1])
  docu[[i]] <- desc[i, 1]
}
Table <- table1
Table['desc'] <- docu

# Fit a 10-topic LDA on the documents that have text
docucorp <- subset(docu, docu != "NA")
docs <- lexicalize(docucorp)
vocab <- docs$vocab
docs <- docs$documents
K <- 10
result <- lda.collapsed.gibbs.sampler(docs, K, vocab, 800, 0.15, 0.01, compute.log.likelihood = TRUE)
top.words <- top.topic.words(result$topics, 20, by.score = TRUE)

# Reassignment table: for every severity change with a missing description
# (a follow-up change), record previous severity, new severity, and the
# dominant topic of the preceding documented entry.
# NOTE: the variable shadows base::table; function calls still resolve to it.
i <- 1
j <- 1  # next row of the reassignment table
k <- 0  # column index into result$document_sums (documented entries only)
table <- data.frame(report_id = integer(), severity1 = character(), severity2 = character(), topic = integer(), stringsAsFactors = FALSE)
while (i <= length(docu)) {
  if ((docu[[i]] == "NA" || is.na(docu[[i]])) && i > 1) {
    table[j, 1] <- Table[i, 1]
    table[j, 2] <- Table[i - 1, 3]
    table[j, 3] <- Table[i, 3]
    table[j, 4] <- which.max(result$document_sums[, k])
    j <- j + 1
  }
  if (docu[[i]] != "NA" || is.na(docu[[i]])) {
    k <- k + 1
  }
  i <- i + 1
}

# Severity levels in increasing order
sev <- c("trivial", "minor", "normal", "major", "critical", "blocker")

# Distribution of new severities for reports previously marked "normal"
tabnor <- subset(table, severity1 == "normal")
# Replaces the original element-by-element while/if-chain counter with a
# single factor/table count (identical totals, same column names)
distnormal <- as.data.frame(as.list(table(factor(tabnor$severity2, levels = sev))))

# Per-severity distribution over the 10 topics
dist <- data.frame(severity = character(), t1 = integer(), t2 = integer(), t3 = integer(), t4 = integer(), t5 = integer(), t6 = integer(), t7 = integer(), t8 = integer(), t9 = integer(), t10 = integer(), stringsAsFactors = FALSE)
for (i in seq_along(sev)) {
  dist[i, 1] <- sev[i]
  dist[i, 2:11] <- tabulate(subset(tabnor, severity2 == sev[i])$topic, nbins = 10)
}
/topicrreassign.R
no_license
suparna-ghanvatkar/bug_lda
R
false
false
2,947
r
# LDA topic modelling of Bugzilla bug-report descriptions, followed by a
# cross-tabulation of severity reassignments against the discovered topics.

library(lda)
library(ggplot2)
library(reshape2)
library(tm)
library(SnowballC)
library(RMySQL)
library(stringr)

# Pull each severity change joined with the matching description (may be NA)
drv <- dbDriver("MySQL")
con <- dbConnect(drv, dbname = 'bug_report', user = 'root')
stat <- "select Bugzilla_severity.report_id,Bugzilla_severity.timestamp,Bugzilla_severity.what as 'severity',Bugzilla_desc.what as 'desc' from Bugzilla_severity left join Bugzilla_desc on (Bugzilla_desc.report_id=Bugzilla_severity.report_id and Bugzilla_desc.timestamp=Bugzilla_severity.timestamp);"
table1 <- dbGetQuery(con, stat)

# Stopwords: English plus corpus-specific noise tokens
sw <- c(stopwords("english"), "na", "NA", "content", "will")

# Clean every description: lowercase, strip punctuation/numbers/stopwords, stem
desc <- table1["desc"]
# Generalized from the hard-coded row count (5616) so the script survives a
# change in the underlying database
n_desc <- nrow(desc)
# Preallocated; the original grew a vector one element per iteration
docu <- character(n_desc)
for (i in seq_len(n_desc)) {
  temp <- desc[i, 1]
  temp <- lapply(temp, tolower)
  temp <- lapply(temp, removePunctuation)
  temp <- lapply(temp, removeNumbers)
  temp <- unlist(temp)
  temp <- removeWords(temp, sw)
  temp <- strsplit(temp, " ")
  temp <- lapply(temp, stemDocument)
  # Re-join the stemmed tokens into one space-separated string
  # (replaces the original O(n^2) pairwise paste loop; also yields "" instead
  # of NA for a zero-token document)
  temp[[1]][1] <- paste(temp[[1]], collapse = " ")
  temp <- temp[[1]][1]
  desc[i, 1] <- relist(temp, desc[i, 1])
  docu[[i]] <- desc[i, 1]
}
Table <- table1
Table['desc'] <- docu

# Fit a 10-topic LDA on the documents that have text
docucorp <- subset(docu, docu != "NA")
docs <- lexicalize(docucorp)
vocab <- docs$vocab
docs <- docs$documents
K <- 10
result <- lda.collapsed.gibbs.sampler(docs, K, vocab, 800, 0.15, 0.01, compute.log.likelihood = TRUE)
top.words <- top.topic.words(result$topics, 20, by.score = TRUE)

# Reassignment table: for every severity change with a missing description
# (a follow-up change), record previous severity, new severity, and the
# dominant topic of the preceding documented entry.
# NOTE: the variable shadows base::table; function calls still resolve to it.
i <- 1
j <- 1  # next row of the reassignment table
k <- 0  # column index into result$document_sums (documented entries only)
table <- data.frame(report_id = integer(), severity1 = character(), severity2 = character(), topic = integer(), stringsAsFactors = FALSE)
while (i <= length(docu)) {
  if ((docu[[i]] == "NA" || is.na(docu[[i]])) && i > 1) {
    table[j, 1] <- Table[i, 1]
    table[j, 2] <- Table[i - 1, 3]
    table[j, 3] <- Table[i, 3]
    table[j, 4] <- which.max(result$document_sums[, k])
    j <- j + 1
  }
  if (docu[[i]] != "NA" || is.na(docu[[i]])) {
    k <- k + 1
  }
  i <- i + 1
}

# Severity levels in increasing order
sev <- c("trivial", "minor", "normal", "major", "critical", "blocker")

# Distribution of new severities for reports previously marked "normal"
tabnor <- subset(table, severity1 == "normal")
# Replaces the original element-by-element while/if-chain counter with a
# single factor/table count (identical totals, same column names)
distnormal <- as.data.frame(as.list(table(factor(tabnor$severity2, levels = sev))))

# Per-severity distribution over the 10 topics
dist <- data.frame(severity = character(), t1 = integer(), t2 = integer(), t3 = integer(), t4 = integer(), t5 = integer(), t6 = integer(), t7 = integer(), t8 = integer(), t9 = integer(), t10 = integer(), stringsAsFactors = FALSE)
for (i in seq_along(sev)) {
  dist[i, 1] <- sev[i]
  dist[i, 2:11] <- tabulate(subset(tabnor, severity2 == sev[i])$topic, nbins = 10)
}
# Extracted example for micar::mica.dar.form. The example body is wrapped in
# "Not run" because it needs credentials and a live Mica server.
library(micar)
### Name: mica.dar.form
### Title: Get the data access request form
### Aliases: mica.dar.form

### ** Examples

## Not run: 
##D m <- mica.login("someuser", "somepassword", "https://mica-demo.obiba.org")
##D mica.dar.form(m)
##D mica.logout(m)
## End(Not run)
/data/genthat_extracted_code/micar/examples/mica.dar.form.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
281
r
# Extracted example for micar::mica.dar.form. The example body is wrapped in
# "Not run" because it needs credentials and a live Mica server.
library(micar)
### Name: mica.dar.form
### Title: Get the data access request form
### Aliases: mica.dar.form

### ** Examples

## Not run: 
##D m <- mica.login("someuser", "somepassword", "https://mica-demo.obiba.org")
##D mica.dar.form(m)
##D mica.logout(m)
## End(Not run)
# indsF: annotate each smooth term in `fctterm` with assign-like index
# attributes that map model coefficients back to the term.
#
# Arguments (assumed from usage -- TODO confirm against the amer package):
#   m       - a fitted lme4 model object (slots @Gp, @flist, @fixef, @frame read)
#   fct     - named list of smooth-term descriptors; each has $Z (ranef design
#             blocks), $X (fixed design blocks) and a "call" attribute
#   fctterm - list of terms to annotate, parallel to fct
#
# Attributes set on each fctterm[[i]] (each wrapped in a list, one slot per
# by-level):
#   "indGrp"   - index/indices of the term's grouping factor(s) in m@flist
#   "indPen"   - indices of the penalized (random-effect) coefficients
#   "indUnpen" - indices of the term's unpenalized fixed coefficients (0 if none)
#   "indConst" - indices of constant terms: global intercept and, for by-terms,
#                the by-level intercept
#
# NOTE: relies on the lme4-internal helper lme4:::reinds, so this is tightly
# coupled to a specific lme4 version.
indsF <- function(m, fct, fctterm){
# add assign-like info to fctterm:
# which penalization/ranef groups and coefficients (fixed/random) belong to which function
# also include info on global intercept and by-level intercepts
	ranefinds <- lme4:::reinds(m@Gp)
	# 1 if the model has a global intercept, else 0
	indIntercept <- ifelse("(Intercept)" %in% names(fixef(m)), 1, 0)
	for(i in 1:length(fctterm)){
		if(length(fct[[i]]$Z) == 1){
			# --- single ranef block: no by-variable (or allPen pooling) ---
			attr(fctterm[[i]], "indGrp") <- match(names(fct)[i], colnames(m@flist))
			if(eval(attr(fct[[i]], "call")$allPen)) {
				#add pen. group(s) with grouping factor u.x.by
				indUGrp <- match(sub("f.", "u.", names(fct)[i]), colnames(m@flist))
				attr(fctterm[[i]], "indGrp") <- c(attr(fctterm[[i]], "indGrp"),
						which(attr(m@flist, "assign")==indUGrp))
			}
			attr(fctterm[[i]], "indPen") <- unlist(ranefinds[attr(fctterm[[i]], "indGrp")])
			if(!(eval(attr(fct[[i]], "call")$allPen)||ncol(fct[[i]]$X[[1]])==0)){
				# locate this term's fixed-effect columns by exact name match
				attr(fctterm[[i]], "indUnpen") <- sapply(paste("^",colnames(fct[[i]]$X[[1]]),"$",sep=""),
						grep, x=names(m@fixef))
				names(attr(fctterm[[i]], "indUnpen")) <- colnames(fct[[i]]$X[[1]])
			} else attr(fctterm[[i]], "indUnpen") <- 0
			attr(fctterm[[i]], "indConst") <- indIntercept
			# wrap everything in length-1 lists for a uniform shape with the
			# multi-level (by-variable) branch below
			attr(fctterm[[i]], "indGrp") <- list(attr(fctterm[[i]], "indGrp"))
			attr(fctterm[[i]], "indPen") <- list(attr(fctterm[[i]], "indPen"))
			attr(fctterm[[i]], "indUnpen") <- list(attr(fctterm[[i]], "indUnpen"))
			attr(fctterm[[i]], "indConst") <- list(attr(fctterm[[i]], "indConst"))
		} else {
			# --- by-variable term: one slot per level of the by-factor ---
			by <- eval(attr(fct[[i]],"call")$by, m@frame)
			attr(fctterm[[i]], "indGrp") <- vector(mode="list", length=nlevels(by))
			attr(fctterm[[i]], "indPen") <- vector(mode="list", length=nlevels(by))
			attr(fctterm[[i]], "indUnpen") <- vector(mode="list", length=nlevels(by))
			attr(fctterm[[i]], "indConst") <- vector(mode="list", length=nlevels(by))
			for(j in 1:nlevels(by)){
				attr(fctterm[[i]], "indGrp")[[j]] <- grep(paste(names(fct)[i],".",names(fct[[i]]$Z)[j],sep=""),
						colnames(m@flist))
				attr(fctterm[[i]], "indPen")[[j]] <- ranefinds[[attr(fctterm[[i]], "indGrp")[[j]]]]
				if(ncol(fct[[i]]$X[[j]]) == 0){
					attr(fctterm[[i]], "indUnpen")[[j]] <- 0
				} else {
					# locate the fixed-effect columns of this by-level
					attr(fctterm[[i]], "indUnpen")[[j]] <- sapply( paste("^",colnames(fct[[i]]$X[[j]]),"$",sep=""),
							grep, x=names(m@fixef))
					names(attr(fctterm[[i]], "indUnpen")[[j]]) <- colnames(fct[[i]]$X[[j]])
				}
				#add by-level intercept:
				indBy <- grep(paste("^",deparse(attr(fct[[i]],"call")$by), levels(by)[j],"$", sep=""),
						names(m@fixef))
				# exclude columns already counted as unpenalized for this level
				indBy <- indBy[!(indBy %in% attr(fctterm[[i]], "indUnpen")[[j]])]
				attr(fctterm[[i]], "indConst")[[j]] <- c(indIntercept, indBy)
			}
		}
	}
	return(fctterm)
}
/.roxygen/RegularBayes/Workspace/amer/amer/R.roxygen/RegularBayes/Workspace/amer/amer/R/indsF.R
no_license
cran/amer
R
false
false
2,777
r
# indsF: annotate each smooth term in `fctterm` with assign-like index
# attributes that map model coefficients back to the term.
#
# Arguments (assumed from usage -- TODO confirm against the amer package):
#   m       - a fitted lme4 model object (slots @Gp, @flist, @fixef, @frame read)
#   fct     - named list of smooth-term descriptors; each has $Z (ranef design
#             blocks), $X (fixed design blocks) and a "call" attribute
#   fctterm - list of terms to annotate, parallel to fct
#
# Attributes set on each fctterm[[i]] (each wrapped in a list, one slot per
# by-level):
#   "indGrp"   - index/indices of the term's grouping factor(s) in m@flist
#   "indPen"   - indices of the penalized (random-effect) coefficients
#   "indUnpen" - indices of the term's unpenalized fixed coefficients (0 if none)
#   "indConst" - indices of constant terms: global intercept and, for by-terms,
#                the by-level intercept
#
# NOTE: relies on the lme4-internal helper lme4:::reinds, so this is tightly
# coupled to a specific lme4 version.
indsF <- function(m, fct, fctterm){
# add assign-like info to fctterm:
# which penalization/ranef groups and coefficients (fixed/random) belong to which function
# also include info on global intercept and by-level intercepts
	ranefinds <- lme4:::reinds(m@Gp)
	# 1 if the model has a global intercept, else 0
	indIntercept <- ifelse("(Intercept)" %in% names(fixef(m)), 1, 0)
	for(i in 1:length(fctterm)){
		if(length(fct[[i]]$Z) == 1){
			# --- single ranef block: no by-variable (or allPen pooling) ---
			attr(fctterm[[i]], "indGrp") <- match(names(fct)[i], colnames(m@flist))
			if(eval(attr(fct[[i]], "call")$allPen)) {
				#add pen. group(s) with grouping factor u.x.by
				indUGrp <- match(sub("f.", "u.", names(fct)[i]), colnames(m@flist))
				attr(fctterm[[i]], "indGrp") <- c(attr(fctterm[[i]], "indGrp"),
						which(attr(m@flist, "assign")==indUGrp))
			}
			attr(fctterm[[i]], "indPen") <- unlist(ranefinds[attr(fctterm[[i]], "indGrp")])
			if(!(eval(attr(fct[[i]], "call")$allPen)||ncol(fct[[i]]$X[[1]])==0)){
				# locate this term's fixed-effect columns by exact name match
				attr(fctterm[[i]], "indUnpen") <- sapply(paste("^",colnames(fct[[i]]$X[[1]]),"$",sep=""),
						grep, x=names(m@fixef))
				names(attr(fctterm[[i]], "indUnpen")) <- colnames(fct[[i]]$X[[1]])
			} else attr(fctterm[[i]], "indUnpen") <- 0
			attr(fctterm[[i]], "indConst") <- indIntercept
			# wrap everything in length-1 lists for a uniform shape with the
			# multi-level (by-variable) branch below
			attr(fctterm[[i]], "indGrp") <- list(attr(fctterm[[i]], "indGrp"))
			attr(fctterm[[i]], "indPen") <- list(attr(fctterm[[i]], "indPen"))
			attr(fctterm[[i]], "indUnpen") <- list(attr(fctterm[[i]], "indUnpen"))
			attr(fctterm[[i]], "indConst") <- list(attr(fctterm[[i]], "indConst"))
		} else {
			# --- by-variable term: one slot per level of the by-factor ---
			by <- eval(attr(fct[[i]],"call")$by, m@frame)
			attr(fctterm[[i]], "indGrp") <- vector(mode="list", length=nlevels(by))
			attr(fctterm[[i]], "indPen") <- vector(mode="list", length=nlevels(by))
			attr(fctterm[[i]], "indUnpen") <- vector(mode="list", length=nlevels(by))
			attr(fctterm[[i]], "indConst") <- vector(mode="list", length=nlevels(by))
			for(j in 1:nlevels(by)){
				attr(fctterm[[i]], "indGrp")[[j]] <- grep(paste(names(fct)[i],".",names(fct[[i]]$Z)[j],sep=""),
						colnames(m@flist))
				attr(fctterm[[i]], "indPen")[[j]] <- ranefinds[[attr(fctterm[[i]], "indGrp")[[j]]]]
				if(ncol(fct[[i]]$X[[j]]) == 0){
					attr(fctterm[[i]], "indUnpen")[[j]] <- 0
				} else {
					# locate the fixed-effect columns of this by-level
					attr(fctterm[[i]], "indUnpen")[[j]] <- sapply( paste("^",colnames(fct[[i]]$X[[j]]),"$",sep=""),
							grep, x=names(m@fixef))
					names(attr(fctterm[[i]], "indUnpen")[[j]]) <- colnames(fct[[i]]$X[[j]])
				}
				#add by-level intercept:
				indBy <- grep(paste("^",deparse(attr(fct[[i]],"call")$by), levels(by)[j],"$", sep=""),
						names(m@fixef))
				# exclude columns already counted as unpenalized for this level
				indBy <- indBy[!(indBy %in% attr(fctterm[[i]], "indUnpen")[[j]])]
				attr(fctterm[[i]], "indConst")[[j]] <- c(indIntercept, indBy)
			}
		}
	}
	return(fctterm)
}
# Extracted example for dataRetrieval::readNGWMNdata. All example calls are
# wrapped in "Not run" because they query the live NGWMN web services.
library(dataRetrieval)
### Name: readNGWMNdata
### Title: import data from the National Groundwater Monitoring Network
###   <URL: http://cida.usgs.gov/ngwmn/>.
### Aliases: readNGWMNdata

### ** Examples

## Not run: 
##D #one site
##D site <- "USGS.430427089284901"
##D oneSite <- readNGWMNdata(siteNumbers = site, service = "observation")
##D 
##D #multiple sites
##D sites <- c("USGS.272838082142201","USGS.404159100494601", "USGS.401216080362703")
##D multiSiteData <- readNGWMNdata(siteNumbers = sites, service = "observation")
##D attributes(multiSiteData)
##D 
##D #non-USGS site
##D #accepts colon or period between agency and ID
##D site <- "MBMG:702934"
##D data <- readNGWMNdata(siteNumbers = site, service = "featureOfInterest")
##D 
##D #site with no data returns empty data frame
##D noDataSite <- "UTGS.401544112060301"
##D noDataSite <- readNGWMNdata(siteNumbers = noDataSite, service = "observation")
##D 
##D #bounding box
##D bboxSites <- readNGWMNdata(service = "featureOfInterest", bbox = c(30, -102, 31, 99))
##D #retrieve sites. Set asDateTime to false since one site has an invalid date
##D bboxData <- readNGWMNdata(service = "observation", siteNumbers = bboxSites$site[1:3],
##D asDateTime = FALSE)
## End(Not run)
/data/genthat_extracted_code/dataRetrieval/examples/readNGWMNdata.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
1,251
r
# Extracted example for dataRetrieval::readNGWMNdata. All example calls are
# wrapped in "Not run" because they query the live NGWMN web services.
library(dataRetrieval)
### Name: readNGWMNdata
### Title: import data from the National Groundwater Monitoring Network
###   <URL: http://cida.usgs.gov/ngwmn/>.
### Aliases: readNGWMNdata

### ** Examples

## Not run: 
##D #one site
##D site <- "USGS.430427089284901"
##D oneSite <- readNGWMNdata(siteNumbers = site, service = "observation")
##D 
##D #multiple sites
##D sites <- c("USGS.272838082142201","USGS.404159100494601", "USGS.401216080362703")
##D multiSiteData <- readNGWMNdata(siteNumbers = sites, service = "observation")
##D attributes(multiSiteData)
##D 
##D #non-USGS site
##D #accepts colon or period between agency and ID
##D site <- "MBMG:702934"
##D data <- readNGWMNdata(siteNumbers = site, service = "featureOfInterest")
##D 
##D #site with no data returns empty data frame
##D noDataSite <- "UTGS.401544112060301"
##D noDataSite <- readNGWMNdata(siteNumbers = noDataSite, service = "observation")
##D 
##D #bounding box
##D bboxSites <- readNGWMNdata(service = "featureOfInterest", bbox = c(30, -102, 31, 99))
##D #retrieve sites. Set asDateTime to false since one site has an invalid date
##D bboxData <- readNGWMNdata(service = "observation", siteNumbers = bboxSites$site[1:3],
##D asDateTime = FALSE)
## End(Not run)
################################################################################### ## This script fits a linear model of the invertebrate Bray-Curtis Dissimilarity ## ## in the Grassland Resilience experiment with fixed factors "day" (days after ## ## the end of the drought), effect size of soil moisture (calculated as soil ## ## moisture in each plot C1-D3 minus the average soil moisture in control plots) ## ## and treatment (control vs. drought) and random effects Region (Border, Cork, ## ## Dublin, Limerick) and Farm (1-5) ## ## The response is the invertebrate BC Index (each plot compared to an average ## ## invertebrate community composition in control plots at each site and each ## ## sampling day (9, 32, 64), i.e. reference community) ## ## Compared to the original script, here, treatment is coded according to the ## ## measured soil moisture, as some of the drought plots show a behaviour that ## ## is much more similar to the control plots. The updated coding of the column ## ## "treatment" is based on my personal opinion, not on any statistics or similar.## ## ## ## The script has be modified in order to test for the best moving window width ## ## used to calculate the effect size of the soil moisture ## ## It uses previously written function that calculates the soil moisture ES ## ## for any given timepoint of the sampling day (e.g. 12:00 noon) and any given ## ## window width (e.g. 
24 h, 48 h etc., negative if in the past) ## ## The aim is to find the moving window width that explains most of the variance ## ## ## ## Author of the modified script: ## ## Maja Ilic M.Ilic@qub.ac.uk ## ## first modified: 16 Feb 2020 ## ## last modified: 16 Feb 2020 ## ################################################################################### #=========================================== #### Clear objects from the workspace #### rm(list = ls()) #=========================================== #### Set working directory #### # Maja's desktop PC setwd("C:/Users/3054311/Documents/My Documents/Grassland project/07_Invertebrates") data.dir <- paste0(getwd(),"/Data/") figures.dir <- paste0(getwd(),"/Figures/") script.dir <- paste0(getwd(),"/Script/") #=========================================== #### Packages #### library(ggplot2) library(lubridate) library(dplyr) library(tidyr) library(stringr) library(lme4) library(rcompanion) library(car) library(performance) library(insight) library(see) library(cowplot) library(patchwork) library(gridExtra) library(effects) library(afex) #=========================================== #### Get raw data for soil moisture #### raw.data.dir <- "C:/Users/3054311/Documents/My Documents/Grassland project/01_Probes_soil_moisture_and_temp/Data/" load(paste0(raw.data.dir,"Cleaned_Soil_Moisture_Data_from_Loggers.RData")) # Recode all drought plots that show soil moisture behaviour more similar to control plots within the respective site df.moisture.recoded <- df.moisture df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Border3" & df.moisture.recoded$Plot == "D1"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Border3" & df.moisture.recoded$Plot == "D3"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Cork1" & df.moisture.recoded$Plot == "D1"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Dublin1" & df.moisture.recoded$Plot == "D1"] <- "C" 
df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Dublin1" & df.moisture.recoded$Plot == "D3"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Dublin5" & df.moisture.recoded$Plot == "D1"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Dublin5" & df.moisture.recoded$Plot == "D2"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Dublin5" & df.moisture.recoded$Plot == "D3"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Limerick1" & df.moisture.recoded$Plot == "D2"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Limerick3" & df.moisture.recoded$Plot == "D3"] <- "C" # Finally, export this file: save(df.moisture.recoded, file = paste0(raw.data.dir,"Cleaned_Soil_Moisture_Data_from_Loggers_recoded.RData")) # Events load(file = paste0(raw.data.dir,"All events.RData")) #================================================ #### Set directory for data and plots #### # dir.create(paste0(data.dir,"LMER model invertebrates soil trt 2")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Model validation")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Model output")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Residuals")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Soil moisture effect size boxplot")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Soil moisture effect size V1")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Soil moisture effect size V2")) mydir.data <- paste0(data.dir,"LMER model invertebrates soil trt 2") mydir <- paste0(figures.dir,"LMER model invertebrates soil trt 2") # Create a list of figure titles to be used within the mov.win.diff() function mydir.plot1 <- paste0(mydir,"/Soil moisture effect size boxplot/") mydir.plot2 <- 
paste0(mydir,"/Soil moisture effect size V1/") mydir.plot3 <- paste0(mydir,"/Soil moisture effect size V2/") myplot <- c("plot1", "plot2", "plot3") plot.dir <- c(mydir.plot1, mydir.plot2, mydir.plot3) list.figures <- setNames(as.list(plot.dir), myplot) #================================================ #### Load function mov.win.diff #### source(paste0(script.dir,"Function mov.win.diff.R")) #================================================ #### Import invertebrate data: Bray-Curtis Dissimilarity #### inverts_data <- read.csv(paste0(data.dir,"Invertebrates with Bray Curtis.csv"), sep = ",", header = T) # Extract only relevant columns df_inverts <- inverts_data[,c(1:6,36:37)] # Change the column "BC_meanC_all" to "inverts_BC_all" names(df_inverts)[which(names(df_inverts) == "BC_meanC_all")] <- "inverts_BC_all" # Change the column "BC_meanC_soil" to "inverts_BC_soil" names(df_inverts)[which(names(df_inverts) == "BC_meanC_soil")] <- "inverts_BC_soil" # Remove plots C4 and D4 df_inverts <- df_inverts %>% filter(Plot != "C4" & Plot != "D4") # Remove Border 2 and 4 df_inverts <- df_inverts %>% filter(site_ID != "Border2" & site_ID != "Border4") #================================================ #### Run a for-loop for soil moisture data #### timepoint.hr <- c() width.hr <- c() obs_norm <- c() obs_var_trt <- c() obs_var_region <- c() obs_var_farm <- c() obs_var_region_farm <- c() resid_norm <- c() resid_var_trt <- c() resid_var_region <- c() resid_var_farm <- c() resid_var_region_farm <- c() mod_AIC <- c() mod_Rsq_marginal <- c() mod_Rsq_conditional <- c() timepoint <- 12 width <- seq(-192,0,1) m <- 1 resp.variable <- "inverts_BC_all" for (daytime.hr in timepoint) { for (duration.hr in width) { # Use the mov.win.diff function to extract relevant soil moisture data for the given time inverval # and calculate the effect size (for the days 0, 32 and 64) df_soil <- mov.win.diff(data = df.moisture.recoded, dates = events, daytime.hr = daytime.hr, duration.hr = duration.hr, doplot 
= TRUE, list.figures = list.figures) #================================================ # Combine effect size (ES) for soil moisture and BC for invertebrates # Make sure that you DO NOT select the column "treatment" in the invertebrate data names(df_soil)[names(df_soil) == "plot_meanC_ES"] <- "soil_ES" df_joined <- full_join(df_inverts[, c("Region", "Farm", "Plot", "site_ID", "day","inverts_BC_all", "inverts_BC_soil")], df_soil[, c("Region", "Farm", "Plot", "site_ID", "day", "treatment", "soil_ES")], by = c("Region", "Farm", "Plot", "site_ID", "day")) # Exclude all rows with NAs (columns BC_meanC_all and soil_ES) df_joined <- df_joined[which(!is.na(df_joined$inverts_BC_all)),] df_joined <- df_joined[which(!is.na(df_joined$soil_ES)),] # Change region, farm and treatment factors df_joined$Region <- as.factor(df_joined$Region) df_joined$Farm <- as.factor(df_joined$Farm) df_joined$treatment <- as.factor(df_joined$treatment) #================================================ ################### ## ## ## Fit a model ## ## ## ################### # Fit a model of the ratio of the plot to the mean of the control plots as # a function Day since end of drought (numeric), the effect size of soil moisture (numeric) and treatment (Control vs. 
Drought) # Random factors included: Farm nested in Region (1|Region/Farm) lmer_full_mod <- lmer(inverts_BC_all ~ day*soil_ES*treatment + (1|Region/Farm), data = df_joined) #================================================ # Add fitted values and residuals to the raw data df_mod <- data.frame(df_joined, "Fitted" = fitted(lmer_full_mod)) df_mod$Residuals <- residuals(lmer_full_mod) #================================================ # Run anova() and extract the results aov_mod <- as.data.frame(anova(lmer_full_mod)) aov_mod$Term <- rownames(aov_mod) aov_mod <- aov_mod[,c(7,1:6)] names(aov_mod)[2:7] <- c("Sum.Sq","Mean.Sq","NumDF","DenDF","F.value","p.value") rownames(aov_mod) <- 1:nrow(aov_mod) aov_mod$Daytime <- daytime.hr aov_mod$Duration <- duration.hr aov_mod$Trial <- m if(m == 1){ aov_mod_final <- aov_mod } if(m > 1){ aov_mod_final <- rbind(aov_mod_final,aov_mod) } #================================================ # Extract model coefficients # Fixed coeff_fixef <- summary(lmer_full_mod)$coefficients rownames_fixef <- data.frame("Term" = rownames(coeff_fixef)) dimnames(coeff_fixef)[[2]] <- c("Estimate","Std.Error","DF","t.value","p.value") fixef_out <- cbind(rownames_fixef, coeff_fixef) rownames(fixef_out) <- rownames(rownames_fixef) fixef_out$Daytime <- daytime.hr fixef_out$Duration <- duration.hr fixef_out$Trial <- m if(m == 1){ fixef_out_final <- fixef_out } if(m > 1){ fixef_out_final <- rbind(fixef_out_final,fixef_out) } # Random: Farm:Region (Farm nested in Region) coeff_random.1 <- ranef(lmer_full_mod)$`Farm:Region` rownames_random.1 <- data.frame(rownames(coeff_random.1)) random_out.1 <- rownames_random.1 %>% separate(rownames.coeff_random.1., c("Farm", "Region"), ":") random_out.1 <- data.frame(random_out.1, "Intercept" = coeff_random.1$`(Intercept)`) random_out.1$Intercept_1 <- random_out.1$Intercept + fixef_out$Estimate[fixef_out$Term == "(Intercept)"] random_out.1$Daytime <- daytime.hr random_out.1$Duration <- duration.hr random_out.1$Trail <- m if(m == 
1){ random_out_final.1 <- random_out.1 } if(m > 1){ random_out_final.1 <- rbind( random_out_final.1, random_out.1) } # Random: Farm coeff_random.2 <- ranef(lmer_full_mod)$Region rownames_random.2 <- rownames(coeff_random.2) random_out.2 <- data.frame("Region" = rownames_random.2, "Intercept" = coeff_random.2$`(Intercept)`) random_out.2$Intercept_1 <- random_out.2$Intercept + fixef_out$Estimate[fixef_out$Term == "(Intercept)"] random_out.2$Daytime <- daytime.hr random_out.2$Duration <- duration.hr random_out.2$Trail <- m if(m == 1){ random_out_final.2 <- random_out.2 } if(m > 1){ random_out_final.2 <- rbind( random_out_final.2, random_out.2) } #================================================ # Define figure titles for the plots if (daytime.hr < 10) { title <- paste0("LMER Model ",duration.hr, " h period, starting at 0",daytime.hr,":00") figure.title0 <- paste0(mydir,"/Model validation/Model assumptions ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title1 <- paste0(mydir,"/Model validation/Model validation ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title2.1 <- paste0(mydir,"/Residuals/Residuals ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title2.2 <- paste0(mydir,"/Residuals/Residuals fixed effect ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title2.3 <- paste0(mydir,"/Residuals/Residuals random effect ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title3.1 <- paste0(mydir,"/Model output/Model output A ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title3.2 <- paste0(mydir,"/Model output/Model output B ",duration.hr, " h, 0",daytime.hr,"-00.png") } if (daytime.hr >= 10) { title <- paste0("LMER Model ",duration.hr, " h period, starting at ",daytime.hr,":00") figure.title0 <- paste0(mydir,"/Model validation/Model assumptions ",duration.hr, " h, ",daytime.hr,"-00.png") figure.title1 <- paste0(mydir,"/Model validation/Model validation ",duration.hr, " h, ",daytime.hr,"-00.png") figure.title2.1 <- paste0(mydir,"/Residuals/Residuals 
",duration.hr, " h, ",daytime.hr,"-00.png") figure.title2.2 <- paste0(mydir,"/Residuals/Residuals fixed effect ",duration.hr, " h, ",daytime.hr,"-00.png") figure.title2.3 <- paste0(mydir,"/Residuals/Residuals random effect ",duration.hr, " h, ",daytime.hr,"-00.png") figure.title3.1 <- paste0(mydir,"/Model output/Model output A ",duration.hr, " h, ",daytime.hr,"-00.png") figure.title3.2 <- paste0(mydir,"/Model output/Model output B ",duration.hr, " h, ",daytime.hr,"-00.png") } #================================================ # Model assumptions # Check normality of the data (invert_BC_all) norm0 <- shapiro.test(df_mod$inverts_BC_all) if(norm0$p.value < 0.001){ main1.0 <- "p < 0.001" } if(norm0$p.value >= 0.001 & norm0$p.value < 0.01){ main1.0 <- "p < 0.01" } if(norm0$p.value >= 0.01 & norm0$p.value < 0.05){ main1.0 <- "p < 0.05" } if(norm0$p.value >= 0.05 ){ main1.0 <- parse(text = paste0('p == ', round(norm0$p.value, digits = 3))) } median.inverts.BC <- median(df_mod$inverts_BC_all, na.rm = T) mean.inverts.BC <- mean(df_mod$inverts_BC_all, na.rm = T) sd.inverts.BC <- sd(df_mod$inverts_BC_all) var.inverts.BC <- var(df_mod$inverts_BC_all) plot.norm.0 <- ggplot(df_mod, aes(x = inverts_BC_all)) + geom_histogram(aes(y = ..density..), binwidth = 0.05, alpha = 0.6, fill = "lightblue", color = "grey70") + geom_density(alpha = 0.5, fill = "lightblue", color = "grey40") + theme_minimal() + stat_function(fun = dnorm, color = rgb(22, 160, 133, max = 255), size = 1, args = list(mean = mean.inverts.BC, sd = sd.inverts.BC)) + geom_vline(xintercept = mean.inverts.BC, color = "blue", size = 1) + geom_vline(xintercept = mean.inverts.BC*0.9, color = "blue", linetype = "dashed", size = 0.6) + geom_vline(xintercept = mean.inverts.BC*1.1, color = "blue", linetype = "dashed", size = 0.6) + geom_vline(xintercept = median.inverts.BC, color = "red", size = 1) + geom_vline(xintercept = median.inverts.BC*0.9, color = "red", linetype = "dashed", size = 0.6) + geom_vline(xintercept = 
median.inverts.BC*1.1, color = "red", linetype = "dashed", size = 0.6) + geom_vline(xintercept = mean.inverts.BC - sd.inverts.BC, color = "green", linetype = "dashed", size = 0.8) + geom_vline(xintercept = mean.inverts.BC + sd.inverts.BC, color = "green", linetype = "dashed", size = 0.8) + labs(x = "Bray-Curtis Dissimilarity Index\nInvertebrates", y = "Density", title = "Normality of Invertebrates Bray-Curtis Dissimilarity Index", subtitle = paste0("Shapiro's-Test: ",main1.0, "\nMedian: ",round(median.inverts.BC, digits = 4), "\nMean: ",round(mean.inverts.BC, digits = 4), "\nVariance: ",round(var.inverts.BC, digits = 4), "\nSD: ",round(sd.inverts.BC, digits = 4))) plot.qq.0 <- ggplot(df_mod, aes(sample = inverts_BC_all)) + stat_qq(color = rgb(44, 62, 80, max = 255), size = 2) + stat_qq_line(size = 1, color = rgb(22, 160, 133, max = 255)) + theme_minimal() + labs(x = "Theoretical Quantiles", y = "Sample Quantiles", title = "Invertebrates Bray-Curtis Dissimilarity Index", subtitle = "Normal Q-Q Plot") ## Check variance homogeneity # ~ treatment OBS_var_trt <- leveneTest(inverts_BC_all ~ treatment, data = df_mod) if(OBS_var_trt$`Pr(>F)`[1] < 0.001){ main.trt.0 <- "p < 0.001" } if(OBS_var_trt$`Pr(>F)`[1] >= 0.001 & OBS_var_trt$`Pr(>F)`[1] < 0.01){ main.trt.0 <- "p < 0.01" } if(OBS_var_trt$`Pr(>F)`[1] >= 0.01 & OBS_var_trt$`Pr(>F)`[1] < 0.05){ main.trt.0 <- "p < 0.05" } if(OBS_var_trt$`Pr(>F)`[1] >= 0.05 ){ main.trt.0 <- parse(text = paste0('p == ', round(OBS_var_trt$`Pr(>F)`[1], digits = 3))) } box.obs.trt <- ggplot(df_mod, aes(x = treatment, y = inverts_BC_all, fill = treatment)) + geom_boxplot(outlier.shape = 21) + theme_minimal() + theme(legend.position = "none") + scale_fill_manual(values = c("blue","red")) + labs(x = "treatment", y = "Bray-Curtis Dissimilarity Index\nInvertebrates", title = "Invertebrates Bray-Crutis Dissimilarity Index ~ treatment", subtitle = main.trt.0) # ~ Region OBS_var_region <- leveneTest(inverts_BC_all ~ Region, data = df_mod) 
# ------------------------------------------------------------------
# Model assumptions (continued) and model validation for the current
# moving-window LMER fit (lmer_full_mod).  Levene tests check variance
# homogeneity of the OBSERVED response (and later of the model
# residuals) per grouping factor; each test is paired with a boxplot
# whose subtitle reports the Levene p-value.  P-values are shown as
# binned labels ("p < 0.001" / "p < 0.01" / "p < 0.05"); only when
# p >= 0.05 is the exact (rounded) value printed.
# NOTE(review): fixed the typo "Bray-Crutis" -> "Bray-Curtis" in plot
# titles, and corrected the figure-header label to match the fitted
# model, which uses `treatment` (see the lmer() call above), not
# `treatment_recoded`; the label was also missing its closing ")".
# ------------------------------------------------------------------
if(OBS_var_region$`Pr(>F)`[1] < 0.001){ main.region.0 <- "p < 0.001" }
if(OBS_var_region$`Pr(>F)`[1] >= 0.001 & OBS_var_region$`Pr(>F)`[1] < 0.01){ main.region.0 <- "p < 0.01" }
if(OBS_var_region$`Pr(>F)`[1] >= 0.01 & OBS_var_region$`Pr(>F)`[1] < 0.05){ main.region.0 <- "p < 0.05" }
if(OBS_var_region$`Pr(>F)`[1] >= 0.05 ){ main.region.0 <- parse(text = paste0('p == ', round(OBS_var_region$`Pr(>F)`[1], digits = 3))) }

box.obs.region <- ggplot(df_mod, aes(x = Region, y = inverts_BC_all, fill = Region)) +
  geom_boxplot(outlier.shape = 21, alpha = 0.7) +
  theme_minimal() +
  theme(legend.position = "none") +
  labs(x = "Region", y = "Bray-Curtis Dissimilarity Index\nInvertebrates",
       title = "Invertebrates Bray-Curtis Dissimilarity Index ~ Region",
       subtitle = main.region.0)

# ~ Farm
OBS_var_farm <- leveneTest(inverts_BC_all ~ Farm, data = df_mod)
if(OBS_var_farm$`Pr(>F)`[1] < 0.001){ main.farm.0 <- "p < 0.001" }
if(OBS_var_farm$`Pr(>F)`[1] >= 0.001 & OBS_var_farm$`Pr(>F)`[1] < 0.01){ main.farm.0 <- "p < 0.01" }
if(OBS_var_farm$`Pr(>F)`[1] >= 0.01 & OBS_var_farm$`Pr(>F)`[1] < 0.05){ main.farm.0 <- "p < 0.05" }
if(OBS_var_farm$`Pr(>F)`[1] >= 0.05 ){ main.farm.0 <- parse(text = paste0('p == ', round(OBS_var_farm$`Pr(>F)`[1], digits = 3))) }

box.obs.farm <- ggplot(df_mod, aes(x = Farm, y = inverts_BC_all, fill = Farm)) +
  geom_boxplot(outlier.shape = 21, alpha = 0.7) +
  scale_fill_brewer(palette = "GnBu") +
  theme_minimal() +
  theme(legend.position = "none") +
  labs(x = "Farm", y = "Bray-Curtis Dissimilarity Index\nInvertebrates",
       title = "Invertebrates Bray-Curtis Dissimilarity Index ~ Farm",
       subtitle = main.farm.0)

# ~ Region * Farm
OBS_var_region_farm <- leveneTest(inverts_BC_all ~ Region*Farm, data = df_mod)
if(OBS_var_region_farm$`Pr(>F)`[1] < 0.001){ main.region.farm.0 <- "p < 0.001" }
if(OBS_var_region_farm$`Pr(>F)`[1] >= 0.001 & OBS_var_region_farm$`Pr(>F)`[1] < 0.01){ main.region.farm.0 <- "p < 0.01" }
if(OBS_var_region_farm$`Pr(>F)`[1] >= 0.01 & OBS_var_region_farm$`Pr(>F)`[1] < 0.05){ main.region.farm.0 <- "p < 0.05" }
if(OBS_var_region_farm$`Pr(>F)`[1] >= 0.05 ){ main.region.farm.0 <- parse(text = paste0('p == ', round(OBS_var_region_farm$`Pr(>F)`[1], digits = 3))) }

box.obs.region.farm <- ggplot(df_mod, aes(x = Region, y = inverts_BC_all, fill = Farm)) +
  geom_boxplot(outlier.shape = 21, alpha = 0.7) +
  scale_fill_brewer(palette = "GnBu") +
  theme_minimal() +
  theme(legend.position = "none") +
  labs(x = "Region", y = "Bray-Curtis Dissimilarity Index\nInvertebrates",
       title = "Invertebrates Bray-Curtis Dissimilarity Index ~ Region * Farm",
       subtitle = main.region.farm.0)

# Combine all plots in one figure
# (the inner grid.arrange stacks the six assumption plots; the outer one,
# inside ggsave(), adds the per-iteration title banner before saving)
final.plot0 <- grid.arrange(plot.norm.0, plot.qq.0,
                            box.obs.trt, box.obs.region,
                            box.obs.farm, box.obs.region.farm,
                            ncol = 2,
                            top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment + (1|Region/Farm))"),
                                            theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 12)))
ggsave(figure.title0,
       final.plot0 <- grid.arrange(final.plot0,
                                   top = tableGrob(t(title),
                                                   theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 16))),
       width = 25, height = 32, units = "cm")

#================================================
## Model validation
# Observed vs fitted values
# ~ treatment
obs.fit.trt <- ggplot(df_mod, aes(x = Fitted, y = inverts_BC_all, color = treatment, fill = treatment)) +
  geom_point(shape = 21, size = 2, alpha = 0.5) +
  facet_wrap(~ treatment, ncol = 2) +
  geom_abline(slope = 1, intercept = 0) +
  theme_minimal() +
  theme(strip.background = element_rect(color = "grey50"), legend.position = "none") +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  labs(x = "Fitted", y = "Bray-Curtis Dissimilarity Index\nInvertebrates",
       title = "Observed vs. Fitted", subtitle = "~ treatment")

# ~ Region
obs.fit.region <- ggplot(df_mod, aes(x = Fitted, y = inverts_BC_all, color = Region, fill = Region)) +
  geom_point(shape = 21, size = 2, alpha = 0.5) +
  facet_wrap(~ Region, ncol = 2) +
  geom_abline(slope = 1, intercept = 0) +
  theme_minimal() +
  theme(strip.background = element_rect(color = "grey50"), legend.position = "none") +
  labs(x = "Fitted", y = "Bray-Curtis Dissimilarity Index\nInvertebrates",
       title = "Observed vs. Fitted", subtitle = "~ Region")

# ~ Farm
obs.fit.farm <- ggplot(df_mod, aes(x = Fitted, y = inverts_BC_all, fill = Farm)) +
  geom_point(shape = 21, size = 2, alpha = 0.5, color = "grey50") +
  scale_fill_brewer(palette = "GnBu") +
  facet_wrap(~ Farm, ncol = 3) +
  geom_abline(slope = 1, intercept = 0) +
  theme_minimal() +
  theme(strip.background = element_rect(color = "grey50"), legend.position = "none") +
  labs(x = "Fitted", y = "Bray-Curtis Dissimilarity Index\nInvertebrates",
       title = "Observed vs. Fitted", subtitle = "~ Farm")

# ~ Region * Farm
obs.fit.region.farm <- ggplot(df_mod, aes(x = Fitted, y = inverts_BC_all, color = Region, fill = Region)) +
  geom_point(shape = 21, size = 2, alpha = 0.5) +
  facet_grid(Region ~ Farm) +
  geom_abline(slope = 1, intercept = 0) +
  theme_minimal() +
  theme(strip.background = element_rect(color = "grey50"), legend.position = "none") +
  labs(x = "Fitted", y = "Bray-Curtis Dissimilarity Index\nInvertebrates",
       title = "Observed vs. Fitted", subtitle = "~ Region * Farm")

#================================================
# Collinearity
# VIF < 5 = low, 5-10 = moderate, >= 10 = high correlation between
# fixed-effect terms; bar colours are assembled to match only the
# correlation levels actually present in this fit.
result.coll <- check_collinearity(lmer_full_mod)
result.coll[which(result.coll$VIF < 5),"Correlation"] <- "low"
result.coll[which(result.coll$VIF >= 5 & result.coll$VIF < 10),"Correlation"] <- "moderate"
result.coll[which(result.coll$VIF >= 10),"Correlation"] <- "high"
result.coll$Correlation <- as.factor(result.coll$Correlation)
result.coll$Correlation <- factor(result.coll$Correlation, levels = c("low", "moderate", "high"))
mycol.coll <- c(rgb(39, 174, 96, max = 255), rgb(230, 126, 34, max = 255), rgb(228, 26, 28, max = 255))
corr.levels <- unique(result.coll$Correlation)
mycol <- c()
if ("low" %in% corr.levels){ mycol <- mycol.coll[1] }
if ("moderate" %in% corr.levels){ mycol <- c(mycol,mycol.coll[2]) }
if ("high" %in% corr.levels){ mycol <- c(mycol,mycol.coll[3]) }
plot.coll <- ggplot(result.coll, aes(x = Parameter, y = VIF)) +
  geom_bar(stat = "identity", width = 0.7, aes(fill = Correlation)) +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 30, hjust = 1)) +
  scale_fill_manual(values = mycol) +
  labs(x = "Parameter", y = "VIF", title = "Check for Multicollinearity", subtitle = "")

#================================================
#################
##             ##
##  Residuals  ##
##             ##
#################

# Binned residuals
# Function binned_residuals produces a plot if not saved to an object
# However, that plot can't be further modified
# Therefore, I save the output to an object and recreated the plot
result.binned <- binned_residuals(lmer_full_mod)
# percentage of bins whose mean residual falls inside the error bounds
resid.inside.err <- sum(result.binned$group == "yes")/nrow(result.binned)*100
resid.inside.err <- round(resid.inside.err, digits = 2)
plot.binned <- ggplot(result.binned, aes(x = xbar*100, y = ybar)) +
  geom_ribbon(aes(ymin = -Inf, ymax = -se), color = "grey80", fill = "grey95", alpha = 0.5) +
  geom_ribbon(aes(ymin = se, ymax = +Inf), color = "grey80", fill = "grey95", alpha = 0.5) +
  geom_hline(yintercept = 0, color = "grey80") +
  geom_point(aes(color = group), size = 3) +
  theme_bw() +
  scale_color_brewer(palette = "Set1") +
  labs(x = paste0("Estimated probability of ", resp.variable),
       y = "Average residual", title = "Binned residuals",
       subtitle = paste0(resid.inside.err, "% of the residuals are inside the error bounds."))

# ~ Fitted
result.heteroscedasticity <- check_heteroscedasticity(lmer_full_mod)
if(result.heteroscedasticity[1] < 0.001){ p.res.fit <- "p < 0.001" }
if(result.heteroscedasticity[1] >= 0.001 & result.heteroscedasticity[1] < 0.01){ p.res.fit <- "p < 0.01" }
if(result.heteroscedasticity[1] >= 0.01 & result.heteroscedasticity[1] < 0.05){ p.res.fit <- "p < 0.05" }
if(result.heteroscedasticity[1] >= 0.05 ){ p.res.fit <- parse(text = paste0('p == ', round(result.heteroscedasticity[1], digits = 3))) }
plot.res.fit <- ggplot(df_mod, aes(x = Fitted, y = Residuals)) +
  geom_point(size = 2, color = rgb(44, 62, 80, max = 255)) +
  theme_minimal() +
  geom_smooth(method = "loess", size = 1, color = rgb(228, 26, 28, max = 255), se = FALSE) +
  labs(x = "Fitted", y = "Residuals", title = "Residuals vs. Fitted", subtitle = p.res.fit)

# ~ treatment
RESID_var_trt <- leveneTest(Residuals ~ treatment, data = df_mod)
if(RESID_var_trt$`Pr(>F)`[1] < 0.001){ main.trt <- "p < 0.001" }
if(RESID_var_trt$`Pr(>F)`[1] >= 0.001 & RESID_var_trt$`Pr(>F)`[1] < 0.01){ main.trt <- "p < 0.01" }
if(RESID_var_trt$`Pr(>F)`[1] >= 0.01 & RESID_var_trt$`Pr(>F)`[1] < 0.05){ main.trt <- "p < 0.05" }
if(RESID_var_trt$`Pr(>F)`[1] >= 0.05 ){ main.trt <- parse(text = paste0('p == ', round(RESID_var_trt$`Pr(>F)`[1], digits = 3))) }
box.trt <- ggplot(df_mod, aes(x = treatment, y = Residuals, fill = treatment)) +
  geom_boxplot(outlier.shape = 21) +
  theme_minimal() +
  theme(legend.position = "none") +
  scale_fill_manual(values = c("blue","red")) +
  labs(x = "treatment", y = "Residuals", title = "Residuals ~ treatment", subtitle = main.trt)

# ~ Region
RESID_var_region <- leveneTest(Residuals ~ Region, data = df_mod)
if(RESID_var_region$`Pr(>F)`[1] < 0.001){ main.region <- "p < 0.001" }
if(RESID_var_region$`Pr(>F)`[1] >= 0.001 & RESID_var_region$`Pr(>F)`[1] < 0.01){ main.region <- "p < 0.01" }
if(RESID_var_region$`Pr(>F)`[1] >= 0.01 & RESID_var_region$`Pr(>F)`[1] < 0.05){ main.region <- "p < 0.05" }
if(RESID_var_region$`Pr(>F)`[1] >= 0.05 ){ main.region <- parse(text = paste0('p == ', round(RESID_var_region$`Pr(>F)`[1], digits = 3))) }
box.region <- ggplot(df_mod, aes(x = Region, y = Residuals, fill = Region)) +
  geom_boxplot(outlier.shape = 21, alpha = 0.7) +
  theme_minimal() +
  theme(legend.position = "none") +
  labs(x = "Region", y = "Residuals", title = "Residuals ~ Region", subtitle = main.region)

# ~ Farm
RESID_var_farm <- leveneTest(Residuals ~ Farm, data = df_mod)
if(RESID_var_farm$`Pr(>F)`[1] < 0.001){ main.farm <- "p < 0.001" }
if(RESID_var_farm$`Pr(>F)`[1] >= 0.001 & RESID_var_farm$`Pr(>F)`[1] < 0.01){ main.farm <- "p < 0.01" }
if(RESID_var_farm$`Pr(>F)`[1] >= 0.01 & RESID_var_farm$`Pr(>F)`[1] < 0.05){ main.farm <- "p < 0.05" }
if(RESID_var_farm$`Pr(>F)`[1] >= 0.05 ){ main.farm <- parse(text = paste0('p == ', round(RESID_var_farm$`Pr(>F)`[1], digits = 3))) }
box.farm <- ggplot(df_mod, aes(x = Farm, y = Residuals, fill = Farm)) +
  geom_boxplot(outlier.shape = 21, alpha = 0.7) +
  scale_fill_brewer(palette = "GnBu") +
  theme_minimal() +
  theme(legend.position = "none") +
  labs(x = "Farm", y = "Residuals", title = "Residuals ~ Farm", subtitle = main.farm)

# ~ Region * Farm
RESID_var_region_farm <- leveneTest(Residuals ~ Region*Farm, data = df_mod)
if(RESID_var_region_farm$`Pr(>F)`[1] < 0.001){ main.region.farm <- "p < 0.001" }
if(RESID_var_region_farm$`Pr(>F)`[1] >= 0.001 & RESID_var_region_farm$`Pr(>F)`[1] < 0.01){ main.region.farm <- "p < 0.01" }
if(RESID_var_region_farm$`Pr(>F)`[1] >= 0.01 & RESID_var_region_farm$`Pr(>F)`[1] < 0.05){ main.region.farm <- "p < 0.05" }
if(RESID_var_region_farm$`Pr(>F)`[1] >= 0.05 ){ main.region.farm <- parse(text = paste0('p == ', round(RESID_var_region_farm$`Pr(>F)`[1], digits = 3))) }
box.region.farm <- ggplot(df_mod, aes(x = Region, y = Residuals, fill = Farm)) +
  geom_boxplot(outlier.shape = 21, alpha = 0.7) +
  scale_fill_brewer(palette = "GnBu") +
  theme_minimal() +
  theme(legend.position = "none") +
  labs(x = "Region", y = "Residuals", title = "Residuals ~ Region * Farm", subtitle = main.region.farm)

#================================================
# Check for normal distribution of residuals
norm1 <- shapiro.test(df_mod$Residuals)
if(norm1$p.value < 0.001){ main1 <- "p < 0.001" }
if(norm1$p.value >= 0.001 & norm1$p.value < 0.01){ main1 <- "p < 0.01" }
if(norm1$p.value >= 0.01 & norm1$p.value < 0.05){ main1 <- "p < 0.05" }
if(norm1$p.value >= 0.05 ){ main1 <- parse(text = paste0('p == ', round(norm1$p.value, digits = 3))) }
median.resid <- median(df_mod$Residuals)
mean.resid <- mean(df_mod$Residuals)
sd.resid <- sd(df_mod$Residuals)
var.resid <- var(df_mod$Residuals)
# Histogram + density with reference lines: mean (blue), median (red),
# each with +/-10% dashed guides, and mean +/- 1 SD (green dashed);
# the theoretical normal curve is overlaid via stat_function().
plot.norm <- ggplot(df_mod, aes(x = Residuals)) +
  geom_histogram(aes(y = ..density..), binwidth = 0.05, alpha = 0.6, fill = "lightblue", color = "grey70") +
  geom_density(alpha = 0.5, fill = "lightblue", color = "grey40") +
  theme_minimal() +
  stat_function(fun = dnorm, color = rgb(22, 160, 133, max = 255), size = 1,
                args = list(mean = mean.resid, sd = sd.resid)) +
  geom_vline(xintercept = mean.resid, color = "blue", size = 1) +
  geom_vline(xintercept = mean.resid*0.9, color = "blue", linetype = "dashed", size = 0.6) +
  geom_vline(xintercept = mean.resid*1.1, color = "blue", linetype = "dashed", size = 0.6) +
  geom_vline(xintercept = median.resid, color = "red", size = 1) +
  geom_vline(xintercept = median.resid*0.9, color = "red", linetype = "dashed", size = 0.6) +
  geom_vline(xintercept = median.resid*1.1, color = "red", linetype = "dashed", size = 0.6) +
  geom_vline(xintercept = mean.resid - sd.resid, color = "green", linetype = "dashed", size = 0.8) +
  geom_vline(xintercept = mean.resid + sd.resid, color = "green", linetype = "dashed", size = 0.8) +
  labs(x = "Residuals", y = "Density", title = "Normality of residuals",
       subtitle = paste0("Shapiro's-Test: ",main1,
                         "\nMedian: ",round(median.resid, digits = 4),
                         "\nMean: ",round(mean.resid, digits = 4),
                         "\nVariance: ",round(var.resid, digits = 4),
                         "\nSD: ",round(sd.resid, digits = 4)))
plot.qq <- ggplot(df_mod, aes(sample = Residuals)) +
  stat_qq(color = rgb(44, 62, 80, max = 255), size = 2) +
  stat_qq_line(size = 1, color = rgb(22, 160, 133, max = 255)) +
  theme_minimal() +
  labs(x = "Theoretical Quantiles", y = "Sample Quantiles",
       title = "Residuals", subtitle = "Normal Q-Q Plot")

#================================================
# Normality of Random Effects
result.mod <- check_model(lmer_full_mod)
# ~ Region
REQQ.region <- result.mod$REQQ$Region
reqq.plot.region <- ggplot(REQQ.region, aes(x = x, y = y)) +
  geom_point(size = 2, color = rgb(44, 62, 80, max = 255)) +
  geom_errorbar(aes(ymin = conf.low, ymax = conf.high), width = 0.2) +
  geom_smooth(method = "lm", color = rgb(22, 160, 133, max = 255), size = 1, se = FALSE) +
  theme_minimal() +
  labs(x = "Theoretical Quantiles", y = "RE Quantiles",
       title = "Normality of Random Effects", subtitle = "Region")
# ~ Farm:Region
REQQ.farm.region <- result.mod$REQQ$"Farm:Region"
reqq.plot.farm.region <- ggplot(REQQ.farm.region, aes(x = x, y = y)) +
  geom_point(size = 2, color = rgb(44, 62, 80, max = 255)) +
  geom_errorbar(aes(ymin = conf.low, ymax = conf.high), width = 0.2) +
  geom_smooth(method = "lm", color = rgb(22, 160, 133, max = 255), size = 1, se = FALSE) +
  theme_minimal() +
  labs(x = "Theoretical Quantiles", y = "RE Quantiles",
       title = "Normality of Random Effects", subtitle = "Farm:Region")

#================================================
################################################
##                                            ##
##  Plot observed vs fitted and collinearity  ##
##                                            ##
################################################
final.plot1 <- grid.arrange(obs.fit.trt, obs.fit.region, obs.fit.farm, obs.fit.region.farm, plot.coll,
                            ncol = 2,
                            top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment + (1|Region/Farm))"),
                                            theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 12)))
ggsave(figure.title1,
       final.plot1 <- grid.arrange(final.plot1,
                                   top = tableGrob(t(title),
                                                   theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 16))),
       width = 26, height = 32, units = "cm")

#================================================
################################
##                            ##
##  Plot residuals vs fitted  ##
##                            ##
################################
final.plot2 <- grid.arrange(plot.norm, plot.qq, plot.res.fit, plot.binned,
                            ncol = 2,
                            top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment + (1|Region/Farm))"),
                                            theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 12)))
ggsave(figure.title2.1,
       final.plot2 <- grid.arrange(final.plot2,
                                   top = tableGrob(t(title),
                                                   theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 16))),
       width = 28, height = 25, units = "cm")
#================================================
# ------------------------------------------------------------------
# Random-effect / residual diagnostic figure, then the model-output
# figures for the current moving-window iteration: fitted and raw data
# faceted by Region x Farm, by Region, and by sampling day, each with
# per-treatment and overall lm() trend lines of the FITTED values.
# NOTE(review): corrected the figure-header label to match the fitted
# model, which uses `treatment` (see the lmer() call above), not
# `treatment_recoded`; the label was also missing its closing ")".
# ------------------------------------------------------------------
########################################
##                                    ##
##  Plot residuals vs random effects  ##
##                                    ##
########################################
final.plot3 <- grid.arrange(reqq.plot.region, reqq.plot.farm.region,
                            box.trt, box.region, box.farm, box.region.farm,
                            ncol = 2,
                            top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment + (1|Region/Farm))"),
                                            theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 12)))
ggsave(figure.title2.3,
       final.plot3 <- grid.arrange(final.plot3,
                                   top = tableGrob(t(title),
                                                   theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 16))),
       width = 25, height = 32, units = "cm")

#================================================
################################
##                            ##
##  Plot data - Model output  ##
##                            ##
################################
ylab <- expression(paste(bold("Invertebrates Bray-Curtis Dissimilarity")))

## Panels: Region ~ Farm
# Fitted data
g1 <- ggplot(df_mod, aes(x = day, y = Fitted)) +
  geom_point(aes(fill = treatment, color = treatment), size = 2, alpha = 0.2) +
  facet_grid(Region ~ Farm) +
  geom_smooth(method = "lm", se = FALSE, color = "black") +
  geom_smooth(aes(color = treatment), method = "lm", se = FALSE, linetype = "dashed") +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  labs(x = "Days since the end of the drought",
       title = "Fitted data", subtitle = "lm(fitted data ~ day)")

# Raw data
g2 <- ggplot(df_mod, aes(x = day, y = inverts_BC_all)) +
  geom_point(aes(fill = treatment, color = treatment), size = 2, alpha = 0.2) +
  facet_grid(Region ~ Farm) +
  geom_smooth(aes(y = Fitted), method = "lm", se = FALSE, color = "black") +
  geom_smooth(aes(y = Fitted, color = treatment), method = "lm", se = FALSE, linetype = "dashed") +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  labs(x = "Days since the end of the drought",
       title = "Raw data", subtitle = "lm(fitted data ~ day)")

## Panels: Region
# Fitted data
g3 <- ggplot(df_mod, aes(x = day, y = Fitted)) +
  geom_point(aes(fill = treatment, color = treatment), size = 3, alpha = 0.2) +
  facet_wrap(~ Region) +
  geom_smooth(aes(color = treatment, linetype = Farm), method = "lm", se = FALSE, size = 0.7) +
  geom_smooth(method = "lm", se = FALSE, color = "black", size = 1) +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  labs(x = "Days since the end of the drought",
       title = "Fitted data", subtitle = "lm(fitted data ~ day)")

# Raw data
g4 <- ggplot(df_mod, aes(x = day, y = inverts_BC_all)) +
  geom_point(aes(fill = treatment, color = treatment), size = 3, alpha = 0.2) +
  facet_wrap(~ Region) +
  geom_smooth(aes(y = Fitted, color = treatment, linetype = Farm), method = "lm", se = FALSE, size = 0.7) +
  geom_smooth(aes(y = Fitted), method = "lm", se = FALSE, color = "black", size = 1) +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  labs(x = "Days since the end of the drought",
       title = "Raw data", subtitle = "lm(fitted data ~ day)")

# All four plots together
final.plot4 <- grid.arrange(g2,g4,g1,g3,
                            ncol = 2,
                            top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment + (1|Region/Farm))"),
                                            theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 12)))
ggsave(figure.title3.1,
       final.plot4 <- grid.arrange(final.plot4,
                                   top = tableGrob(t(title),
                                                   theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 16))),
       width = 40, height = 30, units = "cm")

#================================================
## Panels: Day
# Fitted data ~ soil moisture
g5 <- ggplot(df_mod, aes(x = soil_ES, y = Fitted)) +
  geom_point(aes(fill = treatment, color = treatment), size = 3, alpha = 0.2) +
  facet_grid(Region ~ day) +
  geom_smooth(aes(color = treatment, linetype = Farm), method = "lm", se = FALSE, size = 0.7) +
  geom_smooth(method = "lm", se = FALSE, color = "black", size = 1) +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  geom_vline(xintercept = 0, color = "grey50", linetype = "dashed") +
  labs(x = "Soil moisture - Effect size",
       title = "Fitted data", subtitle = "lm(fitted data ~ soil moisture effect size)")

# Raw data
g6 <- ggplot(df_mod, aes(x = soil_ES, y = inverts_BC_all)) +
  geom_point(aes(fill = treatment, color = treatment), size = 3, alpha = 0.2) +
  facet_grid(Region ~ day) +
  geom_smooth(aes(y = Fitted, color = treatment, linetype = Farm), method = "lm", se = FALSE, size = 0.7) +
  geom_smooth(aes(y = Fitted), method = "lm", se = FALSE, color = "black", size = 1) +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  geom_vline(xintercept = 0, color = "grey50", linetype = "dashed") +
  labs(x = "Soil moisture - Effect size",
       title = "Raw data", subtitle = "lm(fitted data ~ soil moisture effect size)")

# Combine plots
final.plot5 <- grid.arrange(g6,g5,
                            ncol = 1,
                            top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment + (1|Region/Farm))"),
                                            theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 12)))
ggsave(figure.title3.2, final.plot5 <- grid.arrange(final.plot5, top = tableGrob(t(title), theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 16))), width = 24, height = 30, units = "cm") #================================================ ####################### ## ## ## Save statistics ## ## ## ####################### timepoint.hr[m] <- daytime.hr width.hr[m] <- duration.hr obs_norm[m] <- norm0$p.value obs_var_trt[m] <- OBS_var_trt$`Pr(>F)`[1] obs_var_region[m] <- OBS_var_region$`Pr(>F)`[1] obs_var_farm[m] <- OBS_var_farm$`Pr(>F)`[1] obs_var_region_farm[m] <- OBS_var_region_farm$`Pr(>F)`[1] resid_norm[m] <- norm1$p.value resid_var_trt[m] <- RESID_var_trt$`Pr(>F)`[1] resid_var_region[m] <- RESID_var_region$`Pr(>F)`[1] resid_var_farm[m] <- RESID_var_farm$`Pr(>F)`[1] resid_var_region_farm[m] <- RESID_var_region_farm$`Pr(>F)`[1] mod_AIC[m] <- AIC(lmer_full_mod) ########################## ## ## ## Calculation of Rsq ## ## ## ########################## # For details, see Nakagawa and Schielzeth, 2012 # https://doi.org/10.1111/j.2041-210x.2012.00261.x # Approach later modified by Johnson, 2014 # https://doi.org/10.1111/2041-210X.12225 # var(f): variance of the fixed effects # var(r): variance of the radnom effects # var(e): variance of the model residuals # Marginal # var(f) / [var(f) + var(r) + var(e)] mod_Rsq_marginal[m] <- model_performance(lmer_full_mod)$R2_marginal # Conditional # [var(f) + var(r)] / [var(f) + var(r) + var(e)] mod_Rsq_conditional[m] <- model_performance(lmer_full_mod)$R2_conditional # Increase m m <- m + 1 # remove the model object rm(lmer_full_mod) } } #================================================ # Cobine all summary vectors into a data frame mod_summary <- data.frame(timepoint.hr, width.hr, obs_norm, obs_var_trt, obs_var_region, obs_var_farm, obs_var_region_farm, resid_norm, resid_var_trt, resid_var_region, resid_var_farm, resid_var_region_farm, mod_AIC, mod_Rsq_marginal, mod_Rsq_conditional) 
#================================================ # Export summary and output of the models write.table(mod_summary, paste0(mydir.data, "/LMER Model summary - inverts BC vs soil moisture trt 2.csv"), sep = ",", row.names = F) write.table(aov_mod_final, paste0(mydir.data, "/LMER Anova output - inverts BC vs soil moisture trt 2.csv"), sep = ",", row.names = F) write.table(fixef_out_final, paste0(mydir.data, "/LMER Model output fixef - inverts BC vs soil moisture trt 2.csv"), sep = ",", row.names = F) write.table(random_out_final.1, paste0(mydir.data, "/LMER Model output random Region - inverts BC vs soil moisture trt 2.csv"), sep = ",", row.names = F) write.table(random_out_final.2, paste0(mydir.data, "/LMER Model output random Farm Region - inverts BC vs soil moisture trt 2.csv"), sep = ",", row.names = F) #================================================ # Explore the summary par(mfrow = c(1,1), oma = c(0,0,0,0)) mar.Rsq <- expression(paste(bold("Marginal")~bolditalic("R"^"2"))) con.Rsq <- expression(paste(bold("Conditional")~bolditalic("R"^"2"))) # AIC vs. 
Window width and marginal R2 gp1 <- ggplot(mod_summary, aes(x = width.hr, y = mod_AIC)) + geom_point(aes(color = mod_Rsq_marginal, fill = mod_Rsq_marginal), shape = 21, size = 4, alpha = 0.5) + scale_color_viridis_c(name = mar.Rsq) + scale_fill_viridis_c(name = mar.Rsq) + theme_minimal() + theme(plot.title = element_text(face = "bold")) + labs(x = expression(paste(bold("Window width (h)"))), y = expression(paste(bold("AIC"))), title = "LMER - Invertebrates Bray-Curtis Dissimilarity", subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") + geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5) ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - AIC vs Window width and mar R2.png"), gp1, width = 20, height = 16, units = "cm") # AIC vs. 
Window width and conditional R2 gp2 <- ggplot(mod_summary, aes(x = width.hr, y = mod_AIC)) + geom_point(aes(color = mod_Rsq_conditional, fill = mod_Rsq_conditional), shape = 21, size = 4, alpha = 0.5) + scale_color_viridis_c(name = con.Rsq) + scale_fill_viridis_c(name = con.Rsq) + theme_minimal() + theme(plot.title = element_text(face = "bold")) + labs(x = expression(paste(bold("Window width (h)"))), y = expression(paste(bold("AIC"))), title = "LMER - Invertebrates Bray-Curtis Dissimilarity", subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") + geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5) ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - AIC vs Window width and con R2.png"), gp2, width = 20, height = 16, units = "cm") # Both combined gp1 <- gp1 + theme(legend.position = "bottom") + guides(fill = guide_colorbar(title.position = "top", title.hjust = 0.5, direction = "horizontal", barwidth = unit(5, "cm")), color = guide_colorbar(title.position = "top", title.hjust = 0.5, direction = "horizontal", barwidth = unit(5, "cm"))) gp2 <- gp2 + theme(legend.position = "bottom") + guides(fill = guide_colorbar(title.position = "top", title.hjust = 0.5, direction = "horizontal", barwidth = unit(5, "cm")), color = guide_colorbar(title.position = "top", title.hjust = 0.5, direction = "horizontal", barwidth = unit(5, "cm"))) ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - AIC vs Window width R2.png"), gp1 | gp2, width = 28, height = 16, units = "cm") # Marginal R2 vs. 
AIC gp3 <- ggplot(mod_summary, aes(x = mod_AIC, y = mod_Rsq_marginal, fill = width.hr, color = width.hr)) + geom_point(size = 4, shape = 21, alpha = 0.5) + scale_fill_viridis_c(name = "Window width (h)") + scale_color_viridis_c(name = "Window width (h)") + theme_minimal() + theme(plot.title = element_text(face = "bold"), legend.title = element_text(face = "bold")) + labs(x = expression(paste(bold("AIC"))), y = mar.Rsq, title = "LMER - Invertebrates Bray-Curtis Dissimilarity", subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Marginal R2 vs AIC.png"), gp3, width = 20, height = 16, units = "cm") # Conditional R2 vs. AIC gp4 <- ggplot(mod_summary, aes(x = mod_AIC, y = mod_Rsq_conditional, fill = width.hr, color = width.hr)) + geom_point(size = 4, shape = 21, alpha = 0.5) + scale_fill_viridis_c(name = "Window width (h)") + scale_color_viridis_c(name = "Window width (h)") + theme_minimal() + theme(plot.title = element_text(face = "bold"), legend.title = element_text(face = "bold")) + labs(x = expression(paste(bold("AIC"))), y = con.Rsq, title = "LMER - Invertebrates Bray-Curtis Dissimilarity", subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Conditional R2 vs AIC.png"), gp4, width = 20, height = 16, units = "cm") ################# ## ## ## Residuals ## ## ## ################# ylab <- expression(paste(bolditalic("P"),bold("-value"))) # Shapiro.test p1 <- ggplot(mod_summary, aes(x = width.hr, y = resid_norm)) + geom_point(aes(color = mod_Rsq_conditional, fill = mod_Rsq_conditional, size = mod_AIC), shape = 21, alpha = 0.5) + scale_color_viridis_c(name = con.Rsq) + scale_fill_viridis_c(name = con.Rsq) + theme_minimal() + theme(plot.title = element_text(face = "bold"), 
axis.title.x = element_text(face = "bold"), legend.title = element_text(face = "bold")) + labs(x = "Window width (h)", y = ylab, title = "LMER - Invertebrates Bray-Curtis Dissimilarity\nShapiro's Test Residuals", subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") + geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5) ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Shapiro test.png"), p1, width = 20, height = 16, units = "cm") # leveneTest ~ Region p2 <- ggplot(mod_summary, aes(x = width.hr, y = resid_var_region)) + geom_point(aes(color = mod_Rsq_conditional, fill = mod_Rsq_conditional, size = mod_AIC), shape = 21, alpha = 0.5) + scale_color_viridis_c(name = con.Rsq) + scale_fill_viridis_c(name = con.Rsq) + theme_minimal() + theme(plot.title = element_text(face = "bold"), axis.title.x = element_text(face = "bold"), legend.title = element_text(face = "bold")) + labs(x = "Window width (h)", y = ylab, title = "LMER - Invertebrates Bray-Curtis Dissimilarity\nResiduals ~ Region", subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") + geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5) ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Levene Test Region.png"), p2, width = 20, height = 16, units = "cm") # leveneTest ~ Farm p3 <- 
ggplot(mod_summary, aes(x = width.hr, y = resid_var_farm)) + geom_point(aes(color = mod_Rsq_conditional, fill = mod_Rsq_conditional, size = mod_AIC), shape = 21, alpha = 0.5) + scale_color_viridis_c(name = con.Rsq) + scale_fill_viridis_c(name = con.Rsq) + theme_minimal() + theme(plot.title = element_text(face = "bold"), axis.title.x = element_text(face = "bold"), legend.title = element_text(face = "bold")) + labs(x = "Window width (h)", y = ylab, title = "LMER - Invertebrates Bray-Curtis Dissimilarity\nResiduals ~ Farm", subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") + geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5) ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Levene Test Farm.png"), p3, width = 20, height = 16, units = "cm") # leveneTest ~ Region * Farm p4 <- ggplot(mod_summary, aes(x = width.hr, y = resid_var_region_farm)) + geom_point(aes(color = mod_Rsq_conditional, fill = mod_Rsq_conditional, size = mod_AIC), shape = 21, alpha = 0.5) + scale_color_viridis_c(name = con.Rsq) + scale_fill_viridis_c(name = con.Rsq) + theme_minimal() + theme(plot.title = element_text(face = "bold"), axis.title.x = element_text(face = "bold"), legend.title = element_text(face = "bold")) + labs(x = "Window width (h)", y = ylab, title = "LMER - Invertebrates Bray-Curtis Dissimilarity\nResiduals ~ Region * Farm", subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") + geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) + 
geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5) ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Levene Test Region Farm.png"), p4, width = 20, height = 16, units = "cm") ################ ## ## ## p-values ## ## ## ################ names(mod_summary)[1:2] <- c("Daytime","Duration") mod_output <- left_join(aov_mod_final, mod_summary[,c("Daytime","Duration","mod_AIC","mod_Rsq_marginal","mod_Rsq_conditional")], by = c("Daytime","Duration")) mod_output$Term <- as.factor(mod_output$Term) mod_output$Term <- factor(mod_output$Term, levels = c("day","soil_ES","treatment", "day:soil_ES","day:treatment","soil_ES:treatment", "day:soil_ES:treatment")) # p-values vs. window width p5 <- ggplot(mod_output, aes(x = Duration, y = p.value)) + geom_point(aes(color = mod_Rsq_marginal, fill = mod_Rsq_marginal), shape = 21, alpha = 0.3, size = 3) + facet_wrap(~ Term, ncol = 3) + scale_color_viridis_c(name = mar.Rsq) + scale_fill_viridis_c(name = mar.Rsq) + theme_minimal() + theme(strip.background = element_rect(), plot.title = element_text(face = "bold"), axis.title.x = element_text(face = "bold"), legend.title = element_text(face = "bold"), panel.spacing = unit(1, "line")) + labs(x = "Window width (h)", y = ylab, title = "LMER - Invertebrates Bray-Curtis Dissimilarity", subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") + geom_hline(yintercept = 0.05, linetype = "dashed") + scale_y_continuous(limits = c(0,1), breaks = c(0,0.05,0.2,0.4,0.6,0.8,1), labels = c("0",expression(paste(bold("0.05"))),"0.2","0.4","0.6","0.8","1.0")) + geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) + geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) + 
geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5) ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - P values vs Window width.png"), p5, width = 24, height = 24, units = "cm") # p-values as barchart (significant vs non significant) mod_output[which(mod_output$p.value < 0.05), "Significant"] <- "yes" mod_output[which(mod_output$p.value >= 0.05), "Significant"] <- "no" mod_sig <- mod_output %>% group_by(Term,Significant) %>% summarise(n = n()) %>% mutate(freq = n / sum(n) * 100) p6 <- ggplot(mod_sig, aes(x = Term, y = freq)) + geom_bar(stat = "identity", width = 0.8, aes(fill = Significant)) + scale_fill_brewer(palette = "Set1") + theme_minimal() + theme(strip.background = element_rect(), axis.title = element_text(face = "bold"), axis.text.x = element_text(angle = 45, hjust = 1), plot.title = element_text(face = "bold")) + labs(x = "Term", y = "Frequency (%)", title = "LMER - Invertebrates Bray-Curtis Dissimilarity", subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Farm\nDaytime: 12:00 h") ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Significance of the terms.png"), p6, width = 18, height = 15, units = "cm")
/Invertebrates/Script/05_LMER_invertebrates_soil_treatment_recoded.R
permissive
DrJonYearsley/Grassland
R
false
false
66,416
r
################################################################################### ## This script fits a linear model of the invertebrate Bray-Curtis Dissimilarity ## ## in the Grassland Resilience experiment with fixed factors "day" (days after ## ## the end of the drought), effect size of soil moisture (calculated as soil ## ## moisture in each plot C1-D3 minus the average soil moisture in control plots) ## ## and treatment (control vs. drought) and random effects Region (Border, Cork, ## ## Dublin, Limerick) and Farm (1-5) ## ## The response is the invertebrate BC Index (each plot compared to an average ## ## invertebrate community composition in control plots at each site and each ## ## sampling day (9, 32, 64), i.e. reference community) ## ## Compared to the original script, here, treatment is coded according to the ## ## measured soil moisture, as some of the drought plots show a behaviour that ## ## is much more similar to the control plots. The updated coding of the column ## ## "treatment" is based on my personal opinion, not on any statistics or similar.## ## ## ## The script has be modified in order to test for the best moving window width ## ## used to calculate the effect size of the soil moisture ## ## It uses previously written function that calculates the soil moisture ES ## ## for any given timepoint of the sampling day (e.g. 12:00 noon) and any given ## ## window width (e.g. 
24 h, 48 h etc., negative if in the past) ## ## The aim is to find the moving window width that explains most of the variance ## ## ## ## Author of the modified script: ## ## Maja Ilic M.Ilic@qub.ac.uk ## ## first modified: 16 Feb 2020 ## ## last modified: 16 Feb 2020 ## ################################################################################### #=========================================== #### Clear objects from the workspace #### rm(list = ls()) #=========================================== #### Set working directory #### # Maja's desktop PC setwd("C:/Users/3054311/Documents/My Documents/Grassland project/07_Invertebrates") data.dir <- paste0(getwd(),"/Data/") figures.dir <- paste0(getwd(),"/Figures/") script.dir <- paste0(getwd(),"/Script/") #=========================================== #### Packages #### library(ggplot2) library(lubridate) library(dplyr) library(tidyr) library(stringr) library(lme4) library(rcompanion) library(car) library(performance) library(insight) library(see) library(cowplot) library(patchwork) library(gridExtra) library(effects) library(afex) #=========================================== #### Get raw data for soil moisture #### raw.data.dir <- "C:/Users/3054311/Documents/My Documents/Grassland project/01_Probes_soil_moisture_and_temp/Data/" load(paste0(raw.data.dir,"Cleaned_Soil_Moisture_Data_from_Loggers.RData")) # Recode all drought plots that show soil moisture behaviour more similar to control plots within the respective site df.moisture.recoded <- df.moisture df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Border3" & df.moisture.recoded$Plot == "D1"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Border3" & df.moisture.recoded$Plot == "D3"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Cork1" & df.moisture.recoded$Plot == "D1"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Dublin1" & df.moisture.recoded$Plot == "D1"] <- "C" 
df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Dublin1" & df.moisture.recoded$Plot == "D3"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Dublin5" & df.moisture.recoded$Plot == "D1"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Dublin5" & df.moisture.recoded$Plot == "D2"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Dublin5" & df.moisture.recoded$Plot == "D3"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Limerick1" & df.moisture.recoded$Plot == "D2"] <- "C" df.moisture.recoded$treatment[df.moisture.recoded$site_ID == "Limerick3" & df.moisture.recoded$Plot == "D3"] <- "C" # Finally, export this file: save(df.moisture.recoded, file = paste0(raw.data.dir,"Cleaned_Soil_Moisture_Data_from_Loggers_recoded.RData")) # Events load(file = paste0(raw.data.dir,"All events.RData")) #================================================ #### Set directory for data and plots #### # dir.create(paste0(data.dir,"LMER model invertebrates soil trt 2")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Model validation")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Model output")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Residuals")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Soil moisture effect size boxplot")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Soil moisture effect size V1")) # dir.create(paste0(figures.dir,"LMER model invertebrates soil trt 2/Soil moisture effect size V2")) mydir.data <- paste0(data.dir,"LMER model invertebrates soil trt 2") mydir <- paste0(figures.dir,"LMER model invertebrates soil trt 2") # Create a list of figure titles to be used within the mov.win.diff() function mydir.plot1 <- paste0(mydir,"/Soil moisture effect size boxplot/") mydir.plot2 <- 
paste0(mydir,"/Soil moisture effect size V1/") mydir.plot3 <- paste0(mydir,"/Soil moisture effect size V2/") myplot <- c("plot1", "plot2", "plot3") plot.dir <- c(mydir.plot1, mydir.plot2, mydir.plot3) list.figures <- setNames(as.list(plot.dir), myplot) #================================================ #### Load function mov.win.diff #### source(paste0(script.dir,"Function mov.win.diff.R")) #================================================ #### Import invertebrate data: Bray-Curtis Dissimilarity #### inverts_data <- read.csv(paste0(data.dir,"Invertebrates with Bray Curtis.csv"), sep = ",", header = T) # Extract only relevant columns df_inverts <- inverts_data[,c(1:6,36:37)] # Change the column "BC_meanC_all" to "inverts_BC_all" names(df_inverts)[which(names(df_inverts) == "BC_meanC_all")] <- "inverts_BC_all" # Change the column "BC_meanC_soil" to "inverts_BC_soil" names(df_inverts)[which(names(df_inverts) == "BC_meanC_soil")] <- "inverts_BC_soil" # Remove plots C4 and D4 df_inverts <- df_inverts %>% filter(Plot != "C4" & Plot != "D4") # Remove Border 2 and 4 df_inverts <- df_inverts %>% filter(site_ID != "Border2" & site_ID != "Border4") #================================================ #### Run a for-loop for soil moisture data #### timepoint.hr <- c() width.hr <- c() obs_norm <- c() obs_var_trt <- c() obs_var_region <- c() obs_var_farm <- c() obs_var_region_farm <- c() resid_norm <- c() resid_var_trt <- c() resid_var_region <- c() resid_var_farm <- c() resid_var_region_farm <- c() mod_AIC <- c() mod_Rsq_marginal <- c() mod_Rsq_conditional <- c() timepoint <- 12 width <- seq(-192,0,1) m <- 1 resp.variable <- "inverts_BC_all" for (daytime.hr in timepoint) { for (duration.hr in width) { # Use the mov.win.diff function to extract relevant soil moisture data for the given time inverval # and calculate the effect size (for the days 0, 32 and 64) df_soil <- mov.win.diff(data = df.moisture.recoded, dates = events, daytime.hr = daytime.hr, duration.hr = duration.hr, doplot 
= TRUE, list.figures = list.figures) #================================================ # Combine effect size (ES) for soil moisture and BC for invertebrates # Make sure that you DO NOT select the column "treatment" in the invertebrate data names(df_soil)[names(df_soil) == "plot_meanC_ES"] <- "soil_ES" df_joined <- full_join(df_inverts[, c("Region", "Farm", "Plot", "site_ID", "day","inverts_BC_all", "inverts_BC_soil")], df_soil[, c("Region", "Farm", "Plot", "site_ID", "day", "treatment", "soil_ES")], by = c("Region", "Farm", "Plot", "site_ID", "day")) # Exclude all rows with NAs (columns BC_meanC_all and soil_ES) df_joined <- df_joined[which(!is.na(df_joined$inverts_BC_all)),] df_joined <- df_joined[which(!is.na(df_joined$soil_ES)),] # Change region, farm and treatment factors df_joined$Region <- as.factor(df_joined$Region) df_joined$Farm <- as.factor(df_joined$Farm) df_joined$treatment <- as.factor(df_joined$treatment) #================================================ ################### ## ## ## Fit a model ## ## ## ################### # Fit a model of the ratio of the plot to the mean of the control plots as # a function Day since end of drought (numeric), the effect size of soil moisture (numeric) and treatment (Control vs. 
Drought) # Random factors included: Farm nested in Region (1|Region/Farm) lmer_full_mod <- lmer(inverts_BC_all ~ day*soil_ES*treatment + (1|Region/Farm), data = df_joined) #================================================ # Add fitted values and residuals to the raw data df_mod <- data.frame(df_joined, "Fitted" = fitted(lmer_full_mod)) df_mod$Residuals <- residuals(lmer_full_mod) #================================================ # Run anova() and extract the results aov_mod <- as.data.frame(anova(lmer_full_mod)) aov_mod$Term <- rownames(aov_mod) aov_mod <- aov_mod[,c(7,1:6)] names(aov_mod)[2:7] <- c("Sum.Sq","Mean.Sq","NumDF","DenDF","F.value","p.value") rownames(aov_mod) <- 1:nrow(aov_mod) aov_mod$Daytime <- daytime.hr aov_mod$Duration <- duration.hr aov_mod$Trial <- m if(m == 1){ aov_mod_final <- aov_mod } if(m > 1){ aov_mod_final <- rbind(aov_mod_final,aov_mod) } #================================================ # Extract model coefficients # Fixed coeff_fixef <- summary(lmer_full_mod)$coefficients rownames_fixef <- data.frame("Term" = rownames(coeff_fixef)) dimnames(coeff_fixef)[[2]] <- c("Estimate","Std.Error","DF","t.value","p.value") fixef_out <- cbind(rownames_fixef, coeff_fixef) rownames(fixef_out) <- rownames(rownames_fixef) fixef_out$Daytime <- daytime.hr fixef_out$Duration <- duration.hr fixef_out$Trial <- m if(m == 1){ fixef_out_final <- fixef_out } if(m > 1){ fixef_out_final <- rbind(fixef_out_final,fixef_out) } # Random: Farm:Region (Farm nested in Region) coeff_random.1 <- ranef(lmer_full_mod)$`Farm:Region` rownames_random.1 <- data.frame(rownames(coeff_random.1)) random_out.1 <- rownames_random.1 %>% separate(rownames.coeff_random.1., c("Farm", "Region"), ":") random_out.1 <- data.frame(random_out.1, "Intercept" = coeff_random.1$`(Intercept)`) random_out.1$Intercept_1 <- random_out.1$Intercept + fixef_out$Estimate[fixef_out$Term == "(Intercept)"] random_out.1$Daytime <- daytime.hr random_out.1$Duration <- duration.hr random_out.1$Trail <- m if(m == 
1){ random_out_final.1 <- random_out.1 } if(m > 1){ random_out_final.1 <- rbind( random_out_final.1, random_out.1) } # Random: Farm coeff_random.2 <- ranef(lmer_full_mod)$Region rownames_random.2 <- rownames(coeff_random.2) random_out.2 <- data.frame("Region" = rownames_random.2, "Intercept" = coeff_random.2$`(Intercept)`) random_out.2$Intercept_1 <- random_out.2$Intercept + fixef_out$Estimate[fixef_out$Term == "(Intercept)"] random_out.2$Daytime <- daytime.hr random_out.2$Duration <- duration.hr random_out.2$Trail <- m if(m == 1){ random_out_final.2 <- random_out.2 } if(m > 1){ random_out_final.2 <- rbind( random_out_final.2, random_out.2) } #================================================ # Define figure titles for the plots if (daytime.hr < 10) { title <- paste0("LMER Model ",duration.hr, " h period, starting at 0",daytime.hr,":00") figure.title0 <- paste0(mydir,"/Model validation/Model assumptions ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title1 <- paste0(mydir,"/Model validation/Model validation ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title2.1 <- paste0(mydir,"/Residuals/Residuals ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title2.2 <- paste0(mydir,"/Residuals/Residuals fixed effect ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title2.3 <- paste0(mydir,"/Residuals/Residuals random effect ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title3.1 <- paste0(mydir,"/Model output/Model output A ",duration.hr, " h, 0",daytime.hr,"-00.png") figure.title3.2 <- paste0(mydir,"/Model output/Model output B ",duration.hr, " h, 0",daytime.hr,"-00.png") } if (daytime.hr >= 10) { title <- paste0("LMER Model ",duration.hr, " h period, starting at ",daytime.hr,":00") figure.title0 <- paste0(mydir,"/Model validation/Model assumptions ",duration.hr, " h, ",daytime.hr,"-00.png") figure.title1 <- paste0(mydir,"/Model validation/Model validation ",duration.hr, " h, ",daytime.hr,"-00.png") figure.title2.1 <- paste0(mydir,"/Residuals/Residuals 
",duration.hr, " h, ",daytime.hr,"-00.png") figure.title2.2 <- paste0(mydir,"/Residuals/Residuals fixed effect ",duration.hr, " h, ",daytime.hr,"-00.png") figure.title2.3 <- paste0(mydir,"/Residuals/Residuals random effect ",duration.hr, " h, ",daytime.hr,"-00.png") figure.title3.1 <- paste0(mydir,"/Model output/Model output A ",duration.hr, " h, ",daytime.hr,"-00.png") figure.title3.2 <- paste0(mydir,"/Model output/Model output B ",duration.hr, " h, ",daytime.hr,"-00.png") } #================================================ # Model assumptions # Check normality of the data (invert_BC_all) norm0 <- shapiro.test(df_mod$inverts_BC_all) if(norm0$p.value < 0.001){ main1.0 <- "p < 0.001" } if(norm0$p.value >= 0.001 & norm0$p.value < 0.01){ main1.0 <- "p < 0.01" } if(norm0$p.value >= 0.01 & norm0$p.value < 0.05){ main1.0 <- "p < 0.05" } if(norm0$p.value >= 0.05 ){ main1.0 <- parse(text = paste0('p == ', round(norm0$p.value, digits = 3))) } median.inverts.BC <- median(df_mod$inverts_BC_all, na.rm = T) mean.inverts.BC <- mean(df_mod$inverts_BC_all, na.rm = T) sd.inverts.BC <- sd(df_mod$inverts_BC_all) var.inverts.BC <- var(df_mod$inverts_BC_all) plot.norm.0 <- ggplot(df_mod, aes(x = inverts_BC_all)) + geom_histogram(aes(y = ..density..), binwidth = 0.05, alpha = 0.6, fill = "lightblue", color = "grey70") + geom_density(alpha = 0.5, fill = "lightblue", color = "grey40") + theme_minimal() + stat_function(fun = dnorm, color = rgb(22, 160, 133, max = 255), size = 1, args = list(mean = mean.inverts.BC, sd = sd.inverts.BC)) + geom_vline(xintercept = mean.inverts.BC, color = "blue", size = 1) + geom_vline(xintercept = mean.inverts.BC*0.9, color = "blue", linetype = "dashed", size = 0.6) + geom_vline(xintercept = mean.inverts.BC*1.1, color = "blue", linetype = "dashed", size = 0.6) + geom_vline(xintercept = median.inverts.BC, color = "red", size = 1) + geom_vline(xintercept = median.inverts.BC*0.9, color = "red", linetype = "dashed", size = 0.6) + geom_vline(xintercept = 
median.inverts.BC*1.1, color = "red", linetype = "dashed", size = 0.6) + geom_vline(xintercept = mean.inverts.BC - sd.inverts.BC, color = "green", linetype = "dashed", size = 0.8) + geom_vline(xintercept = mean.inverts.BC + sd.inverts.BC, color = "green", linetype = "dashed", size = 0.8) + labs(x = "Bray-Curtis Dissimilarity Index\nInvertebrates", y = "Density", title = "Normality of Invertebrates Bray-Curtis Dissimilarity Index", subtitle = paste0("Shapiro's-Test: ",main1.0, "\nMedian: ",round(median.inverts.BC, digits = 4), "\nMean: ",round(mean.inverts.BC, digits = 4), "\nVariance: ",round(var.inverts.BC, digits = 4), "\nSD: ",round(sd.inverts.BC, digits = 4))) plot.qq.0 <- ggplot(df_mod, aes(sample = inverts_BC_all)) + stat_qq(color = rgb(44, 62, 80, max = 255), size = 2) + stat_qq_line(size = 1, color = rgb(22, 160, 133, max = 255)) + theme_minimal() + labs(x = "Theoretical Quantiles", y = "Sample Quantiles", title = "Invertebrates Bray-Curtis Dissimilarity Index", subtitle = "Normal Q-Q Plot") ## Check variance homogeneity # ~ treatment OBS_var_trt <- leveneTest(inverts_BC_all ~ treatment, data = df_mod) if(OBS_var_trt$`Pr(>F)`[1] < 0.001){ main.trt.0 <- "p < 0.001" } if(OBS_var_trt$`Pr(>F)`[1] >= 0.001 & OBS_var_trt$`Pr(>F)`[1] < 0.01){ main.trt.0 <- "p < 0.01" } if(OBS_var_trt$`Pr(>F)`[1] >= 0.01 & OBS_var_trt$`Pr(>F)`[1] < 0.05){ main.trt.0 <- "p < 0.05" } if(OBS_var_trt$`Pr(>F)`[1] >= 0.05 ){ main.trt.0 <- parse(text = paste0('p == ', round(OBS_var_trt$`Pr(>F)`[1], digits = 3))) } box.obs.trt <- ggplot(df_mod, aes(x = treatment, y = inverts_BC_all, fill = treatment)) + geom_boxplot(outlier.shape = 21) + theme_minimal() + theme(legend.position = "none") + scale_fill_manual(values = c("blue","red")) + labs(x = "treatment", y = "Bray-Curtis Dissimilarity Index\nInvertebrates", title = "Invertebrates Bray-Crutis Dissimilarity Index ~ treatment", subtitle = main.trt.0) # ~ Region OBS_var_region <- leveneTest(inverts_BC_all ~ Region, data = df_mod) 
if(OBS_var_region$`Pr(>F)`[1] < 0.001){ main.region.0 <- "p < 0.001" } if(OBS_var_region$`Pr(>F)`[1] >= 0.001 & OBS_var_region$`Pr(>F)`[1] < 0.01){ main.region.0 <- "p < 0.01" } if(OBS_var_region$`Pr(>F)`[1] >= 0.01 & OBS_var_region$`Pr(>F)`[1] < 0.05){ main.region.0 <- "p < 0.05" } if(OBS_var_region$`Pr(>F)`[1] >= 0.05 ){ main.region.0 <- parse(text = paste0('p == ', round(OBS_var_region$`Pr(>F)`[1], digits = 3))) } box.obs.region <- ggplot(df_mod, aes(x = Region, y = inverts_BC_all, fill = Region)) + geom_boxplot(outlier.shape = 21, alpha = 0.7) + theme_minimal() + theme(legend.position = "none") + labs(x = "Region", y = "Bray-Curtis Dissimilarity Index\nInvertebrates", title = "Invertebrates Bray-Crutis Dissimilarity Index ~ Region", subtitle = main.region.0) # ~ Farm OBS_var_farm <- leveneTest(inverts_BC_all ~ Farm, data = df_mod) if(OBS_var_farm$`Pr(>F)`[1] < 0.001){ main.farm.0 <- "p < 0.001" } if(OBS_var_farm$`Pr(>F)`[1] >= 0.001 & OBS_var_farm$`Pr(>F)`[1] < 0.01){ main.farm.0 <- "p < 0.01" } if(OBS_var_farm$`Pr(>F)`[1] >= 0.01 & OBS_var_farm$`Pr(>F)`[1] < 0.05){ main.farm.0 <- "p < 0.05" } if(OBS_var_farm$`Pr(>F)`[1] >= 0.05 ){ main.farm.0 <- parse(text = paste0('p == ', round(OBS_var_farm$`Pr(>F)`[1], digits = 3))) } box.obs.farm <- ggplot(df_mod, aes(x = Farm, y = inverts_BC_all, fill = Farm)) + geom_boxplot(outlier.shape = 21, alpha = 0.7) + scale_fill_brewer(palette = "GnBu") + theme_minimal() + theme(legend.position = "none") + labs(x = "Farm", y = "Bray-Curtis Dissimilarity Index\nInvertebrates", title = "Invertebrates Bray-Crutis Dissimilarity Index ~ Farm", subtitle = main.farm.0) # ~ Region * Farm OBS_var_region_farm <- leveneTest(inverts_BC_all ~ Region*Farm, data = df_mod) if(OBS_var_region_farm$`Pr(>F)`[1] < 0.001){ main.region.farm.0 <- "p < 0.001" } if(OBS_var_region_farm$`Pr(>F)`[1] >= 0.001 & OBS_var_region_farm$`Pr(>F)`[1] < 0.01){ main.region.farm.0 <- "p < 0.01" } if(OBS_var_region_farm$`Pr(>F)`[1] >= 0.01 & 
OBS_var_region_farm$`Pr(>F)`[1] < 0.05){ main.region.farm.0 <- "p < 0.05" } if(OBS_var_region_farm$`Pr(>F)`[1] >= 0.05 ){ main.region.farm.0 <- parse(text = paste0('p == ', round(OBS_var_region_farm$`Pr(>F)`[1], digits = 3))) } box.obs.region.farm <- ggplot(df_mod, aes(x = Region, y = inverts_BC_all, fill = Farm)) + geom_boxplot(outlier.shape = 21, alpha = 0.7) + scale_fill_brewer(palette = "GnBu") + theme_minimal() + theme(legend.position = "none") + labs(x = "Region", y = "Bray-Curtis Dissimilarity Index\nInvertebrates", title = "Invertebrates Bray-Crutis Dissimilarity Index ~ Region * Farm", subtitle = main.region.farm.0) # Combine all plots in one figure final.plot0 <- grid.arrange(plot.norm.0, plot.qq.0, box.obs.trt, box.obs.region, box.obs.farm, box.obs.region.farm, ncol = 2, top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment_recoded + (1|Region/Farm)"), theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 12))) ggsave(figure.title0, final.plot0 <- grid.arrange(final.plot0, top = tableGrob(t(title), theme = ttheme_minimal(padding = unit(c(0,8),'mm'), base_colour = "black", base_size = 16))), width = 25, height = 32, units = "cm") #================================================ ## Model validation # Observed vs fitted values # ~ treatment obs.fit.trt <- ggplot(df_mod, aes(x = Fitted, y = inverts_BC_all, color = treatment, fill = treatment)) + geom_point(shape = 21, size = 2, alpha = 0.5) + facet_wrap(~ treatment, ncol = 2) + geom_abline(slope = 1, intercept = 0) + theme_minimal() + theme(strip.background = element_rect(color = "grey50"), legend.position = "none") + scale_fill_manual(values = c("blue","red")) + scale_color_manual(values = c("blue","red")) + labs(x = "Fitted", y = "Bray-Curtis Dissimilarity Index\nInvertebrates", title = "Observed vs. 
Fitted", subtitle = "~ treatment") # ~ Region obs.fit.region <- ggplot(df_mod, aes(x = Fitted, y = inverts_BC_all, color = Region, fill = Region)) + geom_point(shape = 21, size = 2, alpha = 0.5) + facet_wrap(~ Region, ncol = 2) + geom_abline(slope = 1, intercept = 0) + theme_minimal() + theme(strip.background = element_rect(color = "grey50"), legend.position = "none") + labs(x = "Fitted", y = "Bray-Curtis Dissimilarity Index\nInvertebrates", title = "Observed vs. Fitted", subtitle = "~ Region") # ~ Farm obs.fit.farm <- ggplot(df_mod, aes(x = Fitted, y = inverts_BC_all, fill = Farm)) + geom_point(shape = 21, size = 2, alpha = 0.5, color = "grey50") + scale_fill_brewer(palette = "GnBu") + facet_wrap(~ Farm, ncol = 3) + geom_abline(slope = 1, intercept = 0) + theme_minimal() + theme(strip.background = element_rect(color = "grey50"), legend.position = "none") + labs(x = "Fitted", y = "Bray-Curtis Dissimilarity Index\nInvertebrates", title = "Observed vs. Fitted", subtitle = "~ Farm") # ~ Region * Farm obs.fit.region.farm <- ggplot(df_mod, aes(x = Fitted, y = inverts_BC_all, color = Region, fill = Region)) + geom_point(shape = 21, size = 2, alpha = 0.5) + facet_grid(Region ~ Farm) + geom_abline(slope = 1, intercept = 0) + theme_minimal() + theme(strip.background = element_rect(color = "grey50"), legend.position = "none") + labs(x = "Fitted", y = "Bray-Curtis Dissimilarity Index\nInvertebrates", title = "Observed vs. 
Fitted", subtitle = "~ Region * Farm") #================================================ # Collinearity result.coll <- check_collinearity(lmer_full_mod) result.coll[which(result.coll$VIF < 5),"Correlation"] <- "low" result.coll[which(result.coll$VIF >= 5 & result.coll$VIF < 10),"Correlation"] <- "moderate" result.coll[which(result.coll$VIF >= 10),"Correlation"] <- "high" result.coll$Correlation <- as.factor(result.coll$Correlation) result.coll$Correlation <- factor(result.coll$Correlation, levels = c("low", "moderate", "high")) mycol.coll <- c(rgb(39, 174, 96, max = 255), rgb(230, 126, 34, max = 255), rgb(228, 26, 28, max = 255)) corr.levels <- unique(result.coll$Correlation) mycol <- c() if ("low" %in% corr.levels){ mycol <- mycol.coll[1] } if ("moderate" %in% corr.levels){ mycol <- c(mycol,mycol.coll[2]) } if ("high" %in% corr.levels){ mycol <- c(mycol,mycol.coll[3]) } plot.coll <- ggplot(result.coll, aes(x = Parameter, y = VIF)) + geom_bar(stat = "identity", width = 0.7, aes(fill = Correlation)) + theme_minimal() + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + scale_fill_manual(values = mycol) + labs(x = "Parameter", y = "VIF", title = "Check for Multicollinearity", subtitle = "") #================================================ ################# ## ## ## Residuals ## ## ## ################# # Binned residuals # Function binned_residuals produces a plot if not saved to an object # However, that plot can't be further modified # Therefore, I save the output to an object and recreated the plot result.binned <- binned_residuals(lmer_full_mod) resid.inside.err <- sum(result.binned$group == "yes")/nrow(result.binned)*100 resid.inside.err <- round(resid.inside.err, digits = 2) plot.binned <- ggplot(result.binned, aes(x = xbar*100, y = ybar)) + geom_ribbon(aes(ymin = -Inf, ymax = -se), color = "grey80", fill = "grey95", alpha = 0.5) + geom_ribbon(aes(ymin = se, ymax = +Inf), color = "grey80", fill = "grey95", alpha = 0.5) + geom_hline(yintercept = 0, color 
             = "grey80") +
  geom_point(aes(color = group), size = 3) +
  theme_bw() +
  scale_color_brewer(palette = "Set1") +
  labs(x = paste0("Estimated probability of ", resp.variable),
       y = "Average residual",
       title = "Binned residuals",
       subtitle = paste0(resid.inside.err, "% of the residuals are inside the error bounds."))

# ~ Fitted
# check_heteroscedasticity() returns the p-value of a test for
# non-constant error variance; converted to a plot-ready label below
result.heteroscedasticity <- check_heteroscedasticity(lmer_full_mod)
if(result.heteroscedasticity[1] < 0.001){
  p.res.fit <- "p < 0.001"
}
if(result.heteroscedasticity[1] >= 0.001 & result.heteroscedasticity[1] < 0.01){
  p.res.fit <- "p < 0.01"
}
if(result.heteroscedasticity[1] >= 0.01 & result.heteroscedasticity[1] < 0.05){
  p.res.fit <- "p < 0.05"
}
if(result.heteroscedasticity[1] >= 0.05 ){
  p.res.fit <- parse(text = paste0('p == ', round(result.heteroscedasticity[1], digits = 3)))
}
# Residuals vs fitted with a loess smoother to reveal trends
plot.res.fit <- ggplot(df_mod, aes(x = Fitted, y = Residuals)) +
  geom_point(size = 2, color = rgb(44, 62, 80, max = 255)) +
  theme_minimal() +
  geom_smooth(method = "loess", size = 1, color = rgb(228, 26, 28, max = 255), se = F) +
  labs(x = "Fitted", y = "Residuals", title = "Residuals vs. Fitted", subtitle = p.res.fit)

# ~ treatment
# Levene test of residual variance homogeneity across treatment levels
RESID_var_trt <- leveneTest(Residuals ~ treatment, data = df_mod)
if(RESID_var_trt$`Pr(>F)`[1] < 0.001){
  main.trt <- "p < 0.001"
}
if(RESID_var_trt$`Pr(>F)`[1] >= 0.001 & RESID_var_trt$`Pr(>F)`[1] < 0.01){
  main.trt <- "p < 0.01"
}
if(RESID_var_trt$`Pr(>F)`[1] >= 0.01 & RESID_var_trt$`Pr(>F)`[1] < 0.05){
  main.trt <- "p < 0.05"
}
if(RESID_var_trt$`Pr(>F)`[1] >= 0.05 ){
  main.trt <- parse(text = paste0('p == ', round(RESID_var_trt$`Pr(>F)`[1], digits = 3)))
}
box.trt <- ggplot(df_mod, aes(x = treatment, y = Residuals, fill = treatment)) +
  geom_boxplot(outlier.shape = 21) +
  theme_minimal() +
  theme(legend.position = "none") +
  scale_fill_manual(values = c("blue","red")) +
  labs(x = "treatment", y = "Residuals", title = "Residuals ~ treatment", subtitle = main.trt)

# ~ Region
RESID_var_region <- leveneTest(Residuals ~ Region, data = df_mod)
if(RESID_var_region$`Pr(>F)`[1] < 0.001){
  main.region <- "p < 0.001"
}
if(RESID_var_region$`Pr(>F)`[1] >= 0.001 & RESID_var_region$`Pr(>F)`[1] < 0.01){
  main.region <- "p < 0.01"
}
if(RESID_var_region$`Pr(>F)`[1] >= 0.01 & RESID_var_region$`Pr(>F)`[1] < 0.05){
  main.region <- "p < 0.05"
}
if(RESID_var_region$`Pr(>F)`[1] >= 0.05 ){
  main.region <- parse(text = paste0('p == ', round(RESID_var_region$`Pr(>F)`[1], digits = 3)))
}
box.region <- ggplot(df_mod, aes(x = Region, y = Residuals, fill = Region)) +
  geom_boxplot(outlier.shape = 21, alpha = 0.7) +
  theme_minimal() +
  theme(legend.position = "none") +
  labs(x = "Region", y = "Residuals", title = "Residuals ~ Region", subtitle = main.region)

# ~ Farm
RESID_var_farm <- leveneTest(Residuals ~ Farm, data = df_mod)
if(RESID_var_farm$`Pr(>F)`[1] < 0.001){
  main.farm <- "p < 0.001"
}
if(RESID_var_farm$`Pr(>F)`[1] >= 0.001 & RESID_var_farm$`Pr(>F)`[1] < 0.01){
  main.farm <- "p < 0.01"
}
if(RESID_var_farm$`Pr(>F)`[1] >= 0.01 & RESID_var_farm$`Pr(>F)`[1] < 0.05){
  main.farm <- "p < 0.05"
}
if(RESID_var_farm$`Pr(>F)`[1] >= 0.05 ){
  main.farm <- parse(text = paste0('p == ', round(RESID_var_farm$`Pr(>F)`[1], digits = 3)))
}
box.farm <- ggplot(df_mod, aes(x = Farm, y = Residuals, fill = Farm)) +
  geom_boxplot(outlier.shape = 21, alpha = 0.7) +
  scale_fill_brewer(palette = "GnBu") +
  theme_minimal() +
  theme(legend.position = "none") +
  labs(x = "Farm", y = "Residuals", title = "Residuals ~ Farm", subtitle = main.farm)

# ~ Region * Farm
RESID_var_region_farm <- leveneTest(Residuals ~ Region*Farm, data = df_mod)
if(RESID_var_region_farm$`Pr(>F)`[1] < 0.001){
  main.region.farm <- "p < 0.001"
}
if(RESID_var_region_farm$`Pr(>F)`[1] >= 0.001 & RESID_var_region_farm$`Pr(>F)`[1] < 0.01){
  main.region.farm <- "p < 0.01"
}
if(RESID_var_region_farm$`Pr(>F)`[1] >= 0.01 & RESID_var_region_farm$`Pr(>F)`[1] < 0.05){
  main.region.farm <- "p < 0.05"
}
if(RESID_var_region_farm$`Pr(>F)`[1] >= 0.05 ){
  main.region.farm <- parse(text = paste0('p == ', round(RESID_var_region_farm$`Pr(>F)`[1], digits = 3)))
}
box.region.farm <- ggplot(df_mod, aes(x = Region, y = Residuals, fill = Farm)) +
  geom_boxplot(outlier.shape = 21, alpha = 0.7) +
  scale_fill_brewer(palette = "GnBu") +
  theme_minimal() +
  theme(legend.position = "none") +
  labs(x = "Region", y = "Residuals", title = "Residuals ~ Region * Farm", subtitle = main.region.farm)

#================================================
# Check for normal distribution of residuals
norm1 <- shapiro.test(df_mod$Residuals)
if(norm1$p.value < 0.001){
  main1 <- "p < 0.001"
}
if(norm1$p.value >= 0.001 & norm1$p.value < 0.01){
  main1 <- "p < 0.01"
}
if(norm1$p.value >= 0.01 & norm1$p.value < 0.05){
  main1 <- "p < 0.05"
}
if(norm1$p.value >= 0.05 ){
  main1 <- parse(text = paste0('p == ', round(norm1$p.value, digits = 3)))
}
# Summary statistics of the residuals, reported in the plot subtitle
median.resid <- median(df_mod$Residuals)
mean.resid <- mean(df_mod$Residuals)
sd.resid <- sd(df_mod$Residuals)
var.resid <- var(df_mod$Residuals)
# Histogram + kernel density of the residuals, with a theoretical
# normal curve (same mean/SD) and reference lines:
#   blue = mean (+/- 10%), red = median (+/- 10%), green = mean +/- 1 SD
plot.norm <- ggplot(df_mod, aes(x = Residuals)) +
  geom_histogram(aes(y = ..density..), binwidth = 0.05, alpha = 0.6, fill = "lightblue", color = "grey70") +
  geom_density(alpha = 0.5, fill = "lightblue", color = "grey40") +
  theme_minimal() +
  stat_function(fun = dnorm, color = rgb(22, 160, 133, max = 255), size = 1,
                args = list(mean = mean.resid, sd = sd.resid)) +
  geom_vline(xintercept = mean.resid, color = "blue", size = 1) +
  geom_vline(xintercept = mean.resid*0.9, color = "blue", linetype = "dashed", size = 0.6) +
  geom_vline(xintercept = mean.resid*1.1, color = "blue", linetype = "dashed", size = 0.6) +
  geom_vline(xintercept = median.resid, color = "red", size = 1) +
  geom_vline(xintercept = median.resid*0.9, color = "red", linetype = "dashed", size = 0.6) +
  geom_vline(xintercept = median.resid*1.1, color = "red", linetype = "dashed", size = 0.6) +
  geom_vline(xintercept = mean.resid - sd.resid, color = "green", linetype = "dashed", size = 0.8) +
  geom_vline(xintercept = mean.resid + sd.resid, color = "green", linetype = "dashed", size = 0.8) +
  labs(x = "Residuals", y = "Density",
       title = "Normality of residuals",
       subtitle = paste0("Shapiro's-Test: ",main1,
                         "\nMedian: ",round(median.resid, digits = 4),
                         "\nMean: ",round(mean.resid, digits = 4),
                         "\nVariance: ",round(var.resid, digits = 4),
                         "\nSD: ",round(sd.resid, digits = 4)))

plot.qq <- ggplot(df_mod, aes(sample = Residuals)) +
  stat_qq(color = rgb(44, 62, 80, max = 255), size = 2) +
  stat_qq_line(size = 1, color = rgb(22, 160, 133, max = 255)) +
  theme_minimal() +
  labs(x = "Theoretical Quantiles", y = "Sample Quantiles",
       title = "Residuals", subtitle = "Normal Q-Q Plot")

#================================================
# Normality of Random Effects
# check_model() returns (among others) the REQQ element with Q-Q data
# for each random-effect grouping factor
result.mod <- check_model(lmer_full_mod)
# ~ Region
REQQ.region <- result.mod$REQQ$Region
reqq.plot.region <- ggplot(REQQ.region, aes(x = x, y = y)) +
  geom_point(size = 2, color = rgb(44, 62, 80, max = 255)) +
  geom_errorbar(aes(ymin = conf.low, ymax = conf.high), width = 0.2) +
  geom_smooth(method = "lm", color = rgb(22, 160, 133, max = 255), size = 1, se = F) +
  theme_minimal() +
  labs(x = "Theoretical Quantiles", y = "RE Quantiles",
       title = "Normality of Random Effects", subtitle = "Region")

# ~ Farm:Region
REQQ.farm.region <- result.mod$REQQ$"Farm:Region"
reqq.plot.farm.region <- ggplot(REQQ.farm.region, aes(x = x, y = y)) +
  geom_point(size = 2, color = rgb(44, 62, 80, max = 255)) +
  geom_errorbar(aes(ymin = conf.low, ymax = conf.high), width = 0.2) +
  geom_smooth(method = "lm", color = rgb(22, 160, 133, max = 255), size = 1, se = F) +
  theme_minimal() +
  labs(x = "Theoretical Quantiles", y = "RE Quantiles",
       title = "Normality of Random Effects", subtitle = "Farm:Region")

#================================================
################################################
##                                            ##
## Plot observed vs fitted and collinearity   ##
##                                            ##
################################################
final.plot1 <- grid.arrange(obs.fit.trt, obs.fit.region,
                            obs.fit.farm, obs.fit.region.farm,
                            plot.coll,
                            ncol = 2,
                            top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment_recoded + (1|Region/Farm)"),
                                            theme = ttheme_minimal(padding = unit(c(0,8),'mm'),
                                                                   base_colour = "black", base_size = 12)))
ggsave(figure.title1,
       final.plot1 <- grid.arrange(final.plot1,
                                   top = tableGrob(t(title),
                                                   theme = ttheme_minimal(padding = unit(c(0,8),'mm'),
                                                                          base_colour = "black", base_size = 16))),
       width = 26, height = 32, units = "cm")

#================================================
################################
##                            ##
##  Plot residuals vs fitted  ##
##                            ##
################################
final.plot2 <- grid.arrange(plot.norm, plot.qq,
                            plot.res.fit, plot.binned,
                            ncol = 2,
                            top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment_recoded + (1|Region/Farm)"),
                                            theme = ttheme_minimal(padding = unit(c(0,8),'mm'),
                                                                   base_colour = "black", base_size = 12)))
ggsave(figure.title2.1,
       final.plot2 <- grid.arrange(final.plot2,
                                   top = tableGrob(t(title),
                                                   theme = ttheme_minimal(padding = unit(c(0,8),'mm'),
                                                                          base_colour = "black", base_size = 16))),
       width = 28, height = 25, units = "cm")
#================================================
########################################
##                                    ##
##  Plot residuals vs random effects  ##
##                                    ##
########################################
final.plot3 <- grid.arrange(reqq.plot.region, reqq.plot.farm.region,
                            box.trt, box.region,
                            box.farm, box.region.farm,
                            ncol = 2,
                            top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment_recoded + (1|Region/Farm)"),
                                            theme = ttheme_minimal(padding = unit(c(0,8),'mm'),
                                                                   base_colour = "black", base_size = 12)))
ggsave(figure.title2.3,
       final.plot3 <- grid.arrange(final.plot3,
                                   top = tableGrob(t(title),
                                                   theme = ttheme_minimal(padding = unit(c(0,8),'mm'),
                                                                          base_colour = "black", base_size = 16))),
       width = 25, height = 32, units = "cm")

#================================================
################################
##                            ##
##  Plot data - Model output  ##
##                            ##
################################
ylab <- expression(paste(bold("Invertebrates Bray-Curtis Dissimilarity")))

## Panels: Region ~ Farm
# Fitted data: points = fitted values; black line = pooled lm trend,
# dashed colored lines = per-treatment lm trends
g1 <- ggplot(df_mod, aes(x = day, y = Fitted)) +
  geom_point(aes(fill = treatment, color = treatment), size = 2, alpha = 0.2) +
  facet_grid(Region ~ Farm) +
  geom_smooth(method = "lm", se = F, color = "black") +
  geom_smooth(aes(color = treatment), method = "lm", se = F, linetype = "dashed") +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  labs(x = "Days since the end of the drought",
       title = "Fitted data", subtitle = "lm(fitted data ~ day)")

# Raw data: observed points, but trend lines still drawn on the fitted values
g2 <- ggplot(df_mod, aes(x = day, y = inverts_BC_all)) +
  geom_point(aes(fill = treatment, color = treatment), size = 2, alpha = 0.2) +
  facet_grid(Region ~ Farm) +
  geom_smooth(aes(y = Fitted), method = "lm", se = F, color = "black") +
  geom_smooth(aes(y = Fitted, color = treatment), method = "lm", se = F, linetype = "dashed") +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  labs(x = "Days since the end of the drought",
       title = "Raw data", subtitle = "lm(fitted data ~ day)")

## Panels: Region
# Fitted data: per-Farm line types distinguish farms within each region
g3 <- ggplot(df_mod, aes(x = day, y = Fitted)) +
  geom_point(aes(fill = treatment, color = treatment), size = 3, alpha = 0.2) +
  facet_wrap(~ Region) +
  geom_smooth(aes(color = treatment, linetype = Farm), method = "lm", se = F, size = 0.7) +
  geom_smooth(method = "lm", se = F, color = "black", size = 1) +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  labs(x = "Days since the end of the drought",
       title = "Fitted data", subtitle = "lm(fitted data ~ day)")

# Raw data
g4 <- ggplot(df_mod, aes(x = day, y = inverts_BC_all)) +
  geom_point(aes(fill = treatment, color = treatment), size = 3, alpha = 0.2) +
  facet_wrap(~ Region) +
  geom_smooth(aes(y = Fitted, color = treatment, linetype = Farm), method = "lm", se = F, size = 0.7) +
  geom_smooth(aes(y = Fitted), method = "lm", se = F, color = "black", size = 1) +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  labs(x = "Days since the end of the drought",
       title = "Raw data", subtitle = "lm(fitted data ~ day)")

# All four plots together
final.plot4 <- grid.arrange(g2,g4,g1,g3,
                            ncol = 2,
                            top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment_recoded + (1|Region/Farm)"),
                                            theme = ttheme_minimal(padding = unit(c(0,8),'mm'),
                                                                   base_colour = "black", base_size = 12)))
ggsave(figure.title3.1,
       final.plot4 <- grid.arrange(final.plot4,
                                   top = tableGrob(t(title),
                                                   theme = ttheme_minimal(padding = unit(c(0,8),'mm'),
                                                                          base_colour = "black", base_size = 16))),
       width = 40, height = 30, units = "cm")

#================================================
## Panels: Day
# Fitted data ~ soil moisture; vertical dashed line marks effect size 0
g5 <- ggplot(df_mod, aes(x = soil_ES, y = Fitted)) +
  geom_point(aes(fill = treatment, color = treatment), size = 3, alpha = 0.2) +
  facet_grid(Region ~ day) +
  geom_smooth(aes(color = treatment, linetype = Farm), method = "lm", se = F, size = 0.7) +
  geom_smooth(method = "lm", se = F, color = "black", size = 1) +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  geom_vline(xintercept = 0, color = "grey50", linetype = "dashed") +
  labs(x = "Soil moisture - Effect size",
       title = "Fitted data", subtitle = "lm(fitted data ~ soil moisture effect size)")

# Raw data
g6 <- ggplot(df_mod, aes(x = soil_ES, y = inverts_BC_all)) +
  geom_point(aes(fill = treatment, color = treatment), size = 3, alpha = 0.2) +
  facet_grid(Region ~ day) +
  geom_smooth(aes(y = Fitted, color = treatment, linetype = Farm), method = "lm", se = F, size = 0.7) +
  geom_smooth(aes(y = Fitted), method = "lm", se = F, color = "black", size = 1) +
  theme_minimal() +
  theme(strip.background = element_rect(), panel.spacing = unit(1, "lines")) +
  scale_fill_manual(values = c("blue","red")) +
  scale_color_manual(values = c("blue","red")) +
  scale_shape_manual(values = c(21,22,23,24,25)) +
  geom_vline(xintercept = 0, color = "grey50", linetype = "dashed") +
  labs(x = "Soil moisture - Effect size",
       title = "Raw data", subtitle = "lm(fitted data ~ soil moisture effect size)")

# Combine plots
final.plot5 <- grid.arrange(g6,g5,
                            ncol = 1,
                            top = tableGrob(t("lmer(inverts_BC_all ~ day*soil_ES*treatment_recoded + (1|Region/Farm)"),
                                            theme = ttheme_minimal(padding = unit(c(0,8),'mm'),
                                                                   base_colour = "black", base_size = 12)))
ggsave(figure.title3.2,
       final.plot5 <- grid.arrange(final.plot5,
                                   top = tableGrob(t(title),
                                                   theme = ttheme_minimal(padding = unit(c(0,8),'mm'),
                                                                          base_colour = "black", base_size = 16))),
       width = 24, height = 30, units = "cm")

#================================================
#######################
##                   ##
##  Save statistics  ##
##                   ##
#######################
# Store this iteration's diagnostics at position m of the
# preallocated summary vectors (m is the loop counter)
timepoint.hr[m] <- daytime.hr
width.hr[m] <- duration.hr
obs_norm[m] <- norm0$p.value
obs_var_trt[m] <- OBS_var_trt$`Pr(>F)`[1]
obs_var_region[m] <- OBS_var_region$`Pr(>F)`[1]
obs_var_farm[m] <- OBS_var_farm$`Pr(>F)`[1]
obs_var_region_farm[m] <- OBS_var_region_farm$`Pr(>F)`[1]
resid_norm[m] <- norm1$p.value
resid_var_trt[m] <- RESID_var_trt$`Pr(>F)`[1]
resid_var_region[m] <- RESID_var_region$`Pr(>F)`[1]
resid_var_farm[m] <- RESID_var_farm$`Pr(>F)`[1]
resid_var_region_farm[m] <- RESID_var_region_farm$`Pr(>F)`[1]
mod_AIC[m] <- AIC(lmer_full_mod)

##########################
##                      ##
##  Calculation of Rsq  ##
##                      ##
##########################
# For details, see Nakagawa and Schielzeth, 2012
# https://doi.org/10.1111/j.2041-210x.2012.00261.x
# Approach later modified by Johnson, 2014
# https://doi.org/10.1111/2041-210X.12225
# var(f): variance of the fixed effects
# var(r): variance of the random effects
# var(e): variance of the model residuals
# Marginal
# var(f) / [var(f) + var(r) + var(e)]
mod_Rsq_marginal[m] <- model_performance(lmer_full_mod)$R2_marginal
# Conditional
# [var(f) + var(r)] / [var(f) + var(r) + var(e)]
mod_Rsq_conditional[m] <- model_performance(lmer_full_mod)$R2_conditional

# Increase m
m <- m + 1
# remove the model object
rm(lmer_full_mod)
}
}

#================================================
# Combine all summary vectors into a data frame
mod_summary <- data.frame(timepoint.hr, width.hr,
                          obs_norm, obs_var_trt, obs_var_region,
                          obs_var_farm, obs_var_region_farm,
                          resid_norm, resid_var_trt, resid_var_region,
                          resid_var_farm, resid_var_region_farm,
                          mod_AIC, mod_Rsq_marginal, mod_Rsq_conditional)
#================================================
# Export summary and output of the models
write.table(mod_summary, paste0(mydir.data, "/LMER Model summary - inverts BC vs soil moisture trt 2.csv"),
            sep = ",", row.names = F)
write.table(aov_mod_final, paste0(mydir.data, "/LMER Anova output - inverts BC vs soil moisture trt 2.csv"),
            sep = ",", row.names = F)
write.table(fixef_out_final, paste0(mydir.data, "/LMER Model output fixef - inverts BC vs soil moisture trt 2.csv"),
            sep = ",", row.names = F)
write.table(random_out_final.1, paste0(mydir.data, "/LMER Model output random Region - inverts BC vs soil moisture trt 2.csv"),
            sep = ",", row.names = F)
write.table(random_out_final.2, paste0(mydir.data, "/LMER Model output random Farm Region - inverts BC vs soil moisture trt 2.csv"),
            sep = ",", row.names = F)

#================================================
# Explore the summary
par(mfrow = c(1,1), oma = c(0,0,0,0))
# Bold axis/legend titles for the two R-squared flavors
mar.Rsq <- expression(paste(bold("Marginal")~bolditalic("R"^"2")))
con.Rsq <- expression(paste(bold("Conditional")~bolditalic("R"^"2")))

# AIC vs. Window width and marginal R2
# (negative grey dashed verticals presumably mark window-width
#  breakpoints used elsewhere in the workflow — TODO confirm)
gp1 <- ggplot(mod_summary, aes(x = width.hr, y = mod_AIC)) +
  geom_point(aes(color = mod_Rsq_marginal, fill = mod_Rsq_marginal),
             shape = 21, size = 4, alpha = 0.5) +
  scale_color_viridis_c(name = mar.Rsq) +
  scale_fill_viridis_c(name = mar.Rsq) +
  theme_minimal() +
  theme(plot.title = element_text(face = "bold")) +
  labs(x = expression(paste(bold("Window width (h)"))),
       y = expression(paste(bold("AIC"))),
       title = "LMER - Invertebrates Bray-Curtis Dissimilarity",
       subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") +
  geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5)
ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - AIC vs Window width and mar R2.png"),
       gp1, width = 20, height = 16, units = "cm")

# AIC vs. Window width and conditional R2
gp2 <- ggplot(mod_summary, aes(x = width.hr, y = mod_AIC)) +
  geom_point(aes(color = mod_Rsq_conditional, fill = mod_Rsq_conditional),
             shape = 21, size = 4, alpha = 0.5) +
  scale_color_viridis_c(name = con.Rsq) +
  scale_fill_viridis_c(name = con.Rsq) +
  theme_minimal() +
  theme(plot.title = element_text(face = "bold")) +
  labs(x = expression(paste(bold("Window width (h)"))),
       y = expression(paste(bold("AIC"))),
       title = "LMER - Invertebrates Bray-Curtis Dissimilarity",
       subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") +
  geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5)
ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - AIC vs Window width and con R2.png"),
       gp2, width = 20, height = 16, units = "cm")

# Both combined
# Move legends below the panels before placing the plots side by side
gp1 <- gp1 +
  theme(legend.position = "bottom") +
  guides(fill = guide_colorbar(title.position = "top", title.hjust = 0.5,
                               direction = "horizontal", barwidth = unit(5, "cm")),
         color = guide_colorbar(title.position = "top", title.hjust = 0.5,
                                direction = "horizontal", barwidth = unit(5, "cm")))
gp2 <- gp2 +
  theme(legend.position = "bottom") +
  guides(fill = guide_colorbar(title.position = "top", title.hjust = 0.5,
                               direction = "horizontal", barwidth = unit(5, "cm")),
         color = guide_colorbar(title.position = "top", title.hjust = 0.5,
                                direction = "horizontal", barwidth = unit(5, "cm")))
# gp1 | gp2 uses patchwork's side-by-side composition operator
ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - AIC vs Window width R2.png"),
       gp1 | gp2, width = 28, height = 16, units = "cm")

# Marginal R2 vs. AIC
gp3 <- ggplot(mod_summary, aes(x = mod_AIC, y = mod_Rsq_marginal, fill = width.hr, color = width.hr)) +
  geom_point(size = 4, shape = 21, alpha = 0.5) +
  scale_fill_viridis_c(name = "Window width (h)") +
  scale_color_viridis_c(name = "Window width (h)") +
  theme_minimal() +
  theme(plot.title = element_text(face = "bold"),
        legend.title = element_text(face = "bold")) +
  labs(x = expression(paste(bold("AIC"))), y = mar.Rsq,
       title = "LMER - Invertebrates Bray-Curtis Dissimilarity",
       subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h")
ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Marginal R2 vs AIC.png"),
       gp3, width = 20, height = 16, units = "cm")

# Conditional R2 vs. AIC
gp4 <- ggplot(mod_summary, aes(x = mod_AIC, y = mod_Rsq_conditional, fill = width.hr, color = width.hr)) +
  geom_point(size = 4, shape = 21, alpha = 0.5) +
  scale_fill_viridis_c(name = "Window width (h)") +
  scale_color_viridis_c(name = "Window width (h)") +
  theme_minimal() +
  theme(plot.title = element_text(face = "bold"),
        legend.title = element_text(face = "bold")) +
  labs(x = expression(paste(bold("AIC"))), y = con.Rsq,
       title = "LMER - Invertebrates Bray-Curtis Dissimilarity",
       subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h")
ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Conditional R2 vs AIC.png"),
       gp4, width = 20, height = 16, units = "cm")

#################
##             ##
##  Residuals  ##
##             ##
#################
ylab <- expression(paste(bolditalic("P"),bold("-value")))
# Shapiro.test
# Shapiro p-value of the residuals across all fitted models,
# colored by conditional R2 and sized by AIC
p1 <- ggplot(mod_summary, aes(x = width.hr, y = resid_norm)) +
  geom_point(aes(color = mod_Rsq_conditional, fill = mod_Rsq_conditional, size = mod_AIC),
             shape = 21, alpha = 0.5) +
  scale_color_viridis_c(name = con.Rsq) +
  scale_fill_viridis_c(name = con.Rsq) +
  theme_minimal() +
  theme(plot.title = element_text(face = "bold"),
        axis.title.x = element_text(face = "bold"),
        legend.title = element_text(face = "bold")) +
  labs(x = "Window width (h)", y = ylab,
       title = "LMER - Invertebrates Bray-Curtis Dissimilarity\nShapiro's Test Residuals",
       subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") +
  geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5)
ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Shapiro test.png"),
       p1, width = 20, height = 16, units = "cm")

# leveneTest ~ Region
p2 <- ggplot(mod_summary, aes(x = width.hr, y = resid_var_region)) +
  geom_point(aes(color = mod_Rsq_conditional, fill = mod_Rsq_conditional, size = mod_AIC),
             shape = 21, alpha = 0.5) +
  scale_color_viridis_c(name = con.Rsq) +
  scale_fill_viridis_c(name = con.Rsq) +
  theme_minimal() +
  theme(plot.title = element_text(face = "bold"),
        axis.title.x = element_text(face = "bold"),
        legend.title = element_text(face = "bold")) +
  labs(x = "Window width (h)", y = ylab,
       title = "LMER - Invertebrates Bray-Curtis Dissimilarity\nResiduals ~ Region",
       subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") +
  geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5)
ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Levene Test Region.png"),
       p2, width = 20, height = 16, units = "cm")

# leveneTest ~ Farm
p3 <- ggplot(mod_summary, aes(x = width.hr, y = resid_var_farm)) +
  geom_point(aes(color = mod_Rsq_conditional, fill = mod_Rsq_conditional, size = mod_AIC),
             shape = 21, alpha = 0.5) +
  scale_color_viridis_c(name = con.Rsq) +
  scale_fill_viridis_c(name = con.Rsq) +
  theme_minimal() +
  theme(plot.title = element_text(face = "bold"),
        axis.title.x = element_text(face = "bold"),
        legend.title = element_text(face = "bold")) +
  labs(x = "Window width (h)", y = ylab,
       title = "LMER - Invertebrates Bray-Curtis Dissimilarity\nResiduals ~ Farm",
       subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") +
  geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5)
ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Levene Test Farm.png"),
       p3, width = 20, height = 16, units = "cm")

# leveneTest ~ Region * Farm
p4 <- ggplot(mod_summary, aes(x = width.hr, y = resid_var_region_farm)) +
  geom_point(aes(color = mod_Rsq_conditional, fill = mod_Rsq_conditional, size = mod_AIC),
             shape = 21, alpha = 0.5) +
  scale_color_viridis_c(name = con.Rsq) +
  scale_fill_viridis_c(name = con.Rsq) +
  theme_minimal() +
  theme(plot.title = element_text(face = "bold"),
        axis.title.x = element_text(face = "bold"),
        legend.title = element_text(face = "bold")) +
  labs(x = "Window width (h)", y = ylab,
       title = "LMER - Invertebrates Bray-Curtis Dissimilarity\nResiduals ~ Region * Farm",
       subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") +
  geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5)
ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Levene Test Region Farm.png"),
       p4, width = 20, height = 16, units = "cm")

################
##            ##
##  p-values  ##
##            ##
################
# Rename the first two summary columns so they match the keys of
# aov_mod_final for the join below
names(mod_summary)[1:2] <- c("Daytime","Duration")
mod_output <- left_join(aov_mod_final,
                        mod_summary[,c("Daytime","Duration","mod_AIC","mod_Rsq_marginal","mod_Rsq_conditional")],
                        by = c("Daytime","Duration"))
# Fix the display order of the model terms in the facets
mod_output$Term <- as.factor(mod_output$Term)
mod_output$Term <- factor(mod_output$Term,
                          levels = c("day","soil_ES","treatment",
                                     "day:soil_ES","day:treatment","soil_ES:treatment",
                                     "day:soil_ES:treatment"))
# p-values vs. window width
p5 <- ggplot(mod_output, aes(x = Duration, y = p.value)) +
  geom_point(aes(color = mod_Rsq_marginal, fill = mod_Rsq_marginal),
             shape = 21, alpha = 0.3, size = 3) +
  facet_wrap(~ Term, ncol = 3) +
  scale_color_viridis_c(name = mar.Rsq) +
  scale_fill_viridis_c(name = mar.Rsq) +
  theme_minimal() +
  theme(strip.background = element_rect(),
        plot.title = element_text(face = "bold"),
        axis.title.x = element_text(face = "bold"),
        legend.title = element_text(face = "bold"),
        panel.spacing = unit(1, "line")) +
  labs(x = "Window width (h)", y = ylab,
       title = "LMER - Invertebrates Bray-Curtis Dissimilarity",
       subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Region/Farm\nDaytime: 12:00 h") +
  geom_hline(yintercept = 0.05, linetype = "dashed") +
  scale_y_continuous(limits = c(0,1),
                     breaks = c(0,0.05,0.2,0.4,0.6,0.8,1),
                     labels = c("0",expression(paste(bold("0.05"))),"0.2","0.4","0.6","0.8","1.0")) +
  geom_vline(xintercept = -0.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -15.5, color = "grey80", linetype = "dashed", size = 0.5) +
  geom_vline(xintercept = -38.5, color = "grey80", linetype = "dashed", size = 0.5) +
geom_vline(xintercept = -79.5, color = "grey80", linetype = "dashed", size = 0.5) ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - P values vs Window width.png"), p5, width = 24, height = 24, units = "cm") # p-values as barchart (significant vs non significant) mod_output[which(mod_output$p.value < 0.05), "Significant"] <- "yes" mod_output[which(mod_output$p.value >= 0.05), "Significant"] <- "no" mod_sig <- mod_output %>% group_by(Term,Significant) %>% summarise(n = n()) %>% mutate(freq = n / sum(n) * 100) p6 <- ggplot(mod_sig, aes(x = Term, y = freq)) + geom_bar(stat = "identity", width = 0.8, aes(fill = Significant)) + scale_fill_brewer(palette = "Set1") + theme_minimal() + theme(strip.background = element_rect(), axis.title = element_text(face = "bold"), axis.text.x = element_text(angle = 45, hjust = 1), plot.title = element_text(face = "bold")) + labs(x = "Term", y = "Frequency (%)", title = "LMER - Invertebrates Bray-Curtis Dissimilarity", subtitle = "Fixed: day * soil moisture effect size * treatment recoded\nRandom: Farm\nDaytime: 12:00 h") ggsave(paste0(mydir, "/LMER Model validation Inverts vs Soil Moisture and Trt recoded - Significance of the terms.png"), p6, width = 18, height = 15, units = "cm")
\name{SimpleLearner-package} \alias{SimpleLearner-package} \alias{SimpleLearner} \docType{package} \title{ Basis Learner Algorithm for Deep Networks} \description{ Augments a given basis with a data driven deep neural architecture described in the reference.} \details{ The package implements the basis augmentation algorithm in the reference. This is done in the \code{\link{makeBasis.slearner}} function. The augmented basis, essentially a matrix object, can then be used for any learning algorithm. A convenience wrapper for SVM is implemented in \code{\link{svm.slearner}}.} \author{ Jonathan Rosenblatt <john.ros.work@gmail.com> based on Matlab code by Ohad Shamir. Maintainer: Jonathan Rosenblatt <john.ros.work@gmail.com> } \references{ Livni, Roi, Shai Shalev-Shwartz, and Ohad Shamir. "A Provably Efficient Algorithm for Training Deep Networks." arXiv:1304.7045 }
/R/Package/man/SimpleLearner-package.Rd
no_license
Libardo1/SimpleLearner
R
false
false
881
rd
\name{SimpleLearner-package} \alias{SimpleLearner-package} \alias{SimpleLearner} \docType{package} \title{ Basis Learner Algorithm for Deep Networks} \description{ Augments a given basis with a data driven deep neural architechture described in the reference.} \details{ The package implements the basis augmentation algorithm in the reference. This is done in the \code{\link{makeBasis.slearner}} function. The augmented basis, essentially a matrix object, can then be used for any learning algorith. A convenience wrapper for SVM is implemented in \code{\link{svm.slearner}}.} \author{ Jonathan Rosenblatt<john.ros.work@gmail.com> based on Matlab code by Ohad Shamir. Maintainer: Jonathan Rosenblatt <john.ros.work@gmail.com> } \references{ Livni, Roi, Shai Shalev-Shwartz, and Ohad Shamir. "A Provably Efficient Algorithm for Training Deep Networks." arXiv:1304.7045 }
# plot3.R — plot the three energy sub-metering series for 2007-02-01/02
# from the UCI "Individual household electric power consumption" data set.

filename <- "household_power_consumption.txt"

# Download and unpack the raw data only if it is not already present.
if (!file.exists(filename)) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                "./Electric power consumption.zip", method = "curl")
  unzip("Electric power consumption.zip")
}

# "?" encodes missing values in this data set; first two columns are
# character Date/Time, the remaining seven are numeric measurements.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?",
                   colClasses = c("character", "character", "numeric", "numeric",
                                  "numeric", "numeric", "numeric", "numeric", "numeric"))

# Keep only the two target days (1-2 February 2007).
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data <- subset(data, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))

## Remove incomplete observation
data <- data[complete.cases(data), ]

## Combine Date and Time column into a single POSIXct timestamp
dateTime <- paste(data$Date, data$Time)
## Name the vector
dateTime <- setNames(dateTime, "DateTime")
## Remove Date and Time column
data <- data[, !(names(data)) %in% c("Date", "Time")]
## Add DateTime column
data <- cbind(dateTime, data)
data$dateTime <- as.POSIXct(dateTime)

# Draw the three sub-metering series on one time axis.
# FIX: the y-axis label previously read "Global Active Power (kilowatts)",
# which describes a different variable than the Sub_metering_* series
# actually plotted here; it now matches the plotted data.
with(data, {
  plot(Sub_metering_1 ~ dateTime, type = "l",
       ylab = "Energy sub metering", xlab = "")
  lines(Sub_metering_2 ~ dateTime, col = "Red")
  lines(Sub_metering_3 ~ dateTime, col = "Blue")
})
legend("topright", col = c("black", "red", "blue"), lwd = c(1, 1, 1),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Snapshot the screen device to a 480x480 PNG.
dev.copy(png, "plot3.png", width = 480, height = 480)
dev.off()
/plot3.R
no_license
roshnilofar/GGplot-package-exploration-in-R
R
false
false
1,428
r
filename <- "household_power_consumption.txt" if(!file.exists(filename)){ download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "./Electric power consumption.zip",method="curl") unzip("Electric power consumption.zip") } data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric')) data$Date <- as.Date(data$Date, "%d/%m/%Y") data <- subset(data,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2")) ## Remove incomplete observation data <- data[complete.cases(data),] ## Combine Date and Time column dateTime <- paste(data$Date, data$Time) ## Name the vector dateTime <- setNames(dateTime, "DateTime") ## Remove Date and Time column data <- data[ ,!(names(data)) %in% c("Date","Time")] ## Add DateTime column data <- cbind(dateTime, data) data$dateTime <- as.POSIXct(dateTime) with(data, { plot(Sub_metering_1~dateTime, type="l", ylab="Global Active Power (kilowatts)", xlab="") lines(Sub_metering_2~dateTime,col='Red') lines(Sub_metering_3~dateTime,col='Blue') }) legend("topright", col=c("black", "red", "blue"), lwd=c(1,1,1), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) dev.copy(png,"plot3.png", width=480, height=480) dev.off()
# Exercise-1: practice with basic syntax

# Create a variable `hometown` that stores the city in which you were born
hometown <- 'Mukilteo'

# Assign your name to the variable `my.name`
my.name <- 'Justin'

# Assign your height to a variable `my.height`
my.height <- 68

# Create a variable `puppies` equal to the number of puppies you'd like to have
puppies <- 2

# Create a variable `puppy.price`, which is how expensive you think a puppy is
puppy.price <- 100

# Create a variable `total.cost` that has the total cost of all of your puppies
# FIX: derive the total from the inputs above instead of hard-coding 200
# (2 puppies * $100 = $200, so the value is unchanged).
total.cost <- puppies * puppy.price

# Create a boolean variable `too.expensive`, set to true if the cost is greater than $1,000
too.expensive <- total.cost > 1000

# Create a variable `max.puppies`, which is the number of puppies you can afford for $1K.
max.puppies <- 1000 / puppy.price
/exercise-1/exercise.R
permissive
justinwang98/ch5-r-intro
R
false
false
806
r
# Exercise-1: practice with basic syntax # Create a variable `hometown` that stores the city in which you were born hometown <- 'Mukilteo' # Assign your name to the variable `my.name` my.name <- 'Justin' # Assign your height to a variable `my.height` my.height <- 68 # Create a variable `puppies` equal to the number of puppies you'd like to have puppies <- 2 # Create a variable `puppy.price`, which is how expensive you think a puppy is puppy.price <- 100 # Create a variable `total.cost` that has the total cost of all of your puppies total.cost <- 200 # Create a boolean variable `too.expensive`, set to true if the cost is greater than $1,000 too.expensive <- total.cost > 1000 # Create a variable `max.puppies`, which is the nuber of puppies you can afford for $1K. max.puppies <- 1000 / puppy.price
## Caching the inverse of a matrix.
##
## makeCacheMatrix wraps a matrix together with a memoized inverse;
## cacheSolve returns the cached inverse when available and computes
## (and stores) it otherwise.

## Build a cache wrapper around an invertible matrix.
## Input  - an invertible matrix `x`
## Output - a list of accessor functions: set, get, setinverse, getinverse
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # Replace the wrapped matrix and invalidate any cached inverse.
    set = function(new_matrix) {
      x <<- new_matrix
      cached_inverse <<- NULL
    },
    # Return the wrapped matrix.
    get = function() x,
    # Store a freshly computed inverse in the cache.
    setinverse = function(inv) cached_inverse <<- inv,
    # Return the cached inverse (NULL when not yet computed).
    getinverse = function() cached_inverse
  )
}

## Return the inverse of the matrix held by `MatrixList`, reusing the
## cached result when one exists.
## Input  - a list produced by makeCacheMatrix
## Output - the matrix inverse of the wrapped matrix
cacheSolve <- function(MatrixList, ...) {
  inv <- MatrixList$getinverse()
  # Fast path: a previously computed inverse is still valid.
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Slow path: compute, memoize, and return the inverse.
  inv <- solve(MatrixList$get())
  MatrixList$setinverse(inv)
  inv
}
/cachematrix.R
no_license
leen21/ProgrammingAssignment2
R
false
false
1,629
r
## Catching Inverse of Matrix ## Cache Matrix returns earlier computed Matrix if the same matrix is ## passed to function again. ## Input Parameter - Irreversible Matrix ## Output Parameter - List of functions containing Set, Get, SetInverse, ## GetInverse functions makeCacheMatrix <- function(x = matrix()) { InverseMatrix <- NULL ##Set the Matrix Set<- function(NewMatrix){ x <<- NewMatrix InverseMatrix <<- NULL } ##Get the Matrix Get<- function() x ##Set the Inverse Matrix SetInverse <- function(MatrixInverse){ InverseMatrix <<- MatrixInverse } ##Get Inverse Matrix GetInverse <- function() InverseMatrix ##Return List of functions for Get Set of Matrix ##and its Inverse Matrix list(set = Set, get = Get, setinverse = SetInverse, getinverse = GetInverse) } ## CacheSolve ## For new Matrix computes the Inverse Matrix and reurns earlier computated Inverse MAtrix ## if calculated earlier maintained in cache. ## Input Parameter - List of functions getting used for computation of Inverse Matrix ## Output Parameter - Matrix inverse of passed Matrix cacheSolve <- function(MatrixList, ...){ ##Get the Inverse Matrix InverseMatrix <- MatrixList$getinverse() ## Get the Cached Matrix if the Inverse Matrix is not NULL if(!is.null(InverseMatrix)){ message("getting cached data") return(InverseMatrix) }else { ## Compute the Inverse Matrix and set it. Matrix <- MatrixList$get() InverseMatrix <- solve(Matrix) MatrixList$setinverse(InverseMatrix) InverseMatrix } }
# Build a comparison table of MCMC efficiency (BFG code vs. improved code)
# across five linear-model variants. Each .RData file provides a list of
# result objects with $Psamples (mcmc samples), optionally $niter, and $t2
# (timings).
load("results_C_BFG.RData")
load("results_C_better.RData")
load("results_conj_bin_BFG.RData")
load("results_conj_bin_better.RData")
load("results_WI.RData")
load("results_WI_better.RData")
load("results_NI.RData")
load("results_NI_better.RData")
load("results_L.RData")
load("results_L_better.RData")

# Reduce one result object to three efficiency metrics:
# mean ESS of the beta coefficients per saved sample, iterations per second,
# and mean ESS per second (time = sum of the first two entries of $t2).
summarize_one_result <- function(result) {
  cols <- colnames(result$Psamples[[1]])
  # Select only the regression-coefficient columns (names containing "beta").
  beta_cols <- grepl("beta", cols)
  meanESS <- mean(coda::effectiveSize(result$Psamples[[1]][,beta_cols]))
  Ns <- nrow(result$Psamples[[1]]) # Number of saved samples
  Nit <- result$niter # Different cases have different ratio of niter / nburnin
  # Fallback iteration count when the result object does not record niter.
  if(is.null(Nit)) Nit <- 10000
  c(ESS_per_Ns = meanESS/Ns,
    Nit_per_time = Nit/sum(result$t2[1:2]),
    meanESS_per_time = meanESS/sum(result$t2[1:2]))
}

## Each res_* matrix has one row per case; columns 1-3 are the BFG metrics,
## columns 4-6 the improved-code metrics.
res_conj <- cbind(
  do.call('rbind', lapply(results_C_BFG, summarize_one_result)),
  do.call('rbind', lapply(results_C_better, summarize_one_result))
)
res_conj_bin <- cbind(
  do.call('rbind', lapply(results_conj_bin_BFG, summarize_one_result)),
  do.call('rbind', lapply(results_conj_bin_better, summarize_one_result))
)
res_WI <- cbind(
  do.call('rbind', lapply(results_WI, summarize_one_result)),
  do.call('rbind', lapply(results_WI_better, summarize_one_result))
)
res_NI <- cbind(
  do.call('rbind', lapply(results_NI, summarize_one_result)),
  do.call('rbind', lapply(results_NI_better, summarize_one_result))
)
res_L <- cbind(
  do.call('rbind', lapply(results_L, summarize_one_result)),
  do.call('rbind', lapply(results_L_better, summarize_one_result))
)

# Stack all model variants, then append an "Improvement" column:
# column 6 (better ESS/t) divided by column 3 (BFG ESS/t).
all_results <- do.call('rbind', list(res_conj, res_conj_bin, res_WI, res_NI, res_L))
all_results <- cbind(all_results, all_results[,6]/all_results[,3])
colnames(all_results) <- c(rep( c("ESS/Ns", "Nit/t", "ESS/t"), 2), "Better by")

## -----------------------------------------------------------------------------
library(kableExtra)
# Render the table with a grouped header and per-model row blocks.
# NOTE(review): the pack_rows row ranges (1-6, 7-11, 12-15, 16-20, 21-28)
# hard-code the number of cases per variant — verify they still match the
# loaded result lists if the inputs change.
kbl(all_results, digits = 2) %>%
  kable_classic() %>%
  add_header_above(c(" " = 1, "BFG" = 3, "Better code" = 3, "Improvement" = 1)) %>%
  pack_rows("LM-C", 1, 6) %>%
  pack_rows("LM-C Bin", 7, 11) %>%
  pack_rows("LM-WI", 12, 15) %>%
  pack_rows("LM-NI", 16, 20) %>%
  pack_rows("LM-Lasso", 21, 28)
/blog_posts/LM_comparisons_Beraha_etal/make_results_table.R
permissive
nimble-dev/nimble-demos
R
false
false
2,282
r
load("results_C_BFG.RData") load("results_C_better.RData") load("results_conj_bin_BFG.RData") load("results_conj_bin_better.RData") load("results_WI.RData") load("results_WI_better.RData") load("results_NI.RData") load("results_NI_better.RData") load("results_L.RData") load("results_L_better.RData") summarize_one_result <- function(result) { cols <- colnames(result$Psamples[[1]]) beta_cols <- grepl("beta", cols) meanESS <- mean(coda::effectiveSize(result$Psamples[[1]][,beta_cols])) Ns <- nrow(result$Psamples[[1]]) # Number of saved samples Nit <- result$niter # Different cases have different ratio of niter / nburnin if(is.null(Nit)) Nit <- 10000 c(ESS_per_Ns = meanESS/Ns, Nit_per_time = Nit/sum(result$t2[1:2]), meanESS_per_time = meanESS/sum(result$t2[1:2])) } ## res_conj <- cbind( do.call('rbind', lapply(results_C_BFG, summarize_one_result)), do.call('rbind', lapply(results_C_better, summarize_one_result)) ) res_conj_bin <- cbind( do.call('rbind', lapply(results_conj_bin_BFG, summarize_one_result)), do.call('rbind', lapply(results_conj_bin_better, summarize_one_result)) ) res_WI <- cbind( do.call('rbind', lapply(results_WI, summarize_one_result)), do.call('rbind', lapply(results_WI_better, summarize_one_result)) ) res_NI <- cbind( do.call('rbind', lapply(results_NI, summarize_one_result)), do.call('rbind', lapply(results_NI_better, summarize_one_result)) ) res_L <- cbind( do.call('rbind', lapply(results_L, summarize_one_result)), do.call('rbind', lapply(results_L_better, summarize_one_result)) ) all_results <- do.call('rbind', list(res_conj, res_conj_bin, res_WI, res_NI, res_L)) all_results <- cbind(all_results, all_results[,6]/all_results[,3]) colnames(all_results) <- c(rep( c("ESS/Ns", "Nit/t", "ESS/t"), 2), "Better by") ## ----------------------------------------------------------------------------- library(kableExtra) kbl(all_results, digits = 2) %>% kable_classic() %>% add_header_above(c(" " = 1, "BFG" = 3, "Better code" = 3, "Improvement" = 1)) %>% 
pack_rows("LM-C", 1, 6) %>% pack_rows("LM-C Bin", 7, 11) %>% pack_rows("LM-WI", 12, 15) %>% pack_rows("LM-NI", 16, 20) %>% pack_rows("LM-Lasso", 21, 28)
#' Function for Holling's type III functional response
#'
#' Computes the Holling type III curve \code{a * x^2 / (b^2 + x^2)}, a
#' sigmoid that rises from 0 and saturates at \code{a}.
#'
#' @param x value (e.g. resource availability); may be a numeric vector
#' @param a parameter for Holling type III (asymptote of the curve)
#' @param b parameter for Holling type III (value of \code{x} at which the
#'   response reaches \code{a / 2}); the inflection point is at
#'   \code{b / sqrt(3)}
#' @return A numeric vector the same length as \code{x} with the type III
#'   response, bounded in \code{[0, a)} for \code{x >= 0}.
#' @keywords sigmoid
#' @export
#' @examples
#'
#' x <- seq(0, 1, length = 100)
#' a <- 0.8
#' b <- 0.3
#' plot(x, Holling3(x, a, b), type = "l", ylim = c(0, a))
#' abline(h = a)
#' # point of inflection
#' points(b / sqrt(3), Holling3(b / sqrt(3), a, b), pch = 8, col = "red")
#' lines(x, x, col = "darkgrey")
#' # when use equals availability
#' abline(h = a/2 + sqrt((a/2)^2 - b^2), lty = 2)
#' abline(v = a/2 + sqrt((a/2)^2 - b^2), lty = 2)
#' abline(h = a/2 - sqrt((a/2)^2 - b^2), lty = 2)
#' abline(v = a/2 - sqrt((a/2)^2 - b^2), lty = 2)
#'
Holling3 <- function(x, a, b) {
  a * x * x / (b * b + x * x)
}
/R/Holling3.R
permissive
cdupstats/FunResp
R
false
false
1,723
r
#' Function for Holling's type III #' #' #' @param x value #' @param a parameter for Holling type I (asymptote) #' @param b parameter for Holling type I #' @return #' @keywords sigmoid #' @export #' @examples #' #' x <- seq(0,1, length=100) #' a <- 0.8 #' b <- 0.3 #' plot(x, Holling3(x,a,b), type ="l", ylim =c(0,a)) #' abline(h = a) #' #point of inflection #' points(b/sqrt(3), Holling3(b/sqrt(3),a ,b), pch = 8, col ="red") #' lines(x,x,col="darkgrey") #' #when ues equals avaiability #' abline(h = a/2 + sqrt((a/2)^2 - b^2), lty = 2) #' abline(v = a/2 + sqrt((a/2)^2 - b^2), lty = 2) #' abline(h = a/2 - sqrt((a/2)^2 - b^2), lty = 2) #' abline(v = a/2 - sqrt((a/2)^2 - b^2), lty = 2) #' #' #' par(mfrow =c(5,5),oma=c(4,4,0,0)) #' #' for(a in c(0.1,0.25,0.5,0.8, 1)){ #' for(b in c(0.05,0.25,0.5,0.8, 1)){ #' plot(x,Holling3(x,a,b),type="l",las=1,bty="l",xlab="",ylab="",mgp=c(1.2,0,0),cex.lab=4,lwd=4,ylim=c(0,1),yaxt="n",xaxt="n")#,main=paste("a=",a," , b=",b),cex.main=2) #' lines(x,x,col="darkgrey") #' axis(1,c(0,0.5,1),c(0,0.5,1)) #' axis(2,c(0,0.5,1),c(0,0.5,1)) #' text(0.1,.95,paste("a=",a),col="red",cex=2) #' text(0.35,.95,paste( "b=",b),col="blue",cex=2) #' points(b/sqrt(3), Holling3(b/sqrt(3),a ,b), pch = 8, col ="red", cex=2) #' abline(h=a,col="red") #' abline(h = a/2 + sqrt((a/2)^2 - b^2), lty = 2) #' abline(v = a/2 + sqrt((a/2)^2 - b^2), lty = 2) #' abline(h = a/2 - sqrt((a/2)^2 - b^2), lty = 2) #' abline(v = a/2 - sqrt((a/2)^2 - b^2), lty = 2) #' axis(1,b,"b",col.lab="blue",cex.axis=2) #' axis(2,a,"a",col.lab="red",las=1,cex.axis=2) #' }} #' title(ylab="Use",xlab="Availability",outer=TRUE,cex.lab=3,mgp=c(1.2,0,0)) #' Holling3 <- function(x,a,b){ a*x*x/(b*b+x*x) }
# Plot total yearly PM2.5 emissions (in megatons) from the NEI data set
# as a bar chart written to ../plot1.png.

## Set working folder
setwd("~/GitHub/ExData_Project2/data")

## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
# NOTE(review): SCC is loaded but never used in this script — confirm it is
# needed (it may be required by companion plot scripts).
SCC <- readRDS("Source_Classification_Code.rds")

# Aggregates
# Sum emissions per year, then convert tons to megatons (divide by 1e6).
Emissions <- aggregate(NEI[, 'Emissions'], by = list(NEI$year), FUN = sum)
Emissions$PM25 <- round(Emissions[, 2] / 1000000, 2)

# Render the bar chart straight to a PNG device; Group.1 holds the years
# produced by aggregate() above.
png(filename = "../plot1.png")
barplot(Emissions$PM25, names.arg = Emissions$Group.1,
        main = expression('Total Emission of PM'[2.5]),
        xlab = 'Year',
        ylab = expression(paste('PM', ''[2.5], ' in Megatons'))
)
dev.off()
/Project2_load_data.R
no_license
timagrove/ExData_Project2
R
false
false
616
r
## Set working folder setwd("~/GitHub/ExData_Project2/data") ## This first line will likely take a few seconds. Be patient! NEI <- readRDS("summarySCC_PM25.rds") SCC <- readRDS("Source_Classification_Code.rds") # Aggregates Emissions <- aggregate(NEI[, 'Emissions'], by = list(NEI$year), FUN = sum) Emissions$PM25 <- round(Emissions[, 2] / 1000000, 2) png(filename = "../plot1.png") barplot(Emissions$PM25, names.arg = Emissions$Group.1, main = expression('Total Emission of PM'[2.5]), xlab = 'Year', ylab = expression(paste('PM', ''[2.5], ' in Megatons')) ) dev.off()
## to execute: R CMD BATCH packages.R
# grind it out the long way

# Install packages into the user library, in three batches, from the CMU
# CRAN mirror.

# Batch 1: tooling needed by the later install_github() call.
install.packages(c("evaluate", "base64enc", "devtools"),
                 Sys.getenv("R_LIBS_USER"),
                 repos = "http://lib.stat.cmu.edu/R/CRAN/"
)
# Batch 2: HTTP/curl bindings.
# NOTE(review): "libcurl-devel" looks like an OS-level (yum/dnf) package
# name, not a CRAN package — install.packages() will likely report it as
# unavailable; confirm whether it should be installed via the system
# package manager instead.
install.packages(c("libcurl-devel", "curl", "RCurl"),
                 Sys.getenv("R_LIBS_USER"),
                 repos = "http://lib.stat.cmu.edu/R/CRAN/"
)
# Batch 3: plotting / data-manipulation / reporting packages.
install.packages(c("scales", "ggplot2", "dplyr", "plyr", "knitr"),
                 Sys.getenv("R_LIBS_USER"),
                 repos = "http://lib.stat.cmu.edu/R/CRAN/"
)
# Finally, install the repr package from GitHub using devtools.
library('devtools'); install_github('IRkernel/repr')
/r-devel/packages.R
no_license
jeffreymanning/R
R
false
false
514
r
## to execute: R CMD BATCH packages.R # grind it out the long way install.packages(c("evaluate", "base64enc", "devtools"), Sys.getenv("R_LIBS_USER"), repos = "http://lib.stat.cmu.edu/R/CRAN/" ) install.packages(c("libcurl-devel", "curl", "RCurl"), Sys.getenv("R_LIBS_USER"), repos = "http://lib.stat.cmu.edu/R/CRAN/" ) install.packages(c("scales", "ggplot2", "dplyr", "plyr", "knitr"), Sys.getenv("R_LIBS_USER"), repos = "http://lib.stat.cmu.edu/R/CRAN/" ) library('devtools'); install_github('IRkernel/repr')
#' Internal function which does not have to be used by the users
#'
#' Binary search for the soft-thresholding parameter lambda such that the
#' soft-thresholded, L2-normalized vector has L1 norm equal to `sumabs`.
#' Relies on the package-internal helpers `norm2` and `soft`.
#'
#' @author Arnaud Gloaguen
#' @keywords internal
BinarySearch <- function(argu, sumabs) {
  # No thresholding needed: zero vector, or the constraint already holds.
  if (norm2(argu) == 0 || sum(abs(argu / norm2(argu))) <= sumabs) {
    return(0)
  }
  lam_max <- max(abs(argu))
  lo <- 0
  hi <- lam_max
  # Bisect on [lo, hi]; cap at 499 iterations as in the original loop.
  for (step in seq_len(499)) {
    mid <- (lo + hi) / 2
    su <- soft(argu, mid)
    # Shrink the bracket toward the lambda meeting the L1 target.
    if (sum(abs(su / norm2(su))) < sumabs) {
      hi <- mid
    } else {
      lo <- mid
    }
    # Converged when the bracket is tiny relative to its lower end;
    # avoid returning lam_max itself (fully thresholded vector).
    if ((hi - lo) / lo < 1e-10) {
      if (hi != lam_max) {
        return(hi)
      }
      return(lo)
    }
  }
  warning("Didn't quite converge")
  (lo + hi) / 2
}
/R/BinarySearch.R
no_license
jimhester/RGCCA
R
false
false
801
r
#' Internal function which does not have to be used by the users #' @author Arnaud Gloaguen #' @keywords internal BinarySearch <- function(argu, sumabs) { if (norm2(argu) == 0 || sum(abs(argu / norm2(argu))) <= sumabs) return(0) lam_max <- max(abs(argu)) lam1 <- 0 lam2 <- lam_max iter <- 1 while (iter < 500) { su <- soft(argu, (lam1 + lam2) / 2) if (sum(abs(su / norm2(su))) < sumabs) { lam2 <- (lam1 + lam2) / 2 } else { lam1 <- (lam1 + lam2) / 2 } if ((lam2 - lam1) / lam1 < 1e-10) { if (lam2 != lam_max) { return(lam2) } else { return(lam1) } } iter <- iter + 1 } warning("Didn't quite converge") return((lam1 + lam2) / 2) }
#' Backward generation intervals simulated by a SEmInR model #' #' A dataset containing the backward generation intervals simulated by a SEmInR model #' which is individual based and follows the Gillespie algorithm (with tau-leap) for the stochastic component. #' The model is wrapped in a R package and can be downloaded here: \code{https://github.com/davidchampredon/seminribm}. #' #' The parameter values used for this data set are: #' horizon <- 300 #' popSize <- 5e3 #' #' initInfectious <- 2 #' R0 <- 3.0 #' latent_mean <- 2 #' infectious_mean <- 4 #' nE <- 6 #' nI <- 6 #' calc_WIW_Re <- FALSE #' doExact <- FALSE #' timeStepTauLeap <- 0.1 #' rnd_seed <- 1234 #' #' @format A data frame with 4749 rows (=infection events) and 3 variables: #' \describe{ #' \item{at}{Disease acquisition time.} #' \item{rt}{Rounded value of \code{at}.} #' \item{b}{Value of the backward generation interval for this infection event.} #' } #' @source \url{https://github.com/davidchampredon/seminribm} "sim_gi"
/data/data.R
no_license
davidchampredon/GI-dev
R
false
false
1,070
r
#' Backward generation intervals simulated by a SEmInR model #' #' A dataset containing the backward generation intervals simulated by a SEmInR model #' which is individual based and follows the Gillespie algorithm (with tau-leap) for the stochastic component. #' The model is wrapped in a R package and can be downloaded here: \code{https://github.com/davidchampredon/seminribm}. #' #' The parameter values used for this data set are: #' horizon <- 300 #' popSize <- 5e3 #' #' initInfectious <- 2 #' R0 <- 3.0 #' latent_mean <- 2 #' infectious_mean <- 4 #' nE <- 6 #' nI <- 6 #' calc_WIW_Re <- FALSE #' doExact <- FALSE #' timeStepTauLeap <- 0.1 #' rnd_seed <- 1234 #' #' @format A data frame with 4749 rows (=infection events) and 3 variables: #' \describe{ #' \item{at}{Disease acquisition time.} #' \item{rt}{Rounded value of \code{at}.} #' \item{b}{Value of the backward generation interval for this infection event.} #' } #' @source \url{https://github.com/davidchampredon/seminribm} "sim_gi"
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/avg_coverage.R \name{avg_coverage_arima} \alias{avg_coverage_arima} \title{Compute the average coverage of the prediction intervals computed by naive plug-in method and \code{\link{arima_pi}}} \usage{ avg_coverage_arima(phi = NULL, theta = NULL, d = 0, n, n_ahead = 1, nsim2, nsim = 100, level = 0.95, prior = "uniform", return_all_coverages = FALSE, ...) } \arguments{ \item{phi}{vector containing the AR parameters} \item{theta}{vector containing the MA parameters} \item{d}{degree of differencing} \item{n}{length of the time series} \item{n_ahead}{length of the forecast horizon} \item{nsim2}{number of simulations used in computing the expected coverage} \item{nsim}{number of simulations used in importance sampling} \item{level}{desired coverage probability of the prediction intervals} \item{prior}{prior to be used in importance sampling. Multiple choices are allowed.} \item{return_all_coverages}{return raw results i.e. coverages for each simulations. When \code{FALSE} (default), summary statistics are returned.} \item{...}{additional arguments to \code{\link{arima_pi}}.} } \value{ a list containing the coverage probabilities } \description{ Computes expected coverage probabilities of the prediction intervals of ARMA process by simulating time series from the known model. } \examples{ \dontrun{ set.seed(123) # takes a while, notice se, increase nsim2 to get more accurate results avg_coverage_arima(phi = 0.9, n = 50, n_ahead = 10, nsim2 = 100) avg_coverage_arima(phi = 0.9, theta = -0.6, n = 50, n_ahead = 10, nsim2 = 100) } } \seealso{ \code{\link{arima_pi}}. }
/man/avg_coverage_arima.Rd
no_license
helske/tsPI
R
false
true
1,679
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/avg_coverage.R \name{avg_coverage_arima} \alias{avg_coverage_arima} \title{Compute the average coverage of the prediction intervals computed by naive plug-in method and \code{\link{arima_pi}}} \usage{ avg_coverage_arima(phi = NULL, theta = NULL, d = 0, n, n_ahead = 1, nsim2, nsim = 100, level = 0.95, prior = "uniform", return_all_coverages = FALSE, ...) } \arguments{ \item{phi}{vector containing the AR parameters} \item{theta}{vector containing the MA parameters} \item{d}{degree of differencing} \item{n}{length of the time series} \item{n_ahead}{length of the forecast horizon} \item{nsim2}{number of simulations used in computing the expected coverage} \item{nsim}{number of simulations used in importance sampling} \item{level}{desired coverage probability of the prediction intervals} \item{prior}{prior to be used in importance sampling. Multiple choices are allowed.} \item{return_all_coverages}{return raw results i.e. coverages for each simulations. When \code{FALSE} (default), summary statistics are returned.} \item{...}{additional arguments to \code{\link{arima_pi}}.} } \value{ a list containing the coverage probabilities } \description{ Computes expected coverage probabilities of the prediction intervals of ARMA process by simulating time series from the known model. } \examples{ \dontrun{ set.seed(123) # takes a while, notice se, increase nsim2 to get more accurate results avg_coverage_arima(phi = 0.9, n = 50, n_ahead = 10, nsim2 = 100) avg_coverage_arima(phi = 0.9, theta = -0.6, n = 50, n_ahead = 10, nsim2 = 100) } } \seealso{ \code{\link{arima_pi}}. }
% Generated by roxygen2 (4.0.2): do not edit by hand \name{predict.logitreg} \alias{predict.logitreg} \title{Predict values of a logitreg object} \usage{ \method{predict}{logitreg}(object, newdata) } \arguments{ \item{object}{logitreg. An object of class logitreg.} \item{newdata}{data.frame. Optional, additional data to predict. Has to have the same dimensions as the data of the logitreg object. Tries to convert the object to a data.frame if it is not already one.} } \value{ Vector of predicted probabilities. } \description{ Predict values of a logitreg object or for newdata with the same dimensions as the original data. } \author{ Janek Thomas, Philipp Roesch }
/logitreg6/man/predict.logitreg.Rd
no_license
ja-thomas/FProgBlatt2
R
false
false
673
rd
% Generated by roxygen2 (4.0.2): do not edit by hand \name{predict.logitreg} \alias{predict.logitreg} \title{Predict values of a logitreg object} \usage{ \method{predict}{logitreg}(object, newdata) } \arguments{ \item{object}{logitreg. An object of class logitreg.} \item{newdata}{data.frame. Optional, additional data to predict. Has to have the same dimensions as the data of the logitreg object. Tries to convert the object to a data.frame if it is not already one.} } \value{ Vector of predicted probabilities. } \description{ Predict values of a logitreg object or for newdata with the same dimensions as the original data. } \author{ Janek Thomas, Philipp Roesch }
# Developer: Aaron Hsu (aaronhsu.re[at]removethis.gmail.com)
# Date: June 2016

# Shiny server: renders four aligned time-series panels (median sale price,
# sale/list ratio, price per square foot, days on market) filtered by the
# user-selected property type and cities.
# NOTE(review): assumes kangaroo/kangaroo2 data frames contain columns
# type, city, year, msp, ratio, psf, dom — inferred from the subset()/aes()
# calls below; confirm against the .RData files.

# Load required libraries
library(ggplot2)
library(gridExtra)
library(grid)
library(scales)

# Load data
load("kangaroo.RData")
load("kangaroo2.RData")

# Shiny server script
shinyServer(function(input, output) {

  # Define output object dataPlotCombined
  output$dataPlotCombined <- renderPlot({

    # Generate plot for MSP (one line per selected city, filtered by type)
    p1 <- ggplot(
      data = subset(kangaroo, kangaroo[, "type"] == input$selectType &
                      kangaroo[, "city"] %in% input$selectCity),
      aes(
        x = year,
        y = msp,
        group = city,
        color = city
      )
    ) +
      geom_point() +
      geom_line() +
      ggtitle("Median Sale Price") +
      labs(y = "Median Sale Price, dollars", x = "Year") +
      scale_colour_discrete(name="") +
      theme(legend.position = "bottom",
            axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.3)) +
      scale_y_continuous(labels = comma)  # comma() comes from scales
    # Developer: Aaron Hsu (aaronhsu.re[at]removethis.gmail.com)
    # ylim(0, max(subset(kangaroo, kangaroo[, "type"] == input$selectType & kangaroo[, "city"] %in% input$selectCity)[, "msp"]))

    # Generate plot for ratio (horizontal reference line marks ratio = 1)
    p2 <- ggplot(
      data = subset(kangaroo, kangaroo[, "type"] == input$selectType &
                      kangaroo[, "city"] %in% input$selectCity),
      aes(
        x = year,
        y = ratio,
        group = city,
        color = city
      )
    ) +
      geom_point() +
      geom_line() +
      ggtitle("Sale Price to List Price Ratio") +
      labs(y = "Sale Price to List Price Ratio", x = "Year") +
      scale_colour_discrete(name="") +
      theme(legend.position = "bottom",
            axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.3)) +
      geom_abline(slope = 0, intercept = 1) +
      ylim(0.85, 1.15)
    # xlim(max(subset(kangaroo, kangaroo[, "type"] == input$selectType & kangaroo[, "city"]
    # Developer: Aaron Hsu (aaronhsu.re[at]removethis.gmail.com)
    # %in% input$selectCity)), min(subset(kangaroo, kangaroo[, "type"] == input$selectType & kangaroo[, "city"] %in% input$selectCity)))

    # Generate plot for psf (uses the second data set, kangaroo2)
    p3 <- ggplot(
      data = subset(kangaroo2, kangaroo2[, "type"] == input$selectType &
                      kangaroo2[, "city"] %in% input$selectCity),
      aes(
        x = year,
        y = psf,
        group = city,
        color = city
      )
    ) +
      geom_point() +
      geom_line() +
      ggtitle("Price per Square Foot") +
      labs(y = "Price per Square Foot", x = "Year") +
      scale_colour_discrete(name="") +
      theme(legend.position = "bottom",
            axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.3)) +
      scale_y_continuous(labels = comma)

    # Generate plot for dom
    p4 <- ggplot(
      data = subset(kangaroo2, kangaroo2[, "type"] == input$selectType &
                      kangaroo2[, "city"] %in% input$selectCity),
      aes(
        x = year,
        y = dom,
        group = city,
        color = city
      )
    ) +
      geom_point() +
      geom_line() +
      ggtitle("Median Days on Market") +
      labs(y = "Median Days on Market", x = "Year") +
      scale_colour_discrete(name="") +
      theme(legend.position = "bottom",
            axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.3))

    # Convert plots into gtables (so their widths can be equalized below)
    p1 <- ggplot_gtable(ggplot_build(p1))
    p2 <- ggplot_gtable(ggplot_build(p2))
    p3 <- ggplot_gtable(ggplot_build(p3))
    p4 <- ggplot_gtable(ggplot_build(p4))

    # Calculate max widths of the plots
    maxWidth = unit.pmax(p1$widths[2:3], p2$widths[2:3], p3$widths[2:3], p4$widths[2:3])

    # Set the widths to be the same so the panels align vertically
    p1$widths[2:3] <- maxWidth
    p2$widths[2:3] <- maxWidth
    p3$widths[2:3] <- maxWidth
    p4$widths[2:3] <- maxWidth

    # Draw the plots stacked in one column with fixed relative heights
    grid.arrange(p1, p2, p3, p4, heights = c(10, 8, 8, 7))

  }, height = 1600)
})
/server.R
no_license
aaronhsumath/resplendent-condoroo
R
false
false
3,983
r
# Developer: Aaron Hsu (aaronhsu.re[at]removethis.gmail.com)
# Date: June 2016
#
# Shiny server script: draws four stacked housing-market trend charts for
# the property type and cities chosen in the UI.

# Required libraries
library(ggplot2)
library(gridExtra)
library(grid)
library(scales)

# Data frames `kangaroo` and `kangaroo2`
load("kangaroo.RData")
load("kangaroo2.RData")

shinyServer(function(input, output) {

  # Combined output plot
  output$dataPlotCombined <- renderPlot({

    # Median sale price over time, one line per selected city
    p1 <- ggplot(
      data = subset(kangaroo,
                    kangaroo[, "type"] == input$selectType &
                    kangaroo[, "city"] %in% input$selectCity),
      aes(x = year, y = msp, group = city, color = city)) +
      geom_point() +
      geom_line() +
      ggtitle("Median Sale Price") +
      labs(y = "Median Sale Price, dollars", x = "Year") +
      scale_colour_discrete(name = "") +
      theme(legend.position = "bottom",
            axis.text.x = element_text(angle = 45, hjust = 0.5,
                                       vjust = 0.3)) +
      scale_y_continuous(labels = comma)

    # Sale-to-list-price ratio, with a horizontal reference line at 1
    p2 <- ggplot(
      data = subset(kangaroo,
                    kangaroo[, "type"] == input$selectType &
                    kangaroo[, "city"] %in% input$selectCity),
      aes(x = year, y = ratio, group = city, color = city)) +
      geom_point() +
      geom_line() +
      ggtitle("Sale Price to List Price Ratio") +
      labs(y = "Sale Price to List Price Ratio", x = "Year") +
      scale_colour_discrete(name = "") +
      theme(legend.position = "bottom",
            axis.text.x = element_text(angle = 45, hjust = 0.5,
                                       vjust = 0.3)) +
      geom_abline(slope = 0, intercept = 1) +
      ylim(0.85, 1.15)

    # Price per square foot
    p3 <- ggplot(
      data = subset(kangaroo2,
                    kangaroo2[, "type"] == input$selectType &
                    kangaroo2[, "city"] %in% input$selectCity),
      aes(x = year, y = psf, group = city, color = city)) +
      geom_point() +
      geom_line() +
      ggtitle("Price per Square Foot") +
      labs(y = "Price per Square Foot", x = "Year") +
      scale_colour_discrete(name = "") +
      theme(legend.position = "bottom",
            axis.text.x = element_text(angle = 45, hjust = 0.5,
                                       vjust = 0.3)) +
      scale_y_continuous(labels = comma)

    # Median days on market
    p4 <- ggplot(
      data = subset(kangaroo2,
                    kangaroo2[, "type"] == input$selectType &
                    kangaroo2[, "city"] %in% input$selectCity),
      aes(x = year, y = dom, group = city, color = city)) +
      geom_point() +
      geom_line() +
      ggtitle("Median Days on Market") +
      labs(y = "Median Days on Market", x = "Year") +
      scale_colour_discrete(name = "") +
      theme(legend.position = "bottom",
            axis.text.x = element_text(angle = 45, hjust = 0.5,
                                       vjust = 0.3))

    # Convert every plot to a gtable, then give them all the same left
    # margin so the panels line up vertically.
    gtabs <- lapply(list(p1, p2, p3, p4),
                    function(p) ggplot_gtable(ggplot_build(p)))
    widest <- Reduce(unit.pmax, lapply(gtabs, function(g) g$widths[2:3]))
    gtabs <- lapply(gtabs, function(g) {
      g$widths[2:3] <- widest
      g
    })

    # Stack the aligned panels
    grid.arrange(gtabs[[1]], gtabs[[2]], gtabs[[3]], gtabs[[4]],
                 heights = c(10, 8, 8, 7))
  }, height = 1600)
})
# Compare usage-data coverage between study years 1 and 2, then model what
# predicts a treated student's usage records being missing.

library(dplyr)
library(lme4)

### We have more usage data in year 1 than year 2

# Section-level usage ("advance") data
print(load('~/Box Sync/CT/data/sectionLevelUsageData/advanceData.RData'))
advance %>% group_by(year) %>% summarize(n_distinct(field_id))

# Problem-level usage data
print(load('~/Box Sync/CT/data/problemLevelUsageData/probLevelData.RData'))
x %>% group_by(study.year) %>% summarize(n_distinct(field_id))

#### What about as a percentage of students in the RAND dataset?
hs1 <- read.csv('~/Box Sync/CT/data/RANDstudyData/H1_algebra_rcal_20121119_fieldid.csv')
hs2 <- read.csv('~/Box Sync/CT/data/RANDstudyData/H2_algebra_rcal_20121119_fieldid.csv')

### Treated students in the RAND dataset
ids <- unique(c(hs1$field_id[hs1$treatment == 1],
                hs2$field_id[hs2$treatment == 1]))
# NOTE: a student appearing in both years is counted as year 1 here
year <- ifelse(ids %in% hs1$field_id, 1, 2)

### Coverage in the "advance" dataset
mean(ids[year == 1] %in% advance$field_id)
mean(ids[year == 2] %in% advance$field_id)

### Coverage in the problem-level dataset
mean(ids[year == 1] %in% x$field_id)
mean(ids[year == 2] %in% x$field_id)

### Coverage in either dataset
usageIDs <- unique(c(unique(x$field_id), unique(advance$field_id)))
mean(ids[year == 1] %in% usageIDs)
mean(ids[year == 2] %in% usageIDs)

### Is the year-2 gap just because "both-year" students were counted as
### year 1? (193 is presumably the both-year count -- confirm against the
### length(intersect(...)) printed just below)
length(intersect(hs1$field_id, hs2$field_id))
(sum(ids[year == 2] %in% usageIDs) + 193) / (length(ids[year == 2]) + 193)

### What predicts missingness?
stopifnot(all.equal(names(hs1)[1:100], names(hs2)[1:100]))
stud <- rbind(hs1[, 1:100], hs2[!hs2$field_id %in% hs1$field_id, 1:100])
stud$field_id <- c(hs1$field_id,
                   hs2$field_id[!hs2$field_id %in% hs1$field_id])
stud <- stud[stud$treatment == 1, ]
stud$obsUsage <- stud$field_id %in% usageIDs

# Most frequent level of a factor (assumes x is a factor -- TODO confirm)
Mode <- function(x) levels(x)[which.max(table(x))]

## Median/mode imputation for predictors, with NA indicator flags.
## Medians/modes are computed within the data frame passed in, so calling
## this per year keeps the original per-year imputation behavior.
## NOTE(review): frl is imputed to 1 and sex to 'F' rather than to the
## observed mode -- presumably deliberate; confirm with the analysis plan.
imputeStud <- function(d) {
  within(d, {
    gradeNA <- is.na(grade)
    grade[is.na(grade)] <- median(grade, na.rm = TRUE)
    raceNA <- is.na(race)
    race[is.na(race)] <- Mode(race)
    sexNA <- is.na(sex)
    sex[is.na(sex)] <- 'F'
    specedNA <- is.na(spec_speced)
    spec_speced[is.na(spec_speced)] <- 0
    giftedNA <- is.na(spec_gifted)
    spec_gifted[is.na(spec_gifted)] <- 0
    eslNA <- is.na(spec_esl)
    spec_esl[is.na(spec_esl)] <- 0
    frlNA <- is.na(frl)
    frl[is.na(frl)] <- 1
    xirtNA <- is.na(xirt)
    xirt[is.na(xirt)] <- median(xirt, na.rm = TRUE)
  })
}

studImp1 <- imputeStud(subset(stud, year == 1))
studImp2 <- imputeStud(subset(stud, year == 2))
studImp <- rbind(studImp1, studImp2)

## Missingness due to year?
summary(glmer(obsUsage ~ year + (1 + year | schoolid2),
              family = binomial, data = stud))

## Due to changes in composition?
summary(mainEffMod <- glmer(
  obsUsage ~ state + grade + gradeNA + race + raceNA + sex + sexNA +
    spec_speced + specedNA + spec_gifted + spec_esl + eslNA +
    frl + frlNA + xirt + xirtNA + year + (1 | schoolid2),
  data = studImp, family = binomial))
summary(missMod <- glmer(
  obsUsage ~ (state + grade + gradeNA + race + raceNA + sex + sexNA +
                spec_speced + specedNA + spec_gifted + spec_esl + eslNA +
                frl + frlNA + xirt + xirtNA) * year + (1 | schoolid2),
  data = studImp, family = binomial))

## Per-school share of students with observed usage, by year
schoolMiss <- NULL
for (scl in unique(stud$schoolid2)) {
  schoolMiss <- rbind(schoolMiss,
                      with(stud,
                           c(mean(obsUsage[year == 1 & schoolid2 == scl]),
                             mean(obsUsage[year == 2 & schoolid2 == scl]))))
}
rownames(schoolMiss) <- unique(stud$schoolid2)
schoolMiss <- cbind(schoolMiss, diff = schoolMiss[, 1] - schoolMiss[, 2])

## Schools with usage observed in both years and a between-year coverage
## gap smaller than 0.4
smallDiff <- rownames(schoolMiss)[!is.na(schoolMiss[, 'diff']) &
                                    abs(schoolMiss[, 'diff']) < 0.4 &
                                    schoolMiss[, 1] > 0 &
                                    schoolMiss[, 2] > 0]
/missingUsage.r
no_license
adamSales/cpPaper
R
false
false
4,338
r
# Usage-data coverage by study year, and models of which treated students
# lack usage records.

library(dplyr)
library(lme4)

### More usage data exists in year 1 than in year 2

# Section-level ("advance") usage data
print(load('~/Box Sync/CT/data/sectionLevelUsageData/advanceData.RData'))
advance %>%
  group_by(year) %>%
  summarize(n_distinct(field_id))

# Problem-level usage data
print(load('~/Box Sync/CT/data/problemLevelUsageData/probLevelData.RData'))
x %>%
  group_by(study.year) %>%
  summarize(n_distinct(field_id))

#### Coverage as a fraction of the RAND study sample
hs1 <- read.csv('~/Box Sync/CT/data/RANDstudyData/H1_algebra_rcal_20121119_fieldid.csv')
hs2 <- read.csv('~/Box Sync/CT/data/RANDstudyData/H2_algebra_rcal_20121119_fieldid.csv')

### Treated students in the RAND data (both-year students count as year 1)
ids <- unique(c(hs1$field_id[hs1$treatment == 1],
                hs2$field_id[hs2$treatment == 1]))
year <- ifelse(ids %in% hs1$field_id, 1, 2)

### Coverage in the "advance" data
mean(ids[year == 1] %in% advance$field_id)
mean(ids[year == 2] %in% advance$field_id)

### Coverage in the problem-level data
mean(ids[year == 1] %in% x$field_id)
mean(ids[year == 2] %in% x$field_id)

### Coverage in either dataset
usageIDs <- unique(c(unique(x$field_id), unique(advance$field_id)))
mean(ids[year == 1] %in% usageIDs)
mean(ids[year == 2] %in% usageIDs)

### Does counting both-year students as year 1 explain the gap?
length(intersect(hs1$field_id, hs2$field_id))
(sum(ids[year == 2] %in% usageIDs) + 193) / (length(ids[year == 2]) + 193)

### What predicts missingness?
stopifnot(all.equal(names(hs1)[1:100], names(hs2)[1:100]))
stud <- rbind(hs1[, 1:100], hs2[!hs2$field_id %in% hs1$field_id, 1:100])
stud$field_id <- c(hs1$field_id,
                   hs2$field_id[!hs2$field_id %in% hs1$field_id])
stud <- stud[stud$treatment == 1, ]
stud$obsUsage <- stud$field_id %in% usageIDs

# Most common level of a factor
Mode <- function(x) levels(x)[which.max(table(x))]

## Median/mode imputation with NA indicator flags, done per year so the
## medians/modes come from that year's students only.
studImp1 <- within(subset(stud, year == 1), {
  gradeNA <- is.na(grade)
  grade[is.na(grade)] <- median(grade, na.rm = TRUE)
  raceNA <- is.na(race)
  race[is.na(race)] <- Mode(race)
  sexNA <- is.na(sex)
  sex[is.na(sex)] <- 'F'
  specedNA <- is.na(spec_speced)
  spec_speced[is.na(spec_speced)] <- 0
  giftedNA <- is.na(spec_gifted)
  spec_gifted[is.na(spec_gifted)] <- 0
  eslNA <- is.na(spec_esl)
  spec_esl[is.na(spec_esl)] <- 0
  frlNA <- is.na(frl)
  frl[is.na(frl)] <- 1
  xirtNA <- is.na(xirt)
  xirt[is.na(xirt)] <- median(xirt, na.rm = TRUE)
})

studImp2 <- within(subset(stud, year == 2), {
  gradeNA <- is.na(grade)
  grade[is.na(grade)] <- median(grade, na.rm = TRUE)
  raceNA <- is.na(race)
  race[is.na(race)] <- Mode(race)
  sexNA <- is.na(sex)
  sex[is.na(sex)] <- 'F'
  specedNA <- is.na(spec_speced)
  spec_speced[is.na(spec_speced)] <- 0
  giftedNA <- is.na(spec_gifted)
  spec_gifted[is.na(spec_gifted)] <- 0
  eslNA <- is.na(spec_esl)
  spec_esl[is.na(spec_esl)] <- 0
  frlNA <- is.na(frl)
  frl[is.na(frl)] <- 1
  xirtNA <- is.na(xirt)
  xirt[is.na(xirt)] <- median(xirt, na.rm = TRUE)
})

studImp <- rbind(studImp1, studImp2)

## Is missingness explained by year alone?
summary(glmer(obsUsage ~ year + (1 + year | schoolid2),
              family = binomial, data = stud))

## ...or by changes in sample composition?
summary(mainEffMod <- glmer(
  obsUsage ~ state + grade + gradeNA + race + raceNA + sex + sexNA +
    spec_speced + specedNA + spec_gifted + spec_esl + eslNA +
    frl + frlNA + xirt + xirtNA + year + (1 | schoolid2),
  data = studImp, family = binomial))
summary(missMod <- glmer(
  obsUsage ~ (state + grade + gradeNA + race + raceNA + sex + sexNA +
                spec_speced + specedNA + spec_gifted + spec_esl + eslNA +
                frl + frlNA + xirt + xirtNA) * year + (1 | schoolid2),
  data = studImp, family = binomial))

## Per-school usage-coverage rates for each year
schoolMiss <- NULL
for (sch in unique(stud$schoolid2)) {
  schoolMiss <- rbind(schoolMiss,
                      with(stud,
                           c(mean(obsUsage[year == 1 & schoolid2 == sch]),
                             mean(obsUsage[year == 2 & schoolid2 == sch]))))
}
rownames(schoolMiss) <- unique(stud$schoolid2)
schoolMiss <- cbind(schoolMiss, diff = schoolMiss[, 1] - schoolMiss[, 2])

## Schools with nonzero coverage in both years and a gap below 0.4
smallDiff <- rownames(schoolMiss)[!is.na(schoolMiss[, 'diff']) &
                                    abs(schoolMiss[, 'diff']) < 0.4 &
                                    schoolMiss[, 1] > 0 &
                                    schoolMiss[, 2] > 0]
library(Peptides)

### Name: aaComp
### Title: Compute the amino acid composition of a protein sequence
### Aliases: aaComp

### ** Examples

## Reference output from EMBOSS pepstats
## (http://emboss.bioinformatics.nl/cgi-bin/emboss/pepstats):
#  Property      Residues              Number  Mole%
#  Tiny          (A+C+G+S+T)           4       19.048
#  Small         (A+B+C+D+G+N+P+S+T+V) 4       19.048
#  Aliphatic     (A+I+L+V)             5       23.810
#  Aromatic      (F+H+W+Y)             5       23.810
#  Non-polar     (A+C+F+G+I+L+M+P+V+W+Y) 11    52.381
#  Polar         (D+E+H+K+N+Q+R+S+T+Z) 9       42.857
#  Charged       (B+D+E+H+K+R+Z)       8       38.095
#  Basic         (H+K+R)               8       38.095
#  Acidic        (B+D+E+Z)             0       00.000

## Amino-acid composition of PDB 1D9J (Cecropin peptide)
aaComp(seq = "KWKLFKKIGIGKFLHSAKKFX")

## Expected output:
#            Number Mole %
#  Tiny           4 19.048
#  Small          4 19.048
#  Aliphatic      5 23.810
#  Aromatic       5 23.810
#  NonPolar      11 52.381
#  Polar          9 42.857
#  Charged        8 38.095
#  Basic          8 38.095
#  Acidic         0  0.000
/data/genthat_extracted_code/Peptides/examples/aaComp.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
1,096
r
library(Peptides)

### Name: aaComp
### Title: Compute the amino acid composition of a protein sequence
### Aliases: aaComp

### ** Examples

## For comparison, EMBOSS pepstats reports the following for this peptide
## (http://emboss.bioinformatics.nl/cgi-bin/emboss/pepstats):
#  Tiny      (A+C+G+S+T)              4  19.048
#  Small     (A+B+C+D+G+N+P+S+T+V)    4  19.048
#  Aliphatic (A+I+L+V)                5  23.810
#  Aromatic  (F+H+W+Y)                5  23.810
#  Non-polar (A+C+F+G+I+L+M+P+V+W+Y) 11  52.381
#  Polar     (D+E+H+K+N+Q+R+S+T+Z)    9  42.857
#  Charged   (B+D+E+H+K+R+Z)          8  38.095
#  Basic     (H+K+R)                  8  38.095
#  Acidic    (B+D+E+Z)                0  00.000

## Compute the composition of the Cecropin peptide (PDB: 1D9J)
aaComp(seq = "KWKLFKKIGIGKFLHSAKKFX")

## The call above should print:
#            Number Mole %
#  Tiny           4 19.048
#  Small          4 19.048
#  Aliphatic      5 23.810
#  Aromatic       5 23.810
#  NonPolar      11 52.381
#  Polar          9 42.857
#  Charged        8 38.095
#  Basic          8 38.095
#  Acidic         0  0.000